c6552d980d4fc667cd807c5b0165514e0d98d56c
[platform/kernel/linux-rpi.git] / drivers / gpu / drm / i915 / display / intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <acpi/video.h>
28 #include <linux/i2c.h>
29 #include <linux/input.h>
30 #include <linux/intel-iommu.h>
31 #include <linux/kernel.h>
32 #include <linux/module.h>
33 #include <linux/dma-resv.h>
34 #include <linux/slab.h>
35
36 #include <drm/drm_atomic.h>
37 #include <drm/drm_atomic_helper.h>
38 #include <drm/drm_atomic_uapi.h>
39 #include <drm/drm_damage_helper.h>
40 #include <drm/drm_dp_helper.h>
41 #include <drm/drm_edid.h>
42 #include <drm/drm_fourcc.h>
43 #include <drm/drm_plane_helper.h>
44 #include <drm/drm_probe_helper.h>
45 #include <drm/drm_rect.h>
46
47 #include "display/intel_audio.h"
48 #include "display/intel_crt.h"
49 #include "display/intel_ddi.h"
50 #include "display/intel_display_debugfs.h"
51 #include "display/intel_dp.h"
52 #include "display/intel_dp_mst.h"
53 #include "display/intel_dpll.h"
54 #include "display/intel_dpll_mgr.h"
55 #include "display/intel_dsi.h"
56 #include "display/intel_dvo.h"
57 #include "display/intel_fb.h"
58 #include "display/intel_gmbus.h"
59 #include "display/intel_hdmi.h"
60 #include "display/intel_lvds.h"
61 #include "display/intel_sdvo.h"
62 #include "display/intel_snps_phy.h"
63 #include "display/intel_tv.h"
64 #include "display/intel_vdsc.h"
65 #include "display/intel_vrr.h"
66
67 #include "gem/i915_gem_lmem.h"
68 #include "gem/i915_gem_object.h"
69
70 #include "gt/intel_rps.h"
71 #include "gt/gen8_ppgtt.h"
72
73 #include "g4x_dp.h"
74 #include "g4x_hdmi.h"
75 #include "i915_drv.h"
76 #include "intel_acpi.h"
77 #include "intel_atomic.h"
78 #include "intel_atomic_plane.h"
79 #include "intel_bw.h"
80 #include "intel_cdclk.h"
81 #include "intel_color.h"
82 #include "intel_crtc.h"
83 #include "intel_de.h"
84 #include "intel_display_types.h"
85 #include "intel_dmc.h"
86 #include "intel_dp_link_training.h"
87 #include "intel_dpt.h"
88 #include "intel_fbc.h"
89 #include "intel_fdi.h"
90 #include "intel_fbdev.h"
91 #include "intel_fifo_underrun.h"
92 #include "intel_frontbuffer.h"
93 #include "intel_hdcp.h"
94 #include "intel_hotplug.h"
95 #include "intel_overlay.h"
96 #include "intel_panel.h"
97 #include "intel_pipe_crc.h"
98 #include "intel_pm.h"
99 #include "intel_pps.h"
100 #include "intel_psr.h"
101 #include "intel_quirks.h"
102 #include "intel_sideband.h"
103 #include "intel_sprite.h"
104 #include "intel_tc.h"
105 #include "intel_vga.h"
106 #include "i9xx_plane.h"
107 #include "skl_scaler.h"
108 #include "skl_universal_plane.h"
109
110 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
111                                 struct intel_crtc_state *pipe_config);
112 static void ilk_pch_clock_get(struct intel_crtc *crtc,
113                               struct intel_crtc_state *pipe_config);
114
115 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
116 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
117 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
118                                          const struct intel_link_m_n *m_n,
119                                          const struct intel_link_m_n *m2_n2);
120 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
121 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
122 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
123 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
124 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
125 static void intel_modeset_setup_hw_state(struct drm_device *dev,
126                                          struct drm_modeset_acquire_ctx *ctx);
127
128 /* returns HPLL frequency in kHz */
129 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
130 {
131         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
132
133         /* Obtain SKU information */
134         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
135                 CCK_FUSE_HPLL_FREQ_MASK;
136
137         return vco_freq[hpll_freq] * 1000;
138 }
139
140 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
141                       const char *name, u32 reg, int ref_freq)
142 {
143         u32 val;
144         int divider;
145
146         val = vlv_cck_read(dev_priv, reg);
147         divider = val & CCK_FREQUENCY_VALUES;
148
149         drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
150                  (divider << CCK_FREQUENCY_STATUS_SHIFT),
151                  "%s change in progress\n", name);
152
153         return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
154 }
155
156 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
157                            const char *name, u32 reg)
158 {
159         int hpll;
160
161         vlv_cck_get(dev_priv);
162
163         if (dev_priv->hpll_freq == 0)
164                 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
165
166         hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
167
168         vlv_cck_put(dev_priv);
169
170         return hpll;
171 }
172
173 static void intel_update_czclk(struct drm_i915_private *dev_priv)
174 {
175         if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
176                 return;
177
178         dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
179                                                       CCK_CZ_CLOCK_CONTROL);
180
181         drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
182                 dev_priv->czclk_freq);
183 }
184
185 /* WA Display #0827: Gen9:all */
186 static void
187 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
188 {
189         if (enable)
190                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
191                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
192         else
193                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
194                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
195 }
196
197 /* Wa_2006604312:icl,ehl */
198 static void
199 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
200                        bool enable)
201 {
202         if (enable)
203                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
204                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
205         else
206                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
207                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
208 }
209
/* A port sync slave has its master transcoder recorded in the state. */
static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}
215
/* A port sync master has at least one slave recorded in its mask. */
static bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}
221
/* True if the transcoder participates in port sync as master or slave. */
bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
		is_trans_port_sync_slave(crtc_state);
}
228
229 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
230                                     enum pipe pipe)
231 {
232         i915_reg_t reg = PIPEDSL(pipe);
233         u32 line1, line2;
234         u32 line_mask;
235
236         if (DISPLAY_VER(dev_priv) == 2)
237                 line_mask = DSL_LINEMASK_GEN2;
238         else
239                 line_mask = DSL_LINEMASK_GEN3;
240
241         line1 = intel_de_read(dev_priv, reg) & line_mask;
242         msleep(5);
243         line2 = intel_de_read(dev_priv, reg) & line_mask;
244
245         return line1 != line2;
246 }
247
/*
 * Poll until the pipe's scanline counter is (state=true) or is no
 * longer (state=false) advancing; logs an error on a 100 ms timeout.
 */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		drm_err(&dev_priv->drm,
			"pipe %c scanline %s wait timed out\n",
			pipe_name(pipe), onoff(state));
}
259
/* Wait for the pipe's scanline counter to stop advancing. */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}
264
/* Wait for the pipe's scanline counter to start advancing. */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
269
/*
 * Wait for the pipe to actually shut down after being disabled:
 * gen4+ exposes a PIPECONF "active" status bit to poll, while older
 * platforms can only watch the scanline counter stop moving.
 */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (DISPLAY_VER(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, reg,
					    I965_PIPECONF_ACTIVE, 100))
			drm_WARN(&dev_priv->drm, 1,
				 "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}
289
290 /* Only for pre-ILK configs */
291 void assert_pll(struct drm_i915_private *dev_priv,
292                 enum pipe pipe, bool state)
293 {
294         u32 val;
295         bool cur_state;
296
297         val = intel_de_read(dev_priv, DPLL(pipe));
298         cur_state = !!(val & DPLL_VCO_ENABLE);
299         I915_STATE_WARN(cur_state != state,
300              "PLL state assertion failure (expected %s, current %s)\n",
301                         onoff(state), onoff(cur_state));
302 }
303
304 /* XXX: the dsi pll is shared between MIPI DSI ports */
305 void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
306 {
307         u32 val;
308         bool cur_state;
309
310         vlv_cck_get(dev_priv);
311         val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
312         vlv_cck_put(dev_priv);
313
314         cur_state = val & DSI_PLL_VCO_EN;
315         I915_STATE_WARN(cur_state != state,
316              "DSI PLL state assertion failure (expected %s, current %s)\n",
317                         onoff(state), onoff(cur_state));
318 }
319
320 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
321                           enum pipe pipe, bool state)
322 {
323         bool cur_state;
324
325         if (HAS_DDI(dev_priv)) {
326                 /*
327                  * DDI does not have a specific FDI_TX register.
328                  *
329                  * FDI is never fed from EDP transcoder
330                  * so pipe->transcoder cast is fine here.
331                  */
332                 enum transcoder cpu_transcoder = (enum transcoder)pipe;
333                 u32 val = intel_de_read(dev_priv,
334                                         TRANS_DDI_FUNC_CTL(cpu_transcoder));
335                 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
336         } else {
337                 u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
338                 cur_state = !!(val & FDI_TX_ENABLE);
339         }
340         I915_STATE_WARN(cur_state != state,
341              "FDI TX state assertion failure (expected %s, current %s)\n",
342                         onoff(state), onoff(cur_state));
343 }
/* Convenience wrappers for FDI TX state assertions */
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
346
347 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
348                           enum pipe pipe, bool state)
349 {
350         u32 val;
351         bool cur_state;
352
353         val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
354         cur_state = !!(val & FDI_RX_ENABLE);
355         I915_STATE_WARN(cur_state != state,
356              "FDI RX state assertion failure (expected %s, current %s)\n",
357                         onoff(state), onoff(cur_state));
358 }
/* Convenience wrappers for FDI RX state assertions */
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
361
362 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
363                                       enum pipe pipe)
364 {
365         u32 val;
366
367         /* ILK FDI PLL is always enabled */
368         if (IS_IRONLAKE(dev_priv))
369                 return;
370
371         /* On Haswell, DDI ports are responsible for the FDI PLL setup */
372         if (HAS_DDI(dev_priv))
373                 return;
374
375         val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
376         I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
377 }
378
379 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
380                        enum pipe pipe, bool state)
381 {
382         u32 val;
383         bool cur_state;
384
385         val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
386         cur_state = !!(val & FDI_RX_PLL_ENABLE);
387         I915_STATE_WARN(cur_state != state,
388              "FDI RX PLL assertion failure (expected %s, current %s)\n",
389                         onoff(state), onoff(cur_state));
390 }
391
/*
 * Assert that the panel power sequencer registers are not write-locked
 * while the panel on @pipe is powered on. The PP control register and
 * the pipe actually driving the panel are looked up per platform:
 * PCH-split parts select the port via PP_ON_DELAYS, VLV/CHV have one
 * PP block per pipe, and older parts use the single LVDS port.
 * Not applicable on DDI platforms (warns and bails out).
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		/* Map the selected panel port to the pipe feeding it. */
		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			g4x_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			g4x_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			g4x_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		/* Pre-PCH-split hardware only drives the panel via LVDS. */
		drm_WARN_ON(&dev_priv->drm,
			    port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	/* Panel off, or regs explicitly unlocked, means we're fine. */
	val = intel_de_read(dev_priv, pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
449
/*
 * Assert that @cpu_transcoder is enabled/disabled as expected.
 * PIPECONF is only read if the transcoder's power domain can be
 * grabbed; a powered-down transcoder is treated as disabled.
 */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder),
			onoff(state), onoff(cur_state));
}
477
478 static void assert_plane(struct intel_plane *plane, bool state)
479 {
480         enum pipe pipe;
481         bool cur_state;
482
483         cur_state = plane->get_hw_state(plane, &pipe);
484
485         I915_STATE_WARN(cur_state != state,
486                         "%s assertion failure (expected %s, current %s)\n",
487                         plane->base.name, onoff(state), onoff(cur_state));
488 }
489
/* Convenience wrappers for plane state assertions */
#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)
492
/* Warn if any plane attached to @crtc is still enabled in hardware. */
static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}
501
502 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
503                                     enum pipe pipe)
504 {
505         u32 val;
506         bool enabled;
507
508         val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
509         enabled = !!(val & TRANS_ENABLE);
510         I915_STATE_WARN(enabled,
511              "transcoder assertion failed, should be off on pipe %c but is still active\n",
512              pipe_name(pipe));
513 }
514
515 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
516                                    enum pipe pipe, enum port port,
517                                    i915_reg_t dp_reg)
518 {
519         enum pipe port_pipe;
520         bool state;
521
522         state = g4x_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
523
524         I915_STATE_WARN(state && port_pipe == pipe,
525                         "PCH DP %c enabled on transcoder %c, should be disabled\n",
526                         port_name(port), pipe_name(pipe));
527
528         I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
529                         "IBX PCH DP %c still using transcoder B\n",
530                         port_name(port));
531 }
532
533 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
534                                      enum pipe pipe, enum port port,
535                                      i915_reg_t hdmi_reg)
536 {
537         enum pipe port_pipe;
538         bool state;
539
540         state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
541
542         I915_STATE_WARN(state && port_pipe == pipe,
543                         "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
544                         port_name(port), pipe_name(pipe));
545
546         I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
547                         "IBX PCH HDMI %c still using transcoder B\n",
548                         port_name(port));
549 }
550
/* Warn if any PCH output port (DP, VGA, LVDS, HDMI) still uses @pipe. */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
575
/*
 * Poll the DPLL/PHY status register until @dig_port's ready bits match
 * @expected_mask, warning with the observed value on timeout. Port C
 * shares DPLL(0) status with port B, with its bits shifted up by 4.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dig_port->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		/* No other ports have PHY ready bits to wait on. */
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}
609
/*
 * Enable the PCH transcoder for @crtc_state's pipe. The shared DPLL
 * and both FDI TX/RX must already be running. Programs the frame
 * start delay, BPC (on IBX) and interlace mode to match the CPU
 * PIPECONF before setting TRANS_ENABLE and waiting for the hardware
 * to report the transcoder enabled.
 */
static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		/*
		 * Workaround: Set the timing override bit
		 * before enabling the pch transcoder.
		 */
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);

		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	/* Copy the CPU's interlace configuration into the transcoder. */
	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
			pipe_name(pipe));
}
676
/*
 * Enable the single LPT PCH transcoder (hardwired to the PIPE_A FDI
 * RX), copying the interlace setting from @cpu_transcoder's PIPECONF
 * and waiting for the hardware to report the transcoder enabled.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	/* Workaround: set timing override bit. */
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	/* Configure frame start delay to match the CPU */
	val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
	val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));

	/* Copy the CPU's interlace configuration into the transcoder. */
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
				  TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
}
708
/*
 * Disable the PCH transcoder for @pipe. FDI and all PCH ports must
 * already be off; clears TRANS_ENABLE, waits for the hardware to
 * report the transcoder disabled, and (on CPT) undoes the timing
 * override workaround applied at enable time.
 */
static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
				       enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
			pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		intel_de_write(dev_priv, reg, val);
	}
}
739
/*
 * Disable the single LPT PCH transcoder: clear TRANS_ENABLE, wait for
 * the hardware to report it off, then undo the timing override
 * workaround applied at enable time.
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
				    TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
}
757
758 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
759 {
760         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
761
762         if (HAS_PCH_LPT(dev_priv))
763                 return PIPE_A;
764         else
765                 return crtc->pipe;
766 }
767
/*
 * Enable the CPU pipe for @new_crtc_state. All planes must already be
 * disabled; asserts the PLL/FDI preconditions for the platform, sets
 * PIPECONF_ENABLE, and when no hardware vblank counter is usable
 * (max vblank count == 0) waits for the scanline counter to start
 * moving before returning.
 */
void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	/* Wa_22012358565:adl-p */
	if (DISPLAY_VER(dev_priv) == 13)
		intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
			     0, PIPE_ARB_USE_PROG_SLOTS);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
828
/*
 * Disable the pipe for @old_crtc_state and wait for it to actually
 * stop scanning out. On i830 the enable bit is deliberately left set
 * (the hardware needs both pipes running), in which case the wait is
 * skipped.
 */
void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	/* Nothing to do if the pipe is already off. */
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	/* Clear the FEC stall chicken bit on TGL+ before shutting down. */
	if (DISPLAY_VER(dev_priv) >= 12)
		intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);

	intel_de_write(dev_priv, reg, val);
	/* Only wait for pipe-off if we actually cleared the enable bit. */
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
870
871 bool
872 intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
873                                     u64 modifier)
874 {
875         return info->is_yuv &&
876                info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
877 }
878
879 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
880 {
881         unsigned int size = 0;
882         int i;
883
884         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
885                 size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;
886
887         return size;
888 }
889
890 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
891 {
892         unsigned int size = 0;
893         int i;
894
895         for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
896                 size += rem_info->plane[i].dst_stride * rem_info->plane[i].height;
897
898         return size;
899 }
900
901 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
902 {
903         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
904         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
905
906         return DISPLAY_VER(dev_priv) < 4 ||
907                 (plane->has_fbc &&
908                  plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL);
909 }
910
/*
 * Pin a framebuffer object into a display page table (DPT) address
 * space @vm. Returns the pinned vma with an extra reference held
 * (dropped via intel_unpin_fb_vma()), or an ERR_PTR on failure.
 * Note: @uses_fence and @out_flags are currently unused on this path.
 */
static struct i915_vma *
intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
		     const struct i915_ggtt_view *view,
		     bool uses_fence,
		     unsigned long *out_flags,
		     struct i915_address_space *vm)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_vma *vma;
	u32 alignment;
	int ret;

	if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
		return ERR_PTR(-EINVAL);

	/* 512 * 4K = 2MiB; presumably the DPT mapping granularity — TODO confirm */
	alignment = 4096 * 512;

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	/* Scanout must be uncached. */
	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	vma = i915_vma_instance(obj, vm, view);
	if (IS_ERR(vma))
		goto err;

	/* Rebind if an existing mapping doesn't satisfy the alignment. */
	if (i915_vma_misplaced(vma, 0, alignment, 0)) {
		ret = i915_vma_unbind(vma);
		if (ret) {
			vma = ERR_PTR(ret);
			goto err;
		}
	}

	ret = i915_vma_pin(vma, 0, alignment, PIN_GLOBAL);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	vma->display_alignment = max_t(u64, vma->display_alignment, alignment);

	/* Flush any pending CPU writes before scanout starts. */
	i915_gem_object_flush_if_display(obj);

	/* Extra reference for the caller; see intel_unpin_fb_vma(). */
	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);

	return vma;
}
966
967 struct i915_vma *
968 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
969                            bool phys_cursor,
970                            const struct i915_ggtt_view *view,
971                            bool uses_fence,
972                            unsigned long *out_flags)
973 {
974         struct drm_device *dev = fb->dev;
975         struct drm_i915_private *dev_priv = to_i915(dev);
976         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
977         intel_wakeref_t wakeref;
978         struct i915_gem_ww_ctx ww;
979         struct i915_vma *vma;
980         unsigned int pinctl;
981         u32 alignment;
982         int ret;
983
984         if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
985                 return ERR_PTR(-EINVAL);
986
987         if (phys_cursor)
988                 alignment = intel_cursor_alignment(dev_priv);
989         else
990                 alignment = intel_surf_alignment(fb, 0);
991         if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
992                 return ERR_PTR(-EINVAL);
993
994         /* Note that the w/a also requires 64 PTE of padding following the
995          * bo. We currently fill all unused PTE with the shadow page and so
996          * we should always have valid PTE following the scanout preventing
997          * the VT-d warning.
998          */
999         if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
1000                 alignment = 256 * 1024;
1001
1002         /*
1003          * Global gtt pte registers are special registers which actually forward
1004          * writes to a chunk of system memory. Which means that there is no risk
1005          * that the register values disappear as soon as we call
1006          * intel_runtime_pm_put(), so it is correct to wrap only the
1007          * pin/unpin/fence and not more.
1008          */
1009         wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1010
1011         atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
1012
1013         /*
1014          * Valleyview is definitely limited to scanning out the first
1015          * 512MiB. Lets presume this behaviour was inherited from the
1016          * g4x display engine and that all earlier gen are similarly
1017          * limited. Testing suggests that it is a little more
1018          * complicated than this. For example, Cherryview appears quite
1019          * happy to scanout from anywhere within its global aperture.
1020          */
1021         pinctl = 0;
1022         if (HAS_GMCH(dev_priv))
1023                 pinctl |= PIN_MAPPABLE;
1024
1025         i915_gem_ww_ctx_init(&ww, true);
1026 retry:
1027         ret = i915_gem_object_lock(obj, &ww);
1028         if (!ret && phys_cursor)
1029                 ret = i915_gem_object_attach_phys(obj, alignment);
1030         else if (!ret && HAS_LMEM(dev_priv))
1031                 ret = i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM);
1032         /* TODO: Do we need to sync when migration becomes async? */
1033         if (!ret)
1034                 ret = i915_gem_object_pin_pages(obj);
1035         if (ret)
1036                 goto err;
1037
1038         if (!ret) {
1039                 vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment,
1040                                                            view, pinctl);
1041                 if (IS_ERR(vma)) {
1042                         ret = PTR_ERR(vma);
1043                         goto err_unpin;
1044                 }
1045         }
1046
1047         if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
1048                 /*
1049                  * Install a fence for tiled scan-out. Pre-i965 always needs a
1050                  * fence, whereas 965+ only requires a fence if using
1051                  * framebuffer compression.  For simplicity, we always, when
1052                  * possible, install a fence as the cost is not that onerous.
1053                  *
1054                  * If we fail to fence the tiled scanout, then either the
1055                  * modeset will reject the change (which is highly unlikely as
1056                  * the affected systems, all but one, do not have unmappable
1057                  * space) or we will not be able to enable full powersaving
1058                  * techniques (also likely not to apply due to various limits
1059                  * FBC and the like impose on the size of the buffer, which
1060                  * presumably we violated anyway with this unmappable buffer).
1061                  * Anyway, it is presumably better to stumble onwards with
1062                  * something and try to run the system in a "less than optimal"
1063                  * mode that matches the user configuration.
1064                  */
1065                 ret = i915_vma_pin_fence(vma);
1066                 if (ret != 0 && DISPLAY_VER(dev_priv) < 4) {
1067                         i915_vma_unpin(vma);
1068                         goto err_unpin;
1069                 }
1070                 ret = 0;
1071
1072                 if (vma->fence)
1073                         *out_flags |= PLANE_HAS_FENCE;
1074         }
1075
1076         i915_vma_get(vma);
1077
1078 err_unpin:
1079         i915_gem_object_unpin_pages(obj);
1080 err:
1081         if (ret == -EDEADLK) {
1082                 ret = i915_gem_ww_ctx_backoff(&ww);
1083                 if (!ret)
1084                         goto retry;
1085         }
1086         i915_gem_ww_ctx_fini(&ww);
1087         if (ret)
1088                 vma = ERR_PTR(ret);
1089
1090         atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
1091         intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1092         return vma;
1093 }
1094
/*
 * Release a scanout pin obtained from intel_pin_and_fence_fb_obj() /
 * intel_pin_fb_obj_dpt(): drop the fence (if one was installed), then
 * the pin, then the vma reference — in that order.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
	i915_vma_put(vma);
}
1102
1103 /*
1104  * Convert the x/y offsets into a linear offset.
1105  * Only valid with 0/180 degree rotation, which is fine since linear
1106  * offset is only used with linear buffers on pre-hsw and tiled buffers
1107  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
1108  */
1109 u32 intel_fb_xy_to_linear(int x, int y,
1110                           const struct intel_plane_state *state,
1111                           int color_plane)
1112 {
1113         const struct drm_framebuffer *fb = state->hw.fb;
1114         unsigned int cpp = fb->format->cpp[color_plane];
1115         unsigned int pitch = state->view.color_plane[color_plane].stride;
1116
1117         return y * pitch + x * cpp;
1118 }
1119
1120 /*
1121  * Add the x/y offsets derived from fb->offsets[] to the user
1122  * specified plane src x/y offsets. The resulting x/y offsets
1123  * specify the start of scanout from the beginning of the gtt mapping.
1124  */
1125 void intel_add_fb_offsets(int *x, int *y,
1126                           const struct intel_plane_state *state,
1127                           int color_plane)
1128
1129 {
1130         *x += state->view.color_plane[color_plane].x;
1131         *y += state->view.color_plane[color_plane].y;
1132 }
1133
1134 /*
1135  * From the Sky Lake PRM:
1136  * "The Color Control Surface (CCS) contains the compression status of
1137  *  the cache-line pairs. The compression state of the cache-line pair
1138  *  is specified by 2 bits in the CCS. Each CCS cache-line represents
1139  *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
1140  *  cache-line-pairs. CCS is always Y tiled."
1141  *
1142  * Since cache line pairs refers to horizontally adjacent cache lines,
1143  * each cache line in the CCS corresponds to an area of 32x16 cache
1144  * lines on the main surface. Since each pixel is 4 bytes, this gives
1145  * us a ratio of one byte in the CCS for each 8x16 pixels in the
1146  * main surface.
1147  */
static const struct drm_format_info skl_ccs_formats[] = {
	/* .cpp = { main surface bytes/px, CCS bytes/block } */
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};
1158
1159 /*
1160  * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
1161  * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
1162  * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
1163  * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in
1164  * the main surface.
1165  */
static const struct drm_format_info gen12_ccs_formats[] = {
	/* Plane layout: color plane(s) first, then one CCS plane each. */
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_YUYV, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_YVYU, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_UYVY, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_VYUY, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_XYUV8888, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .is_yuv = true },
	/* Semiplanar YUV: planes 0/1 are Y/UV, planes 2/3 their CCS. */
	{ .format = DRM_FORMAT_NV12, .num_planes = 4,
	  .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P010, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P012, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P016, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
};
1207
1208 /*
1209  * Same as gen12_ccs_formats[] above, but with additional surface used
1210  * to pass Clear Color information in plane 2 with 64 bits of data.
1211  */
static const struct drm_format_info gen12_ccs_cc_formats[] = {
	/* Plane 2 carries the 64-bit clear color; it occupies no chars. */
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
};
1226
1227 static const struct drm_format_info *
1228 lookup_format_info(const struct drm_format_info formats[],
1229                    int num_formats, u32 format)
1230 {
1231         int i;
1232
1233         for (i = 0; i < num_formats; i++) {
1234                 if (formats[i].format == format)
1235                         return &formats[i];
1236         }
1237
1238         return NULL;
1239 }
1240
1241 static const struct drm_format_info *
1242 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
1243 {
1244         switch (cmd->modifier[0]) {
1245         case I915_FORMAT_MOD_Y_TILED_CCS:
1246         case I915_FORMAT_MOD_Yf_TILED_CCS:
1247                 return lookup_format_info(skl_ccs_formats,
1248                                           ARRAY_SIZE(skl_ccs_formats),
1249                                           cmd->pixel_format);
1250         case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
1251         case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
1252                 return lookup_format_info(gen12_ccs_formats,
1253                                           ARRAY_SIZE(gen12_ccs_formats),
1254                                           cmd->pixel_format);
1255         case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
1256                 return lookup_format_info(gen12_ccs_cc_formats,
1257                                           ARRAY_SIZE(gen12_ccs_cc_formats),
1258                                           cmd->pixel_format);
1259         default:
1260                 return NULL;
1261         }
1262 }
1263
1264 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
1265                               u32 pixel_format, u64 modifier)
1266 {
1267         struct intel_crtc *crtc;
1268         struct intel_plane *plane;
1269
1270         if (!HAS_DISPLAY(dev_priv))
1271                 return 0;
1272
1273         /*
1274          * We assume the primary plane for pipe A has
1275          * the highest stride limits of them all,
1276          * if in case pipe A is disabled, use the first pipe from pipe_mask.
1277          */
1278         crtc = intel_get_first_crtc(dev_priv);
1279         if (!crtc)
1280                 return 0;
1281
1282         plane = to_intel_plane(crtc->base.primary);
1283
1284         return plane->max_stride(plane, pixel_format, modifier,
1285                                  DRM_MODE_ROTATE_0);
1286 }
1287
/*
 * Wrap the firmware-programmed framebuffer (preallocated in stolen
 * memory) in a GEM object and pin it at its existing GGTT offset so
 * the boot image can keep scanning out across driver takeover.
 * Returns NULL on any failure, in which case the initial plane is
 * simply not reused.
 */
static struct i915_vma *
initial_plane_vma(struct drm_i915_private *i915,
		  struct intel_initial_plane_config *plane_config)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 base, size;

	if (plane_config->size == 0)
		return NULL;

	/* Expand [base, base + size) to GTT alignment boundaries. */
	base = round_down(plane_config->base,
			  I915_GTT_MIN_ALIGNMENT);
	size = round_up(plane_config->base + plane_config->size,
			I915_GTT_MIN_ALIGNMENT);
	size -= base;

	/*
	 * If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features.
	 */
	if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
	    size * 2 > i915->stolen_usable_size)
		return NULL;

	obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
	if (IS_ERR(obj))
		return NULL;

	/*
	 * Mark it WT ahead of time to avoid changing the
	 * cache_level during fbdev initialization. The
	 * unbind there would get stuck waiting for rcu.
	 */
	i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ?
					    I915_CACHE_WT : I915_CACHE_NONE);

	switch (plane_config->tiling) {
	case I915_TILING_NONE:
		break;
	case I915_TILING_X:
	case I915_TILING_Y:
		/* Inherit the stride/tiling the firmware programmed. */
		obj->tiling_and_stride =
			plane_config->fb->base.pitches[0] |
			plane_config->tiling;
		break;
	default:
		MISSING_CASE(plane_config->tiling);
		goto err_obj;
	}

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma))
		goto err_obj;

	/* Pin exactly where the firmware mapped it (PIN_OFFSET_FIXED). */
	if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
		goto err_obj;

	/* A tiled boot fb must be fenceable to be usable for scanout here. */
	if (i915_gem_object_is_tiled(obj) &&
	    !i915_vma_is_map_and_fenceable(vma))
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return NULL;
}
1357
/*
 * Build an intel_framebuffer around the firmware-configured plane
 * described by @plane_config. On success, stores the pinned vma in
 * plane_config->vma and returns true.
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	struct i915_vma *vma;

	/* Only modifiers the firmware could have programmed are accepted. */
	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
	case I915_FORMAT_MOD_Y_TILED:
		break;
	default:
		drm_dbg(&dev_priv->drm,
			"Unsupported modifier for initial FB: 0x%llx\n",
			fb->modifier);
		return false;
	}

	vma = initial_plane_vma(dev_priv, plane_config);
	if (!vma)
		return false;

	/* Re-describe the firmware fb as a drm_mode_fb_cmd2 for fb init. */
	mode_cmd.pixel_format = fb->format->format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier;
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(to_intel_framebuffer(fb),
				   vma->obj, &mode_cmd)) {
		drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
		goto err_vma;
	}

	plane_config->vma = vma;
	return true;

err_vma:
	i915_vma_put(vma);
	return false;
}
1404
1405 static void
1406 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
1407                         struct intel_plane_state *plane_state,
1408                         bool visible)
1409 {
1410         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1411
1412         plane_state->uapi.visible = visible;
1413
1414         if (visible)
1415                 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
1416         else
1417                 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
1418 }
1419
1420 static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
1421 {
1422         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1423         struct drm_plane *plane;
1424
1425         /*
1426          * Active_planes aliases if multiple "primary" or cursor planes
1427          * have been used on the same (or wrong) pipe. plane_mask uses
1428          * unique ids, hence we can use that to reconstruct active_planes.
1429          */
1430         crtc_state->enabled_planes = 0;
1431         crtc_state->active_planes = 0;
1432
1433         drm_for_each_plane_mask(plane, &dev_priv->drm,
1434                                 crtc_state->uapi.plane_mask) {
1435                 crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
1436                 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
1437         }
1438 }
1439
/*
 * Disable @plane on @crtc outside of the atomic commit machinery
 * (used during initial hardware state sanitization), keeping the
 * software state — visibility, plane bitmasks, data rate / cdclk
 * bookkeeping — consistent with the hardware.
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
					 struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	intel_set_plane_visible(crtc_state, plane_state, false);
	fixup_plane_bitmasks(crtc_state);
	/* The plane no longer contributes bandwidth or clock demands. */
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	/* IPS is tied to the primary plane being active. */
	if (plane->id == PLANE_PRIMARY)
		hsw_disable_ips(crtc_state);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_disable_plane(plane, crtc_state);
	/* Let the disable latch at the next vblank before proceeding. */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}
1485
1486 static bool
1487 intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
1488                               const struct intel_initial_plane_config *plane_config,
1489                               struct drm_framebuffer **fb,
1490                               struct i915_vma **vma)
1491 {
1492         struct intel_crtc *crtc;
1493
1494         for_each_intel_crtc(&i915->drm, crtc) {
1495                 struct intel_crtc_state *crtc_state =
1496                         to_intel_crtc_state(crtc->base.state);
1497                 struct intel_plane *plane =
1498                         to_intel_plane(crtc->base.primary);
1499                 struct intel_plane_state *plane_state =
1500                         to_intel_plane_state(plane->base.state);
1501
1502                 if (!crtc_state->uapi.active)
1503                         continue;
1504
1505                 if (!plane_state->ggtt_vma)
1506                         continue;
1507
1508                 if (intel_plane_ggtt_offset(plane_state) == plane_config->base) {
1509                         *fb = plane_state->hw.fb;
1510                         *vma = plane_state->ggtt_vma;
1511                         return true;
1512                 }
1513         }
1514
1515         return false;
1516 }
1517
/*
 * Try to keep the BIOS-programmed framebuffer alive on @crtc's primary
 * plane across driver takeover.
 *
 * Tries, in order:
 *  1. wrapping the BIOS surface into a fresh fb/vma via
 *     intel_alloc_initial_plane_obj(),
 *  2. sharing an fb already reconstructed for another crtc,
 *  3. failing both, disabling the primary plane (and the bigjoiner
 *     slave's primary, if linked) so we don't leave a visible plane
 *     with a NULL fb behind.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *crtc,
                             struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc_state *crtc_state =
                to_intel_crtc_state(crtc->base.state);
        struct intel_plane *plane =
                to_intel_plane(crtc->base.primary);
        struct intel_plane_state *plane_state =
                to_intel_plane_state(plane->base.state);
        struct drm_framebuffer *fb;
        struct i915_vma *vma;

        /*
         * TODO:
         *   Disable planes if get_initial_plane_config() failed.
         *   Make sure things work if the surface base is not page aligned.
         */
        if (!plane_config->fb)
                return;

        if (intel_alloc_initial_plane_obj(crtc, plane_config)) {
                fb = &plane_config->fb->base;
                vma = plane_config->vma;
                goto valid_fb;
        }

        /*
         * Failed to alloc the obj, check to see if we should share
         * an fb with another CRTC instead
         */
        if (intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb, &vma))
                goto valid_fb;

        /*
         * We've failed to reconstruct the BIOS FB.  Current display state
         * indicates that the primary plane is visible, but has a NULL FB,
         * which will lead to problems later if we don't fix it up.  The
         * simplest solution is to just disable the primary plane now and
         * pretend the BIOS never had it enabled.
         */
        intel_plane_disable_noatomic(crtc, plane);
        if (crtc_state->bigjoiner) {
                struct intel_crtc *slave =
                        crtc_state->bigjoiner_linked_crtc;
                intel_plane_disable_noatomic(slave, to_intel_plane(slave->base.primary));
        }

        return;

valid_fb:
        plane_state->uapi.rotation = plane_config->rotation;
        intel_fb_fill_view(to_intel_framebuffer(fb),
                           plane_state->uapi.rotation, &plane_state->view);

        /* Keep the mapping pinned and take our own reference on the vma */
        __i915_vma_pin(vma);
        plane_state->ggtt_vma = i915_vma_get(vma);
        if (intel_plane_uses_fence(plane_state) &&
            i915_vma_pin_fence(vma) == 0 && vma->fence)
                plane_state->flags |= PLANE_HAS_FENCE;

        /* src coordinates are 16.16 fixed point, hence the << 16 */
        plane_state->uapi.src_x = 0;
        plane_state->uapi.src_y = 0;
        plane_state->uapi.src_w = fb->width << 16;
        plane_state->uapi.src_h = fb->height << 16;

        plane_state->uapi.crtc_x = 0;
        plane_state->uapi.crtc_y = 0;
        plane_state->uapi.crtc_w = fb->width;
        plane_state->uapi.crtc_h = fb->height;

        /* keep the BIOS swizzle setting for the reconstructed fb */
        if (plane_config->tiling)
                dev_priv->preserve_bios_swizzle = true;

        plane_state->uapi.fb = fb;
        drm_framebuffer_get(fb);

        plane_state->uapi.crtc = &crtc->base;
        intel_plane_copy_uapi_to_hw_state(plane_state, plane_state, crtc);

        intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

        atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits);
}
1604
1605 unsigned int
1606 intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
1607 {
1608         int x = 0, y = 0;
1609
1610         intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
1611                                           plane_state->view.color_plane[0].offset, 0);
1612
1613         return y;
1614 }
1615
/*
 * Re-read the hardware state and, if @state is non-NULL, commit the
 * duplicated pre-reset atomic state to restore the display.
 *
 * Returns 0 on success or a negative error code; -EDEADLK must never
 * escape (the caller holds all modeset locks), hence the WARN below.
 */
static int
__intel_display_resume(struct drm_device *dev,
                       struct drm_atomic_state *state,
                       struct drm_modeset_acquire_ctx *ctx)
{
        struct drm_crtc_state *crtc_state;
        struct drm_crtc *crtc;
        int i, ret;

        intel_modeset_setup_hw_state(dev, ctx);
        intel_vga_redisable(to_i915(dev));

        if (!state)
                return 0;

        /*
         * We've duplicated the state, pointers to the old state are invalid.
         *
         * Don't attempt to use the old state until we commit the duplicated state.
         */
        for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
                /*
                 * Force recalculation even if we restore
                 * current state. With fast modeset this may not result
                 * in a modeset when the state is compatible.
                 */
                crtc_state->mode_changed = true;
        }

        /* ignore any reset values/BIOS leftovers in the WM registers */
        if (!HAS_GMCH(to_i915(dev)))
                to_intel_atomic_state(state)->skip_intermediate_wm = true;

        ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

        drm_WARN_ON(dev, ret == -EDEADLK);
        return ret;
}
1654
1655 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
1656 {
1657         return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
1658                 intel_has_gpu_reset(&dev_priv->gt));
1659 }
1660
/*
 * Quiesce the display before a GPU reset that will clobber it: grab all
 * modeset locks, duplicate the current atomic state for later restore,
 * and disable all crtcs.  intel_display_finish_reset() is the matching
 * teardown and is responsible for dropping the locks taken here (note
 * that the error paths below deliberately return with ctx still held
 * and I915_RESET_MODESET still set for that reason).
 */
void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = &dev_priv->drm;
        struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
        struct drm_atomic_state *state;
        int ret;

        if (!HAS_DISPLAY(dev_priv))
                return;

        /* reset doesn't touch the display */
        if (!dev_priv->params.force_reset_modeset_test &&
            !gpu_reset_clobbers_display(dev_priv))
                return;

        /* We have a modeset vs reset deadlock, defensively unbreak it. */
        set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
        smp_mb__after_atomic();
        wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);

        if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
                drm_dbg_kms(&dev_priv->drm,
                            "Modeset potentially stuck, unbreaking through wedging\n");
                intel_gt_set_wedged(&dev_priv->gt);
        }

        /*
         * Need mode_config.mutex so that we don't
         * trample ongoing ->detect() and whatnot.
         */
        mutex_lock(&dev->mode_config.mutex);
        drm_modeset_acquire_init(ctx, 0);
        while (1) {
                ret = drm_modeset_lock_all_ctx(dev, ctx);
                if (ret != -EDEADLK)
                        break;

                /* contention: back off and retry the full lock set */
                drm_modeset_backoff(ctx);
        }
        /*
         * Disabling the crtcs gracefully seems nicer. Also the
         * g33 docs say we should at least disable all the planes.
         */
        state = drm_atomic_helper_duplicate_state(dev, ctx);
        if (IS_ERR(state)) {
                ret = PTR_ERR(state);
                drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
                        ret);
                return;
        }

        ret = drm_atomic_helper_disable_all(dev, ctx);
        if (ret) {
                drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
                        ret);
                drm_atomic_state_put(state);
                return;
        }

        /* stashed state is consumed by intel_display_finish_reset() */
        dev_priv->modeset_restore_state = state;
        state->acquire_ctx = ctx;
}
1723
/*
 * Counterpart of intel_display_prepare_reset(): restore the display
 * state that was stashed before the GPU reset, re-initialize the
 * display hardware if the reset clobbered it, and drop the modeset
 * locks/ctx taken by prepare_reset.
 */
void intel_display_finish_reset(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = &dev_priv->drm;
        struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
        struct drm_atomic_state *state;
        int ret;

        if (!HAS_DISPLAY(dev_priv))
                return;

        /* reset doesn't touch the display */
        if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
                return;

        /* take ownership of the stashed state (may be NULL on error paths) */
        state = fetch_and_zero(&dev_priv->modeset_restore_state);
        if (!state)
                goto unlock;

        /* reset doesn't touch the display */
        if (!gpu_reset_clobbers_display(dev_priv)) {
                /* for testing only restore the display */
                ret = __intel_display_resume(dev, state, ctx);
                if (ret)
                        drm_err(&dev_priv->drm,
                                "Restoring old state failed with %i\n", ret);
        } else {
                /*
                 * The display has been reset as well,
                 * so need a full re-initialization.
                 */
                intel_pps_unlock_regs_wa(dev_priv);
                intel_modeset_init_hw(dev_priv);
                intel_init_clock_gating(dev_priv);
                intel_hpd_init(dev_priv);

                ret = __intel_display_resume(dev, state, ctx);
                if (ret)
                        drm_err(&dev_priv->drm,
                                "Restoring old state failed with %i\n", ret);

                intel_hpd_poll_disable(dev_priv);
        }

        drm_atomic_state_put(state);
unlock:
        drm_modeset_drop_locks(ctx);
        drm_modeset_acquire_fini(ctx);
        mutex_unlock(&dev->mode_config.mutex);

        clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}
1775
1776 static bool underrun_recovery_supported(const struct intel_crtc_state *crtc_state)
1777 {
1778         if (crtc_state->pch_pfit.enabled &&
1779             (crtc_state->pipe_src_w > drm_rect_width(&crtc_state->pch_pfit.dst) ||
1780              crtc_state->pipe_src_h > drm_rect_height(&crtc_state->pch_pfit.dst) ||
1781              crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420))
1782                 return false;
1783
1784         if (crtc_state->dsc.compression_enable)
1785                 return false;
1786
1787         if (crtc_state->has_psr2)
1788                 return false;
1789
1790         if (crtc_state->splitter.enable)
1791                 return false;
1792
1793         return true;
1794 }
1795
1796 static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
1797 {
1798         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1799         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1800         enum pipe pipe = crtc->pipe;
1801         u32 tmp;
1802
1803         tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
1804
1805         /*
1806          * Display WA #1153: icl
1807          * enable hardware to bypass the alpha math
1808          * and rounding for per-pixel values 00 and 0xff
1809          */
1810         tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
1811         /*
1812          * Display WA # 1605353570: icl
1813          * Set the pixel rounding bit to 1 for allowing
1814          * passthrough of Frame buffer pixels unmodified
1815          * across pipe
1816          */
1817         tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
1818
1819         if (IS_DG2(dev_priv)) {
1820                 /*
1821                  * Underrun recovery must always be disabled on DG2.  However
1822                  * the chicken bit meaning is inverted compared to other
1823                  * platforms.
1824                  */
1825                 tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
1826         } else if (DISPLAY_VER(dev_priv) >= 13) {
1827                 if (underrun_recovery_supported(crtc_state))
1828                         tmp &= ~UNDERRUN_RECOVERY_DISABLE_ADLP;
1829                 else
1830                         tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;
1831         }
1832
1833         intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
1834 }
1835
1836 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
1837 {
1838         struct drm_crtc *crtc;
1839         bool cleanup_done;
1840
1841         drm_for_each_crtc(crtc, &dev_priv->drm) {
1842                 struct drm_crtc_commit *commit;
1843                 spin_lock(&crtc->commit_lock);
1844                 commit = list_first_entry_or_null(&crtc->commit_list,
1845                                                   struct drm_crtc_commit, commit_entry);
1846                 cleanup_done = commit ?
1847                         try_wait_for_completion(&commit->cleanup_done) : true;
1848                 spin_unlock(&crtc->commit_lock);
1849
1850                 if (cleanup_done)
1851                         continue;
1852
1853                 drm_crtc_wait_one_vblank(crtc);
1854
1855                 return true;
1856         }
1857
1858         return false;
1859 }
1860
/*
 * Gate the iCLKIP pixel clock and stop the SSC modulator via the
 * sideband interface.  Counterpart of lpt_program_iclkip().
 */
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
        u32 temp;

        intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);

        /* sideband accesses are serialized by sb_lock */
        mutex_lock(&dev_priv->sb_lock);

        temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
        temp |= SBI_SSCCTL_DISABLE;
        intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

        mutex_unlock(&dev_priv->sb_lock);
}
1875
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        int clock = crtc_state->hw.adjusted_mode.crtc_clock;
        u32 divsel, phaseinc, auxdiv, phasedir = 0;
        u32 temp;

        /* must gate the clock and stop the modulator before reprogramming */
        lpt_disable_iclkip(dev_priv);

        /* The iCLK virtual clock root frequency is in MHz,
         * but the adjusted_mode->crtc_clock is in KHz. To get the
         * divisors, it is necessary to divide one by another, so we
         * convert the virtual clock precision to KHz here for higher
         * precision.
         */
        for (auxdiv = 0; auxdiv < 2; auxdiv++) {
                u32 iclk_virtual_root_freq = 172800 * 1000;
                u32 iclk_pi_range = 64;
                u32 desired_divisor;

                desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
                                                    clock << auxdiv);
                divsel = (desired_divisor / iclk_pi_range) - 2;
                phaseinc = desired_divisor % iclk_pi_range;

                /*
                 * Near 20MHz is a corner case which is
                 * out of range for the 7-bit divisor
                 */
                if (divsel <= 0x7f)
                        break;
        }

        /* This should not happen with any sane values */
        drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
                    ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
        drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
                    ~SBI_SSCDIVINTPHASE_INCVAL_MASK);

        drm_dbg_kms(&dev_priv->drm,
                    "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
                    clock, auxdiv, divsel, phasedir, phaseinc);

        mutex_lock(&dev_priv->sb_lock);

        /* Program SSCDIVINTPHASE6 */
        temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
        temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
        temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
        temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
        temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
        temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
        temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
        intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

        /* Program SSCAUXDIV */
        temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
        temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
        temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
        intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

        /* Enable modulator and associated divider */
        temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
        temp &= ~SBI_SSCCTL_DISABLE;
        intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

        mutex_unlock(&dev_priv->sb_lock);

        /* Wait for initialization time */
        udelay(24);

        /* only ungate the pixel clock once the modulator is running */
        intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
1951
/*
 * Read back the currently programmed iCLKIP frequency (in kHz, derived
 * by inverting the divisor math of lpt_program_iclkip()).  Returns 0
 * when the pixel clock is gated or the SSC modulator is disabled.
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
        u32 divsel, phaseinc, auxdiv;
        u32 iclk_virtual_root_freq = 172800 * 1000;
        u32 iclk_pi_range = 64;
        u32 desired_divisor;
        u32 temp;

        if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
                return 0;

        mutex_lock(&dev_priv->sb_lock);

        temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
        if (temp & SBI_SSCCTL_DISABLE) {
                mutex_unlock(&dev_priv->sb_lock);
                return 0;
        }

        temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
        divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
                SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
        phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
                SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

        temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
        auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
                SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

        mutex_unlock(&dev_priv->sb_lock);

        /* inverse of the divsel/phaseinc split in lpt_program_iclkip() */
        desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

        return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
                                 desired_divisor << auxdiv);
}
1988
/*
 * Copy the CPU transcoder's H/V timing registers into the PCH
 * transcoder so both sides of the FDI link agree on the mode.
 */
static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
                                           enum pipe pch_transcoder)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

        intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
                       intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
        intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
                       intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
        intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
                       intel_de_read(dev_priv, HSYNC(cpu_transcoder)));

        intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
                       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
        intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
                       intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
        intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
                       intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
        intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
                       intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
}
2012
2013 /*
2014  * Finds the encoder associated with the given CRTC. This can only be
2015  * used when we know that the CRTC isn't feeding multiple encoders!
2016  */
2017 struct intel_encoder *
2018 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
2019                            const struct intel_crtc_state *crtc_state)
2020 {
2021         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2022         const struct drm_connector_state *connector_state;
2023         const struct drm_connector *connector;
2024         struct intel_encoder *encoder = NULL;
2025         int num_encoders = 0;
2026         int i;
2027
2028         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
2029                 if (connector_state->crtc != &crtc->base)
2030                         continue;
2031
2032                 encoder = to_intel_encoder(connector_state->best_encoder);
2033                 num_encoders++;
2034         }
2035
2036         drm_WARN(encoder->base.dev, num_encoders != 1,
2037                  "%d encoders for pipe %c\n",
2038                  num_encoders, pipe_name(crtc->pipe));
2039
2040         return encoder;
2041 }
2042
/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 *
 * The ordering below (TU size -> FDI training -> DPLL clock select ->
 * shared DPLL enable -> transcoder timings -> normal train -> TRANS_DP
 * -> transcoder enable) is deliberate; do not reorder.
 */
static void ilk_pch_enable(const struct intel_atomic_state *state,
                           const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        u32 temp;

        assert_pch_transcoder_disabled(dev_priv, pipe);

        if (IS_IVYBRIDGE(dev_priv))
                ivb_update_fdi_bc_bifurcation(crtc_state);

        /* Write the TU size bits before fdi link training, so that error
         * detection works. */
        intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
                       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

        /* For PCH output, training FDI link */
        dev_priv->display.fdi_link_train(crtc, crtc_state);

        /* We need to program the right clock selection before writing the pixel
         * multiplier into the DPLL. */
        if (HAS_PCH_CPT(dev_priv)) {
                u32 sel;

                temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
                temp |= TRANS_DPLL_ENABLE(pipe);
                sel = TRANS_DPLLB_SEL(pipe);
                if (crtc_state->shared_dpll ==
                    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
                        temp |= sel;
                else
                        temp &= ~sel;
                intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
        }

        /* XXX: pch pll's can be enabled any time before we enable the PCH
         * transcoder, and we actually should do this to not upset any PCH
         * transcoder that already use the clock when we share it.
         *
         * Note that enable_shared_dpll tries to do the right thing, but
         * get_shared_dpll unconditionally resets the pll - we need that to have
         * the right LVDS enable sequence. */
        intel_enable_shared_dpll(crtc_state);

        /* set transcoder timing, panel must allow it */
        assert_panel_unlocked(dev_priv, pipe);
        ilk_pch_transcoder_set_timings(crtc_state, pipe);

        intel_fdi_normal_train(crtc);

        /* For PCH DP, enable TRANS_DP_CTL */
        if (HAS_PCH_CPT(dev_priv) &&
            intel_crtc_has_dp_encoder(crtc_state)) {
                const struct drm_display_mode *adjusted_mode =
                        &crtc_state->hw.adjusted_mode;
                u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
                i915_reg_t reg = TRANS_DP_CTL(pipe);
                enum port port;

                temp = intel_de_read(dev_priv, reg);
                temp &= ~(TRANS_DP_PORT_SEL_MASK |
                          TRANS_DP_SYNC_MASK |
                          TRANS_DP_BPC_MASK);
                temp |= TRANS_DP_OUTPUT_ENABLE;
                temp |= bpc << 9; /* same format but at 11:9 */

                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

                /* only ports B-D can feed a CPT PCH DP transcoder */
                port = intel_get_crtc_new_encoder(state, crtc_state)->port;
                drm_WARN_ON(dev, port < PORT_B || port > PORT_D);
                temp |= TRANS_DP_PORT_SEL(port);

                intel_de_write(dev_priv, reg, temp);
        }

        ilk_enable_pch_transcoder(crtc_state);
}
2134
/*
 * LPT variant of the PCH enable sequence: program the iCLKIP clock,
 * copy the transcoder timings, then enable the (single, PIPE_A) PCH
 * transcoder.
 */
void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

        assert_pch_transcoder_disabled(dev_priv, PIPE_A);

        lpt_program_iclkip(crtc_state);

        /* Set transcoder timing. */
        ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);

        lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
2150
2151 static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
2152                                enum pipe pipe)
2153 {
2154         i915_reg_t dslreg = PIPEDSL(pipe);
2155         u32 temp;
2156
2157         temp = intel_de_read(dev_priv, dslreg);
2158         udelay(500);
2159         if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
2160                 if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
2161                         drm_err(&dev_priv->drm,
2162                                 "mode set failed: pipe %c stuck\n",
2163                                 pipe_name(pipe));
2164         }
2165 }
2166
2167 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
2168 {
2169         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2170         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2171         const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
2172         enum pipe pipe = crtc->pipe;
2173         int width = drm_rect_width(dst);
2174         int height = drm_rect_height(dst);
2175         int x = dst->x1;
2176         int y = dst->y1;
2177
2178         if (!crtc_state->pch_pfit.enabled)
2179                 return;
2180
2181         /* Force use of hard-coded filter coefficients
2182          * as some pre-programmed values are broken,
2183          * e.g. x201.
2184          */
2185         if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
2186                 intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
2187                                PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
2188         else
2189                 intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
2190                                PF_FILTER_MED_3x3);
2191         intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
2192         intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
2193 }
2194
/*
 * Enable IPS for @crtc_state: via the pcode mailbox on Broadwell,
 * via the IPS_CTL register otherwise.  Must be called after a plane
 * has been enabled and a vblank has passed (see WARN below).
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!crtc_state->ips_enabled)
                return;

        /*
         * We can only enable IPS after we enable a plane and wait for a vblank
         * This function is called from post_plane_update, which is run after
         * a vblank wait.
         */
        drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

        if (IS_BROADWELL(dev_priv)) {
                drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
                                                         IPS_ENABLE | IPS_PCODE_CONTROL));
                /* Quoting Art Runyan: "its not safe to expect any particular
                 * value in IPS_CTL bit 31 after enabling IPS through the
                 * mailbox." Moreover, the mailbox may return a bogus state,
                 * so we need to just enable it and continue on.
                 */
        } else {
                intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
                /* The bit only becomes 1 in the next vblank, so this wait here
                 * is essentially intel_wait_for_vblank. If we don't have this
                 * and don't wait for vblanks until the end of crtc_enable, then
                 * the HW state readout code will complain that the expected
                 * IPS_CTL value is not the one we read. */
                if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
                        drm_err(&dev_priv->drm,
                                "Timed out waiting for IPS enable\n");
        }
}
2231
/*
 * Disable IPS for @crtc_state: via the pcode mailbox on Broadwell,
 * via IPS_CTL otherwise.  Waits a vblank afterwards so the caller may
 * safely disable the plane.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!crtc_state->ips_enabled)
                return;

        if (IS_BROADWELL(dev_priv)) {
                drm_WARN_ON(dev,
                            sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
                /*
                 * Wait for PCODE to finish disabling IPS. The BSpec specified
                 * 42ms timeout value leads to occasional timeouts so use 100ms
                 * instead.
                 */
                if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
                        drm_err(&dev_priv->drm,
                                "Timed out waiting for IPS disable\n");
        } else {
                intel_de_write(dev_priv, IPS_CTL, 0);
                /* posting read flushes the write before we proceed */
                intel_de_posting_read(dev_priv, IPS_CTL);
        }

        /* We need to wait for a vblank before we can disable the plane. */
        intel_wait_for_vblank(dev_priv, crtc->pipe);
}
2260
2261 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
2262 {
2263         if (crtc->overlay)
2264                 (void) intel_overlay_switch_off(crtc->overlay);
2265
2266         /* Let userspace switch the overlay on again. In most cases userspace
2267          * has to recompute where to put it anyway.
2268          */
2269 }
2270
2271 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
2272                                        const struct intel_crtc_state *new_crtc_state)
2273 {
2274         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
2275         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2276
2277         if (!old_crtc_state->ips_enabled)
2278                 return false;
2279
2280         if (intel_crtc_needs_modeset(new_crtc_state))
2281                 return true;
2282
2283         /*
2284          * Workaround : Do not read or write the pipe palette/gamma data while
2285          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
2286          *
2287          * Disable IPS before we program the LUT.
2288          */
2289         if (IS_HASWELL(dev_priv) &&
2290             (new_crtc_state->uapi.color_mgmt_changed ||
2291              new_crtc_state->update_pipe) &&
2292             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
2293                 return true;
2294
2295         return !new_crtc_state->ips_enabled;
2296 }
2297
2298 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
2299                                        const struct intel_crtc_state *new_crtc_state)
2300 {
2301         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
2302         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2303
2304         if (!new_crtc_state->ips_enabled)
2305                 return false;
2306
2307         if (intel_crtc_needs_modeset(new_crtc_state))
2308                 return true;
2309
2310         /*
2311          * Workaround : Do not read or write the pipe palette/gamma data while
2312          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
2313          *
2314          * Re-enable IPS after the LUT has been programmed.
2315          */
2316         if (IS_HASWELL(dev_priv) &&
2317             (new_crtc_state->uapi.color_mgmt_changed ||
2318              new_crtc_state->update_pipe) &&
2319             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
2320                 return true;
2321
2322         /*
2323          * We can't read out IPS on broadwell, assume the worst and
2324          * forcibly enable IPS on the first fastset.
2325          */
2326         if (new_crtc_state->update_pipe && old_crtc_state->inherited)
2327                 return true;
2328
2329         return !old_crtc_state->ips_enabled;
2330 }
2331
2332 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
2333 {
2334         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2335
2336         if (!crtc_state->nv12_planes)
2337                 return false;
2338
2339         /* WA Display #0827: Gen9:all */
2340         if (DISPLAY_VER(dev_priv) == 9)
2341                 return true;
2342
2343         return false;
2344 }
2345
2346 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
2347 {
2348         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2349
2350         /* Wa_2006604312:icl,ehl */
2351         if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
2352                 return true;
2353
2354         return false;
2355 }
2356
2357 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
2358                             const struct intel_crtc_state *new_crtc_state)
2359 {
2360         return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
2361                 new_crtc_state->active_planes;
2362 }
2363
2364 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
2365                              const struct intel_crtc_state *new_crtc_state)
2366 {
2367         return old_crtc_state->active_planes &&
2368                 (!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
2369 }
2370
/*
 * Post-plane-update half of a CRTC update: flush frontbuffer tracking,
 * program optimal watermarks, re-enable IPS/FBC, and drop workarounds
 * the new state no longer needs. Counterpart of intel_pre_plane_update();
 * the call order below mirrors that function in reverse.
 */
static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	/* Legacy (non-atomic-wm) post-vblank watermark update. */
	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(crtc);

	if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
		hsw_enable_ips(new_crtc_state);

	intel_fbc_post_update(state, crtc);

	/* Display WA #0827: disable once the last NV12 plane is gone. */
	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	/* Wa_2006604312: disable once the last scaler user is gone. */
	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);
}
2399
2400 static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
2401                                         struct intel_crtc *crtc)
2402 {
2403         const struct intel_crtc_state *crtc_state =
2404                 intel_atomic_get_new_crtc_state(state, crtc);
2405         u8 update_planes = crtc_state->update_planes;
2406         const struct intel_plane_state *plane_state;
2407         struct intel_plane *plane;
2408         int i;
2409
2410         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
2411                 if (plane->enable_flip_done &&
2412                     plane->pipe == crtc->pipe &&
2413                     update_planes & BIT(plane->id))
2414                         plane->enable_flip_done(plane);
2415         }
2416 }
2417
2418 static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
2419                                          struct intel_crtc *crtc)
2420 {
2421         const struct intel_crtc_state *crtc_state =
2422                 intel_atomic_get_new_crtc_state(state, crtc);
2423         u8 update_planes = crtc_state->update_planes;
2424         const struct intel_plane_state *plane_state;
2425         struct intel_plane *plane;
2426         int i;
2427
2428         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
2429                 if (plane->disable_flip_done &&
2430                     plane->pipe == crtc->pipe &&
2431                     update_planes & BIT(plane->id))
2432                         plane->disable_flip_done(plane);
2433         }
2434 }
2435
2436 static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
2437                                              struct intel_crtc *crtc)
2438 {
2439         struct drm_i915_private *i915 = to_i915(state->base.dev);
2440         const struct intel_crtc_state *old_crtc_state =
2441                 intel_atomic_get_old_crtc_state(state, crtc);
2442         const struct intel_crtc_state *new_crtc_state =
2443                 intel_atomic_get_new_crtc_state(state, crtc);
2444         u8 update_planes = new_crtc_state->update_planes;
2445         const struct intel_plane_state *old_plane_state;
2446         struct intel_plane *plane;
2447         bool need_vbl_wait = false;
2448         int i;
2449
2450         for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
2451                 if (plane->need_async_flip_disable_wa &&
2452                     plane->pipe == crtc->pipe &&
2453                     update_planes & BIT(plane->id)) {
2454                         /*
2455                          * Apart from the async flip bit we want to
2456                          * preserve the old state for the plane.
2457                          */
2458                         plane->async_flip(plane, old_crtc_state,
2459                                           old_plane_state, false);
2460                         need_vbl_wait = true;
2461                 }
2462         }
2463
2464         if (need_vbl_wait)
2465                 intel_wait_for_vblank(i915, crtc->pipe);
2466 }
2467
/*
 * Pre-plane-update half of a CRTC update: disable IPS, handle FBC,
 * apply workarounds required by the new state, quiesce self-refresh,
 * and program intermediate watermarks. Counterpart of
 * intel_post_plane_update(). The statement order is significant.
 */
static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
		hsw_disable_ips(old_crtc_state);

	/* A true return means the FBC change needs a vblank to settle. */
	if (intel_fbc_pre_update(state, crtc))
		intel_wait_for_vblank(dev_priv, pipe);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl,ehl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling.  LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (old_crtc_state->hw.active &&
	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!intel_crtc_needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks.  For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (dev_priv->display.initial_watermarks)
			dev_priv->display.initial_watermarks(state, crtc);
		else if (new_crtc_state->update_wm_pre)
			intel_update_watermarks(crtc);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * WA for platforms where async address update enable bit
	 * is double buffered and only latched at start of vblank.
	 */
	if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
		intel_crtc_async_flip_disable_wa(state, crtc);
}
2561
2562 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
2563                                       struct intel_crtc *crtc)
2564 {
2565         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2566         const struct intel_crtc_state *new_crtc_state =
2567                 intel_atomic_get_new_crtc_state(state, crtc);
2568         unsigned int update_mask = new_crtc_state->update_planes;
2569         const struct intel_plane_state *old_plane_state;
2570         struct intel_plane *plane;
2571         unsigned fb_bits = 0;
2572         int i;
2573
2574         intel_crtc_dpms_overlay_disable(crtc);
2575
2576         for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
2577                 if (crtc->pipe != plane->pipe ||
2578                     !(update_mask & BIT(plane->id)))
2579                         continue;
2580
2581                 intel_disable_plane(plane, new_crtc_state);
2582
2583                 if (old_plane_state->uapi.visible)
2584                         fb_bits |= plane->frontbuffer_bit;
2585         }
2586
2587         intel_frontbuffer_flip(dev_priv, fb_bits);
2588 }
2589
/*
 * intel_connector_primary_encoder - get the primary encoder for a connector
 * @connector: connector for which to return the encoder
 *
 * Returns the primary encoder for a connector. There is a 1:1 mapping from
 * all connectors to their encoder, except for DP-MST connectors which have
 * both a virtual and a primary encoder. These DP-MST primary encoders can be
 * pointed to by as many DP-MST connectors as there are pipes.
 */
static struct intel_encoder *
intel_connector_primary_encoder(struct intel_connector *connector)
{
	struct intel_encoder *encoder;

	/* DP-MST: the primary encoder lives on the port, not the connector. */
	if (connector->mst_port)
		return &dp_to_dig_port(connector->mst_port)->base;

	/* All other connector types have exactly one attached encoder. */
	encoder = intel_attached_encoder(connector);
	drm_WARN_ON(connector->base.dev, !encoder);

	return encoder;
}
2612
2613 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
2614 {
2615         struct drm_connector_state *new_conn_state;
2616         struct drm_connector *connector;
2617         int i;
2618
2619         for_each_new_connector_in_state(&state->base, connector, new_conn_state,
2620                                         i) {
2621                 struct intel_connector *intel_connector;
2622                 struct intel_encoder *encoder;
2623                 struct intel_crtc *crtc;
2624
2625                 if (!intel_connector_needs_modeset(state, connector))
2626                         continue;
2627
2628                 intel_connector = to_intel_connector(connector);
2629                 encoder = intel_connector_primary_encoder(intel_connector);
2630                 if (!encoder->update_prepare)
2631                         continue;
2632
2633                 crtc = new_conn_state->crtc ?
2634                         to_intel_crtc(new_conn_state->crtc) : NULL;
2635                 encoder->update_prepare(state, encoder, crtc);
2636         }
2637 }
2638
2639 static void intel_encoders_update_complete(struct intel_atomic_state *state)
2640 {
2641         struct drm_connector_state *new_conn_state;
2642         struct drm_connector *connector;
2643         int i;
2644
2645         for_each_new_connector_in_state(&state->base, connector, new_conn_state,
2646                                         i) {
2647                 struct intel_connector *intel_connector;
2648                 struct intel_encoder *encoder;
2649                 struct intel_crtc *crtc;
2650
2651                 if (!intel_connector_needs_modeset(state, connector))
2652                         continue;
2653
2654                 intel_connector = to_intel_connector(connector);
2655                 encoder = intel_connector_primary_encoder(intel_connector);
2656                 if (!encoder->update_complete)
2657                         continue;
2658
2659                 crtc = new_conn_state->crtc ?
2660                         to_intel_crtc(new_conn_state->crtc) : NULL;
2661                 encoder->update_complete(state, encoder, crtc);
2662         }
2663 }
2664
2665 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
2666                                           struct intel_crtc *crtc)
2667 {
2668         const struct intel_crtc_state *crtc_state =
2669                 intel_atomic_get_new_crtc_state(state, crtc);
2670         const struct drm_connector_state *conn_state;
2671         struct drm_connector *conn;
2672         int i;
2673
2674         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
2675                 struct intel_encoder *encoder =
2676                         to_intel_encoder(conn_state->best_encoder);
2677
2678                 if (conn_state->crtc != &crtc->base)
2679                         continue;
2680
2681                 if (encoder->pre_pll_enable)
2682                         encoder->pre_pll_enable(state, encoder,
2683                                                 crtc_state, conn_state);
2684         }
2685 }
2686
2687 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
2688                                       struct intel_crtc *crtc)
2689 {
2690         const struct intel_crtc_state *crtc_state =
2691                 intel_atomic_get_new_crtc_state(state, crtc);
2692         const struct drm_connector_state *conn_state;
2693         struct drm_connector *conn;
2694         int i;
2695
2696         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
2697                 struct intel_encoder *encoder =
2698                         to_intel_encoder(conn_state->best_encoder);
2699
2700                 if (conn_state->crtc != &crtc->base)
2701                         continue;
2702
2703                 if (encoder->pre_enable)
2704                         encoder->pre_enable(state, encoder,
2705                                             crtc_state, conn_state);
2706         }
2707 }
2708
2709 static void intel_encoders_enable(struct intel_atomic_state *state,
2710                                   struct intel_crtc *crtc)
2711 {
2712         const struct intel_crtc_state *crtc_state =
2713                 intel_atomic_get_new_crtc_state(state, crtc);
2714         const struct drm_connector_state *conn_state;
2715         struct drm_connector *conn;
2716         int i;
2717
2718         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
2719                 struct intel_encoder *encoder =
2720                         to_intel_encoder(conn_state->best_encoder);
2721
2722                 if (conn_state->crtc != &crtc->base)
2723                         continue;
2724
2725                 if (encoder->enable)
2726                         encoder->enable(state, encoder,
2727                                         crtc_state, conn_state);
2728                 intel_opregion_notify_encoder(encoder, true);
2729         }
2730 }
2731
2732 static void intel_encoders_pre_disable(struct intel_atomic_state *state,
2733                                        struct intel_crtc *crtc)
2734 {
2735         const struct intel_crtc_state *old_crtc_state =
2736                 intel_atomic_get_old_crtc_state(state, crtc);
2737         const struct drm_connector_state *old_conn_state;
2738         struct drm_connector *conn;
2739         int i;
2740
2741         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
2742                 struct intel_encoder *encoder =
2743                         to_intel_encoder(old_conn_state->best_encoder);
2744
2745                 if (old_conn_state->crtc != &crtc->base)
2746                         continue;
2747
2748                 if (encoder->pre_disable)
2749                         encoder->pre_disable(state, encoder, old_crtc_state,
2750                                              old_conn_state);
2751         }
2752 }
2753
2754 static void intel_encoders_disable(struct intel_atomic_state *state,
2755                                    struct intel_crtc *crtc)
2756 {
2757         const struct intel_crtc_state *old_crtc_state =
2758                 intel_atomic_get_old_crtc_state(state, crtc);
2759         const struct drm_connector_state *old_conn_state;
2760         struct drm_connector *conn;
2761         int i;
2762
2763         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
2764                 struct intel_encoder *encoder =
2765                         to_intel_encoder(old_conn_state->best_encoder);
2766
2767                 if (old_conn_state->crtc != &crtc->base)
2768                         continue;
2769
2770                 intel_opregion_notify_encoder(encoder, false);
2771                 if (encoder->disable)
2772                         encoder->disable(state, encoder,
2773                                          old_crtc_state, old_conn_state);
2774         }
2775 }
2776
2777 static void intel_encoders_post_disable(struct intel_atomic_state *state,
2778                                         struct intel_crtc *crtc)
2779 {
2780         const struct intel_crtc_state *old_crtc_state =
2781                 intel_atomic_get_old_crtc_state(state, crtc);
2782         const struct drm_connector_state *old_conn_state;
2783         struct drm_connector *conn;
2784         int i;
2785
2786         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
2787                 struct intel_encoder *encoder =
2788                         to_intel_encoder(old_conn_state->best_encoder);
2789
2790                 if (old_conn_state->crtc != &crtc->base)
2791                         continue;
2792
2793                 if (encoder->post_disable)
2794                         encoder->post_disable(state, encoder,
2795                                               old_crtc_state, old_conn_state);
2796         }
2797 }
2798
2799 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
2800                                             struct intel_crtc *crtc)
2801 {
2802         const struct intel_crtc_state *old_crtc_state =
2803                 intel_atomic_get_old_crtc_state(state, crtc);
2804         const struct drm_connector_state *old_conn_state;
2805         struct drm_connector *conn;
2806         int i;
2807
2808         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
2809                 struct intel_encoder *encoder =
2810                         to_intel_encoder(old_conn_state->best_encoder);
2811
2812                 if (old_conn_state->crtc != &crtc->base)
2813                         continue;
2814
2815                 if (encoder->post_pll_disable)
2816                         encoder->post_pll_disable(state, encoder,
2817                                                   old_crtc_state, old_conn_state);
2818         }
2819 }
2820
2821 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
2822                                        struct intel_crtc *crtc)
2823 {
2824         const struct intel_crtc_state *crtc_state =
2825                 intel_atomic_get_new_crtc_state(state, crtc);
2826         const struct drm_connector_state *conn_state;
2827         struct drm_connector *conn;
2828         int i;
2829
2830         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
2831                 struct intel_encoder *encoder =
2832                         to_intel_encoder(conn_state->best_encoder);
2833
2834                 if (conn_state->crtc != &crtc->base)
2835                         continue;
2836
2837                 if (encoder->update_pipe)
2838                         encoder->update_pipe(state, encoder,
2839                                              crtc_state, conn_state);
2840         }
2841 }
2842
2843 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
2844 {
2845         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2846         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
2847
2848         plane->disable_plane(plane, crtc_state);
2849 }
2850
/*
 * Full modeset enable sequence for ILK-style (PCH-based) CRTCs:
 * transcoder timings/M-N/pipeconf setup, encoder pre-enable, FDI PLL,
 * panel fitter, LUT, watermarks, pipe enable, PCH enable, encoder
 * enable. The ordering follows the hardware enable sequence and the
 * comments below -- do not reorder.
 */
static void ilk_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	/* FDI link M/N values are only needed when driving a PCH port. */
	if (new_crtc_state->has_pch_encoder)
		intel_cpu_transcoder_set_m_n(new_crtc_state,
					     &new_crtc_state->fdi_m_n, NULL);

	ilk_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_encoders_pre_enable(state, crtc);

	if (new_crtc_state->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ilk_fdi_pll_enable(new_crtc_state);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		ilk_pch_enable(state, new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev_priv, pipe);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (new_crtc_state->has_pch_encoder) {
		intel_wait_for_vblank(dev_priv, pipe);
		intel_wait_for_vblank(dev_priv, pipe);
	}
	/* Re-arm the underrun reporting suppressed at the top. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
2939
2940 /* IPS only exists on ULT machines and is tied to pipe A. */
2941 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
2942 {
2943         return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
2944 }
2945
2946 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
2947                                             enum pipe pipe, bool apply)
2948 {
2949         u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
2950         u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
2951
2952         if (apply)
2953                 val |= mask;
2954         else
2955                 val &= ~mask;
2956
2957         intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
2958 }
2959
2960 static void icl_pipe_mbus_enable(struct intel_crtc *crtc, bool joined_mbus)
2961 {
2962         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2963         enum pipe pipe = crtc->pipe;
2964         u32 val;
2965
2966         /* Wa_22010947358:adl-p */
2967         if (IS_ALDERLAKE_P(dev_priv))
2968                 val = joined_mbus ? MBUS_DBOX_A_CREDIT(6) : MBUS_DBOX_A_CREDIT(4);
2969         else
2970                 val = MBUS_DBOX_A_CREDIT(2);
2971
2972         if (DISPLAY_VER(dev_priv) >= 12) {
2973                 val |= MBUS_DBOX_BW_CREDIT(2);
2974                 val |= MBUS_DBOX_B_CREDIT(12);
2975         } else {
2976                 val |= MBUS_DBOX_BW_CREDIT(1);
2977                 val |= MBUS_DBOX_B_CREDIT(8);
2978         }
2979
2980         intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
2981 }
2982
2983 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
2984 {
2985         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2986         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2987
2988         intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
2989                        HSW_LINETIME(crtc_state->linetime) |
2990                        HSW_IPS_LINETIME(crtc_state->ips_linetime));
2991 }
2992
2993 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
2994 {
2995         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2996         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2997         i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
2998         u32 val;
2999
3000         val = intel_de_read(dev_priv, reg);
3001         val &= ~HSW_FRAME_START_DELAY_MASK;
3002         val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
3003         intel_de_write(dev_priv, reg, val);
3004 }
3005
/*
 * Bigjoiner pre-enable: for the master pipe only VDSC still needs
 * enabling (the rest already ran in the regular pre-enable path);
 * for a slave pipe, first run enable sequence steps 1-7 on its master,
 * then enable DSC on the slave itself.
 */
static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
					 const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *master = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(master->base.dev);
	struct intel_crtc_state *master_crtc_state;
	struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	struct intel_encoder *encoder = NULL;
	int i;

	/* A slave's sequencing is driven through its linked master CRTC. */
	if (crtc_state->bigjoiner_slave)
		master = crtc_state->bigjoiner_linked_crtc;

	master_crtc_state = intel_atomic_get_new_crtc_state(state, master);

	/* Look up the encoder feeding the master pipe. */
	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		if (conn_state->crtc != &master->base)
			continue;

		encoder = to_intel_encoder(conn_state->best_encoder);
		break;
	}

	if (!crtc_state->bigjoiner_slave) {
		/* need to enable VDSC, which we skipped in pre-enable */
		intel_dsc_enable(encoder, crtc_state);
	} else {
		/*
		 * Enable sequence steps 1-7 on bigjoiner master
		 */
		intel_encoders_pre_pll_enable(state, master);
		if (master_crtc_state->shared_dpll)
			intel_enable_shared_dpll(master_crtc_state);
		intel_encoders_pre_enable(state, master);

		/* and DSC on slave */
		intel_dsc_enable(NULL, crtc_state);
	}

	if (DISPLAY_VER(dev_priv) >= 13)
		intel_uncompressed_joiner_enable(crtc_state);
}
3049
/*
 * Enable sequence for HSW+ (DDI) pipes: PLL/encoder pre-enable,
 * transcoder timings, pipe configuration, panel fitter, LUTs,
 * watermarks and finally the encoders, with the various per-platform
 * workarounds applied along the way. Statement order follows the
 * hardware-mandated enable sequence and must not be changed casually.
 */
static void hsw_crtc_enable(struct intel_atomic_state *state,
                            struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	bool psl_clkgate_wa;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	/* Bigjoiner pipes use a dedicated pre-enable sequence. */
	if (!new_crtc_state->bigjoiner) {
		intel_encoders_pre_pll_enable(state, crtc);

		if (new_crtc_state->shared_dpll)
			intel_enable_shared_dpll(new_crtc_state);

		intel_encoders_pre_enable(state, crtc);
	} else {
		icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
	}

	intel_set_pipe_src_size(new_crtc_state);
	if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
		bdw_set_pipemisc(new_crtc_state);

	/* Transcoder setup is skipped for bigjoiner slaves and DSI */
	if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder)) {
		intel_set_transcoder_timings(new_crtc_state);

		/* pixel_multiplier is 1-based, the register is 0-based */
		if (cpu_transcoder != TRANSCODER_EDP)
			intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
				       new_crtc_state->pixel_multiplier - 1);

		if (new_crtc_state->has_pch_encoder)
			intel_cpu_transcoder_set_m_n(new_crtc_state,
						     &new_crtc_state->fdi_m_n, NULL);

		hsw_set_frame_start_delay(new_crtc_state);
	}

	if (!transcoder_is_dsi(cpu_transcoder))
		hsw_set_pipeconf(new_crtc_state);

	crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk */
	psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
		new_crtc_state->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (DISPLAY_VER(dev_priv) >= 9)
		skl_pfit_enable(new_crtc_state);
	else
		ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
	if (DISPLAY_VER(dev_priv) < 9)
		intel_disable_primary_plane(new_crtc_state);

	hsw_set_linetime_wm(new_crtc_state);

	if (DISPLAY_VER(dev_priv) >= 11)
		icl_set_pipe_chicken(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);

	if (DISPLAY_VER(dev_priv) >= 11) {
		const struct intel_dbuf_state *dbuf_state =
				intel_atomic_get_new_dbuf_state(state);

		icl_pipe_mbus_enable(crtc, dbuf_state->joined_mbus);
	}

	/* Only bigjoiner slaves turn their vblank on here */
	if (new_crtc_state->bigjoiner_slave)
		intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	/* Undo WA #1180 once the pipe has produced a vblank */
	if (psl_clkgate_wa) {
		intel_wait_for_vblank(dev_priv, pipe);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
	}
}
3151
/*
 * Disable the PCH panel fitter for the crtc described by
 * @old_crtc_state by clearing its control, window position and window
 * size registers.
 */
void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* To avoid upsetting the power well on haswell only disable the pfit if
	 * it's in use. The hw state code will make sure we get this right. */
	if (!old_crtc_state->pch_pfit.enabled)
		return;

	intel_de_write(dev_priv, PF_CTL(pipe), 0);
	intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
	intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
}
3167
/*
 * Disable sequence for ILK-style (PCH) pipes: encoders, vblank, pipe,
 * panel fitter, FDI and finally the PCH transcoder/PLL plumbing.
 * Underrun reporting is suppressed across the sequence and re-enabled
 * at the end.
 */
static void ilk_crtc_disable(struct intel_atomic_state *state,
                             struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_pipe(old_crtc_state);

	ilk_pfit_disable(old_crtc_state);

	if (old_crtc_state->has_pch_encoder)
		ilk_fdi_disable(crtc);

	intel_encoders_post_disable(state, crtc);

	/* Tear down the PCH transcoder and its clock routing */
	if (old_crtc_state->has_pch_encoder) {
		ilk_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev_priv)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = intel_de_read(dev_priv, reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			intel_de_write(dev_priv, reg, temp);

			/* disable DPLL_SEL */
			temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
		}

		ilk_fdi_pll_disable(crtc);
	}

	/* Re-arm underrun reporting now that everything is off */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
3224
/*
 * Disable sequence for HSW+ (DDI) pipes. Most of the actual work is
 * done by the encoder disable/post_disable hooks.
 */
static void hsw_crtc_disable(struct intel_atomic_state *state,
                             struct intel_crtc *crtc)
{
	/*
	 * FIXME collapse everything to one hook.
	 * Need care with mst->ddi interactions.
	 */
	intel_encoders_disable(state, crtc);
	intel_encoders_post_disable(state, crtc);
}
3235
/*
 * Program and enable the GMCH panel fitter for pre-ILK platforms.
 * No-op when the crtc state doesn't request panel fitting.
 */
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!crtc_state->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	/* Ratios must be programmed before enabling via PFIT_CONTROL */
	intel_de_write(dev_priv, PFIT_PGM_RATIOS,
		       crtc_state->gmch_pfit.pgm_ratios);
	intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
}
3260
3261 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
3262 {
3263         if (phy == PHY_NONE)
3264                 return false;
3265         else if (IS_DG2(dev_priv))
3266                 /*
3267                  * DG2 outputs labelled as "combo PHY" in the bspec use
3268                  * SNPS PHYs with completely different programming,
3269                  * hence we always return false here.
3270                  */
3271                 return false;
3272         else if (IS_ALDERLAKE_S(dev_priv))
3273                 return phy <= PHY_E;
3274         else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
3275                 return phy <= PHY_D;
3276         else if (IS_JSL_EHL(dev_priv))
3277                 return phy <= PHY_C;
3278         else if (DISPLAY_VER(dev_priv) >= 11)
3279                 return phy <= PHY_B;
3280         else
3281                 return false;
3282 }
3283
3284 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
3285 {
3286         if (IS_DG2(dev_priv))
3287                 /* DG2's "TC1" output uses a SNPS PHY */
3288                 return false;
3289         else if (IS_ALDERLAKE_P(dev_priv))
3290                 return phy >= PHY_F && phy <= PHY_I;
3291         else if (IS_TIGERLAKE(dev_priv))
3292                 return phy >= PHY_D && phy <= PHY_I;
3293         else if (IS_ICELAKE(dev_priv))
3294                 return phy >= PHY_C && phy <= PHY_F;
3295         else
3296                 return false;
3297 }
3298
3299 bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
3300 {
3301         if (phy == PHY_NONE)
3302                 return false;
3303         else if (IS_DG2(dev_priv))
3304                 /*
3305                  * All four "combo" ports and the TC1 port (PHY E) use
3306                  * Synopsis PHYs.
3307                  */
3308                 return phy <= PHY_E;
3309
3310         return false;
3311 }
3312
3313 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
3314 {
3315         if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
3316                 return PHY_D + port - PORT_D_XELPD;
3317         else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1)
3318                 return PHY_F + port - PORT_TC1;
3319         else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
3320                 return PHY_B + port - PORT_TC1;
3321         else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
3322                 return PHY_C + port - PORT_TC1;
3323         else if (IS_JSL_EHL(i915) && port == PORT_D)
3324                 return PHY_A;
3325
3326         return PHY_A + port - PORT_A;
3327 }
3328
3329 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
3330 {
3331         if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
3332                 return TC_PORT_NONE;
3333
3334         if (DISPLAY_VER(dev_priv) >= 12)
3335                 return TC_PORT_1 + port - PORT_TC1;
3336         else
3337                 return TC_PORT_1 + port - PORT_C;
3338 }
3339
/*
 * Map a DDI port to its lane power domain. Unknown ports warn via
 * MISSING_CASE() and fall back to POWER_DOMAIN_PORT_OTHER.
 */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	case PORT_F:
		return POWER_DOMAIN_PORT_DDI_F_LANES;
	case PORT_G:
		return POWER_DOMAIN_PORT_DDI_G_LANES;
	case PORT_H:
		return POWER_DOMAIN_PORT_DDI_H_LANES;
	case PORT_I:
		return POWER_DOMAIN_PORT_DDI_I_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}
3366
/*
 * Return the AUX power domain for @dig_port. Type-C ports currently in
 * Thunderbolt alt mode use the dedicated *_TBT domains; everything else
 * goes through the legacy mapping in intel_legacy_aux_to_power_domain().
 */
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_tc(dev_priv, phy) &&
	    dig_port->tc_mode == TC_PORT_TBT_ALT) {
		switch (dig_port->aux_ch) {
		case AUX_CH_C:
			return POWER_DOMAIN_AUX_C_TBT;
		case AUX_CH_D:
			return POWER_DOMAIN_AUX_D_TBT;
		case AUX_CH_E:
			return POWER_DOMAIN_AUX_E_TBT;
		case AUX_CH_F:
			return POWER_DOMAIN_AUX_F_TBT;
		case AUX_CH_G:
			return POWER_DOMAIN_AUX_G_TBT;
		case AUX_CH_H:
			return POWER_DOMAIN_AUX_H_TBT;
		case AUX_CH_I:
			return POWER_DOMAIN_AUX_I_TBT;
		default:
			MISSING_CASE(dig_port->aux_ch);
			/* arbitrary fallback after the warning above */
			return POWER_DOMAIN_AUX_C_TBT;
		}
	}

	return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
}
3398
/*
 * Converts aux_ch to power_domain without caring about TBT ports for that use
 * intel_aux_power_domain()
 *
 * Unknown channels warn via MISSING_CASE() and fall back to
 * POWER_DOMAIN_AUX_A.
 */
enum intel_display_power_domain
intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
{
	switch (aux_ch) {
	case AUX_CH_A:
		return POWER_DOMAIN_AUX_A;
	case AUX_CH_B:
		return POWER_DOMAIN_AUX_B;
	case AUX_CH_C:
		return POWER_DOMAIN_AUX_C;
	case AUX_CH_D:
		return POWER_DOMAIN_AUX_D;
	case AUX_CH_E:
		return POWER_DOMAIN_AUX_E;
	case AUX_CH_F:
		return POWER_DOMAIN_AUX_F;
	case AUX_CH_G:
		return POWER_DOMAIN_AUX_G;
	case AUX_CH_H:
		return POWER_DOMAIN_AUX_H;
	case AUX_CH_I:
		return POWER_DOMAIN_AUX_I;
	default:
		MISSING_CASE(aux_ch);
		return POWER_DOMAIN_AUX_A;
	}
}
3430
/*
 * Compute the bitmask of display power domains required by @crtc_state
 * while active: pipe, transcoder, optional panel fitter, each attached
 * encoder's domain, audio, the shared DPLL core domain and DSC.
 * Returns 0 for an inactive crtc.
 */
static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_encoder *encoder;
	enum pipe pipe = crtc->pipe;
	u64 mask;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (!crtc_state->hw.active)
		return 0;

	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
	mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
	if (crtc_state->pch_pfit.enabled ||
	    crtc_state->pch_pfit.force_thru)
		mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

	/* Each attached encoder contributes its own power domain */
	drm_for_each_encoder_mask(encoder, &dev_priv->drm,
				  crtc_state->uapi.encoder_mask) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		mask |= BIT_ULL(intel_encoder->power_domain);
	}

	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
		mask |= BIT_ULL(POWER_DOMAIN_AUDIO_MMIO);

	if (crtc_state->shared_dpll)
		mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);

	if (crtc_state->dsc.compression_enable)
		mask |= BIT_ULL(intel_dsc_power_domain(crtc_state));

	return mask;
}
3467
/*
 * Acquire references for the power domains the crtc newly requires and
 * return the mask of domains it no longer needs; the caller is expected
 * to release those (see modeset_put_crtc_power_domains()).
 */
static u64
modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain domain;
	u64 domains, new_domains, old_domains;

	domains = get_crtc_power_domains(crtc_state);

	/* Domains to acquire now vs. domains to drop later */
	new_domains = domains & ~crtc->enabled_power_domains.mask;
	old_domains = crtc->enabled_power_domains.mask & ~domains;

	for_each_power_domain(domain, new_domains)
		intel_display_power_get_in_set(dev_priv,
					       &crtc->enabled_power_domains,
					       domain);

	return old_domains;
}
3488
3489 static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
3490                                            u64 domains)
3491 {
3492         intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
3493                                             &crtc->enabled_power_domains,
3494                                             domains);
3495 }
3496
/*
 * Enable sequence for VLV/CHV pipes: timings, pipe config, PLL,
 * panel fitter, LUTs, watermarks, pipe and finally the encoders.
 * Statement order follows the hardware enable sequence.
 */
static void valleyview_crtc_enable(struct intel_atomic_state *state,
                                   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	/* CHV pipe B: legacy blender mode, black background canvas */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(state, crtc);

	if (IS_CHERRYVIEW(dev_priv))
		chv_enable_pll(new_crtc_state);
	else
		vlv_enable_pll(new_crtc_state);

	intel_encoders_pre_enable(state, crtc);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	/* initial_watermarks is always set on VLV/CHV, no NULL check needed */
	dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}
3548
/*
 * Enable sequence for gen2-4 (i9xx) pipes: timings, pipe config, PLL,
 * panel fitter, LUTs, watermarks, pipe and finally the encoders.
 * Statement order follows the hardware enable sequence.
 */
static void i9xx_crtc_enable(struct intel_atomic_state *state,
                             struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	/* no underrun reporting on gen2 */
	if (DISPLAY_VER(dev_priv) != 2)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(state, crtc);

	i9xx_enable_pll(new_crtc_state);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	else
		intel_update_watermarks(crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	/* prevents spurious underruns */
	if (DISPLAY_VER(dev_priv) == 2)
		intel_wait_for_vblank(dev_priv, pipe);
}
3598
/*
 * Disable the GMCH panel fitter. No-op if it wasn't enabled in the old
 * crtc state.
 */
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!old_crtc_state->gmch_pfit.control)
		return;

	/* pfit may only be touched while the pipe is disabled */
	assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);

	drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
		    intel_de_read(dev_priv, PFIT_CONTROL));
	intel_de_write(dev_priv, PFIT_CONTROL, 0);
}
3613
/*
 * Disable sequence for gen2-4 (i9xx) pipes: encoders, vblank, pipe,
 * panel fitter and finally the PLL (except for DSI outputs).
 */
static void i9xx_crtc_disable(struct intel_atomic_state *state,
                              struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (DISPLAY_VER(dev_priv) == 2)
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_pipe(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(state, crtc);

	/* DSI outputs manage their own PLL */
	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(state, crtc);

	/* no underrun reporting on gen2 */
	if (DISPLAY_VER(dev_priv) != 2)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(crtc);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
3660
3661 static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
3662                                         struct drm_modeset_acquire_ctx *ctx)
3663 {
3664         struct intel_encoder *encoder;
3665         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3666         struct intel_bw_state *bw_state =
3667                 to_intel_bw_state(dev_priv->bw_obj.state);
3668         struct intel_cdclk_state *cdclk_state =
3669                 to_intel_cdclk_state(dev_priv->cdclk.obj.state);
3670         struct intel_dbuf_state *dbuf_state =
3671                 to_intel_dbuf_state(dev_priv->dbuf.obj.state);
3672         struct intel_crtc_state *crtc_state =
3673                 to_intel_crtc_state(crtc->base.state);
3674         struct intel_plane *plane;
3675         struct drm_atomic_state *state;
3676         struct intel_crtc_state *temp_crtc_state;
3677         enum pipe pipe = crtc->pipe;
3678         int ret;
3679
3680         if (!crtc_state->hw.active)
3681                 return;
3682
3683         for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
3684                 const struct intel_plane_state *plane_state =
3685                         to_intel_plane_state(plane->base.state);
3686
3687                 if (plane_state->uapi.visible)
3688                         intel_plane_disable_noatomic(crtc, plane);
3689         }
3690
3691         state = drm_atomic_state_alloc(&dev_priv->drm);
3692         if (!state) {
3693                 drm_dbg_kms(&dev_priv->drm,
3694                             "failed to disable [CRTC:%d:%s], out of memory",
3695                             crtc->base.base.id, crtc->base.name);
3696                 return;
3697         }
3698
3699         state->acquire_ctx = ctx;
3700
3701         /* Everything's already locked, -EDEADLK can't happen. */
3702         temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
3703         ret = drm_atomic_add_affected_connectors(state, &crtc->base);
3704
3705         drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);
3706
3707         dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);
3708
3709         drm_atomic_state_put(state);
3710
3711         drm_dbg_kms(&dev_priv->drm,
3712                     "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
3713                     crtc->base.base.id, crtc->base.name);
3714
3715         crtc->active = false;
3716         crtc->base.enabled = false;
3717
3718         drm_WARN_ON(&dev_priv->drm,
3719                     drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
3720         crtc_state->uapi.active = false;
3721         crtc_state->uapi.connector_mask = 0;
3722         crtc_state->uapi.encoder_mask = 0;
3723         intel_crtc_free_hw_state(crtc_state);
3724         memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));
3725
3726         for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
3727                 encoder->base.crtc = NULL;
3728
3729         intel_fbc_disable(crtc);
3730         intel_update_watermarks(crtc);
3731         intel_disable_shared_dpll(crtc_state);
3732
3733         intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);
3734
3735         dev_priv->active_pipes &= ~BIT(pipe);
3736         cdclk_state->min_cdclk[pipe] = 0;
3737         cdclk_state->min_voltage_level[pipe] = 0;
3738         cdclk_state->active_pipes &= ~BIT(pipe);
3739
3740         dbuf_state->active_pipes &= ~BIT(pipe);
3741
3742         bw_state->data_rate[pipe] = 0;
3743         bw_state->num_active_planes[pipe] = 0;
3744 }
3745
3746 /*
3747  * turn all crtc's off, but do not adjust state
3748  * This has to be paired with a call to intel_modeset_setup_hw_state.
3749  */
3750 int intel_display_suspend(struct drm_device *dev)
3751 {
3752         struct drm_i915_private *dev_priv = to_i915(dev);
3753         struct drm_atomic_state *state;
3754         int ret;
3755
3756         if (!HAS_DISPLAY(dev_priv))
3757                 return 0;
3758
3759         state = drm_atomic_helper_suspend(dev);
3760         ret = PTR_ERR_OR_ZERO(state);
3761         if (ret)
3762                 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
3763                         ret);
3764         else
3765                 dev_priv->modeset_restore_state = state;
3766         return ret;
3767 }
3768
/*
 * Release drm core bookkeeping for @encoder and free the containing
 * intel_encoder.
 */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}
3776
/* Cross check the actual hw state with our own modeset state tracking (and it's
 * internal consistency). */
static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.base.id, connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = intel_attached_encoder(connector);

		I915_STATE_WARN(!crtc_state,
			 "connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->hw.active,
				"connector is active, but attached crtc isn't\n");

		/* nothing further to check without an encoder, or for DP MST */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		/* hw says off: software state must agree */
		I915_STATE_WARN(crtc_state && crtc_state->hw.active,
				"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
3815
3816 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
3817 {
3818         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3819         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3820
3821         /* IPS only exists on ULT machines and is tied to pipe A. */
3822         if (!hsw_crtc_supports_ips(crtc))
3823                 return false;
3824
3825         if (!dev_priv->params.enable_ips)
3826                 return false;
3827
3828         if (crtc_state->pipe_bpp > 24)
3829                 return false;
3830
3831         /*
3832          * We compare against max which means we must take
3833          * the increased cdclk requirement into account when
3834          * calculating the new cdclk.
3835          *
3836          * Should measure whether using a lower cdclk w/o IPS
3837          */
3838         if (IS_BROADWELL(dev_priv) &&
3839             crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
3840                 return false;
3841
3842         return true;
3843 }
3844
/*
 * Decide whether IPS should actually be enabled for this crtc state and
 * record the result in crtc_state->ips_enabled. Returns 0, or a negative
 * error code if looking up the atomic cdclk state fails.
 */
static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->uapi.crtc->dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);

	crtc_state->ips_enabled = false;

	if (!hsw_crtc_state_ips_capable(crtc_state))
		return 0;

	/*
	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
	 * enabled and disabled dynamically based on package C states,
	 * user space can't make reliable use of the CRCs, so let's just
	 * completely disable it.
	 */
	if (crtc_state->crc_enabled)
		return 0;

	/* IPS should be fine as long as at least one plane is enabled. */
	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
		return 0;

	if (IS_BROADWELL(dev_priv)) {
		const struct intel_cdclk_state *cdclk_state;

		cdclk_state = intel_atomic_get_cdclk_state(state);
		if (IS_ERR(cdclk_state))
			return PTR_ERR(cdclk_state);

		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
		if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
			return 0;
	}

	crtc_state->ips_enabled = true;

	return 0;
}
3886
3887 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
3888 {
3889         const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3890
3891         /* GDG double wide on either pipe, otherwise pipe A only */
3892         return DISPLAY_VER(dev_priv) < 4 &&
3893                 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
3894 }
3895
3896 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
3897 {
3898         u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
3899         struct drm_rect src;
3900
3901         /*
3902          * We only use IF-ID interlacing. If we ever use
3903          * PF-ID we'll need to adjust the pixel_rate here.
3904          */
3905
3906         if (!crtc_state->pch_pfit.enabled)
3907                 return pixel_rate;
3908
3909         drm_rect_init(&src, 0, 0,
3910                       crtc_state->pipe_src_w << 16,
3911                       crtc_state->pipe_src_h << 16);
3912
3913         return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst,
3914                                    pixel_rate);
3915 }
3916
/*
 * Populate the user-visible timing fields of @mode from the crtc_*
 * (hardware) timings of @timings, and (re)generate the mode name.
 *
 * Safe to call with @mode == @timings for an in-place update: only the
 * crtc_* fields are read and only the non-crtc_* fields (plus flags/type/
 * clock/name) are written.
 */
static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
					 const struct drm_display_mode *timings)
{
	mode->hdisplay = timings->crtc_hdisplay;
	mode->htotal = timings->crtc_htotal;
	mode->hsync_start = timings->crtc_hsync_start;
	mode->hsync_end = timings->crtc_hsync_end;

	mode->vdisplay = timings->crtc_vdisplay;
	mode->vtotal = timings->crtc_vtotal;
	mode->vsync_start = timings->crtc_vsync_start;
	mode->vsync_end = timings->crtc_vsync_end;

	mode->flags = timings->flags;
	mode->type = DRM_MODE_TYPE_DRIVER;

	mode->clock = timings->crtc_clock;

	drm_mode_set_name(mode);
}
3937
3938 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
3939 {
3940         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3941
3942         if (HAS_GMCH(dev_priv))
3943                 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
3944                 crtc_state->pixel_rate =
3945                         crtc_state->hw.pipe_mode.crtc_clock;
3946         else
3947                 crtc_state->pixel_rate =
3948                         ilk_pipe_pixel_rate(crtc_state);
3949 }
3950
/*
 * Derive hw.pipe_mode, hw.mode and the pixel rate from the hw.adjusted_mode
 * that was read out of the hardware, undoing/applying the bigjoiner and
 * eDP MSO splitter transformations.
 */
static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
{
	struct drm_display_mode *mode = &crtc_state->hw.mode;
	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

	drm_mode_copy(pipe_mode, adjusted_mode);

	if (crtc_state->bigjoiner) {
		/*
		 * transcoder is programmed to the full mode,
		 * but pipe timings are half of the transcoder mode
		 */
		pipe_mode->crtc_hdisplay /= 2;
		pipe_mode->crtc_hblank_start /= 2;
		pipe_mode->crtc_hblank_end /= 2;
		pipe_mode->crtc_hsync_start /= 2;
		pipe_mode->crtc_hsync_end /= 2;
		pipe_mode->crtc_htotal /= 2;
		pipe_mode->crtc_clock /= 2;
	}

	if (crtc_state->splitter.enable) {
		int n = crtc_state->splitter.link_count;
		int overlap = crtc_state->splitter.pixel_overlap;

		/*
		 * eDP MSO uses segment timings from EDID for transcoder
		 * timings, but full mode for everything else.
		 *
		 * h_full = (h_segment - pixel_overlap) * link_count
		 */
		pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
		pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
		pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
		pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
		pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
		pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
		pipe_mode->crtc_clock *= n;

		/* MSO: adjusted_mode gets the full (expanded) timings too. */
		intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
		intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);
	} else {
		/* Sync the user-visible fields with the crtc_* timings. */
		intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
		intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode);
	}

	intel_crtc_compute_pixel_rate(crtc_state);

	drm_mode_copy(mode, adjusted_mode);
	/* With bigjoiner the user mode spans both pipes: double the width. */
	mode->hdisplay = crtc_state->pipe_src_w << crtc_state->bigjoiner;
	mode->vdisplay = crtc_state->pipe_src_h;
}
4004
/*
 * Read out the encoder's hw state into @crtc_state, then derive the
 * remaining (mode/pipe_mode/pixel rate) state from what was read.
 */
static void intel_encoder_get_config(struct intel_encoder *encoder,
				     struct intel_crtc_state *crtc_state)
{
	encoder->get_config(encoder, crtc_state);

	intel_crtc_readout_derived_state(crtc_state);
}
4012
/*
 * Compute and validate the per-pipe configuration: derive hw.pipe_mode
 * from hw.adjusted_mode (accounting for bigjoiner and eDP MSO splitter),
 * check the dotclock against the hw limit (possibly enabling double wide
 * mode on old platforms), and enforce pipe width/hsync restrictions.
 *
 * Returns 0 on success, -EINVAL if the mode can't be supported.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_display_mode *pipe_mode = &pipe_config->hw.pipe_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	drm_mode_copy(pipe_mode, &pipe_config->hw.adjusted_mode);

	/* Adjust pipe_mode for bigjoiner, with half the horizontal mode */
	if (pipe_config->bigjoiner) {
		pipe_mode->crtc_clock /= 2;
		pipe_mode->crtc_hdisplay /= 2;
		pipe_mode->crtc_hblank_start /= 2;
		pipe_mode->crtc_hblank_end /= 2;
		pipe_mode->crtc_hsync_start /= 2;
		pipe_mode->crtc_hsync_end /= 2;
		pipe_mode->crtc_htotal /= 2;
		pipe_config->pipe_src_w /= 2;
	}

	if (pipe_config->splitter.enable) {
		int n = pipe_config->splitter.link_count;
		int overlap = pipe_config->splitter.pixel_overlap;

		/* eDP MSO: h_full = (h_segment - pixel_overlap) * link_count */
		pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
		pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
		pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
		pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
		pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
		pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
		pipe_mode->crtc_clock *= n;
	}

	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);

	if (DISPLAY_VER(dev_priv) < 4) {
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    pipe_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (pipe_mode->crtc_clock > clock_limit) {
		drm_dbg_kms(&dev_priv->drm,
			    "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			    pipe_mode->crtc_clock, clock_limit,
			    yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
	    pipe_mode->crtc_hsync_start == pipe_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	/* PCH encoders also need the FDI link configuration validated. */
	if (pipe_config->has_pch_encoder)
		return ilk_fdi_compute_config(crtc, pipe_config);

	return 0;
}
4106
4107 static void
4108 intel_reduce_m_n_ratio(u32 *num, u32 *den)
4109 {
4110         while (*num > DATA_LINK_M_N_MASK ||
4111                *den > DATA_LINK_M_N_MASK) {
4112                 *num >>= 1;
4113                 *den >>= 1;
4114         }
4115 }
4116
4117 static void compute_m_n(unsigned int m, unsigned int n,
4118                         u32 *ret_m, u32 *ret_n,
4119                         bool constant_n)
4120 {
4121         /*
4122          * Several DP dongles in particular seem to be fussy about
4123          * too large link M/N values. Give N value as 0x8000 that
4124          * should be acceptable by specific devices. 0x8000 is the
4125          * specified fixed N value for asynchronous clock mode,
4126          * which the devices expect also in synchronous clock mode.
4127          */
4128         if (constant_n)
4129                 *ret_n = DP_LINK_CONSTANT_N_VALUE;
4130         else
4131                 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
4132
4133         *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
4134         intel_reduce_m_n_ratio(ret_m, ret_n);
4135 }
4136
/*
 * Compute the link M/N values for a DP/FDI link. The data M/N pair is the
 * ratio of the stream data rate to the total link bandwidth
 * (link_clock * nlanes * 8); the link M/N pair is the ratio of pixel
 * clock to link clock.
 */
void
intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
		       int pixel_clock, int link_clock,
		       struct intel_link_m_n *m_n,
		       bool constant_n, bool fec_enable)
{
	u32 data_clock = bits_per_pixel * pixel_clock;

	/* FEC consumes link bandwidth; scale the data clock to account for it. */
	if (fec_enable)
		data_clock = intel_dp_mode_to_fec_clock(data_clock);

	m_n->tu = 64;
	compute_m_n(data_clock,
		    link_clock * nlanes * 8,
		    &m_n->gmch_m, &m_n->gmch_n,
		    constant_n);

	compute_m_n(pixel_clock, link_clock,
		    &m_n->link_m, &m_n->link_n,
		    constant_n);
}
4158
/*
 * Sanitize the VBT's LVDS SSC setting against what the BIOS actually
 * programmed into the hardware, trusting the hardware state when they
 * disagree.
 */
static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
{
	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker.  Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		bool bios_lvds_use_ssc = intel_de_read(dev_priv,
						       PCH_DREF_CONTROL) &
			DREF_SSC1_ENABLE;

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			drm_dbg_kms(&dev_priv->drm,
				    "SSC %s by BIOS, overriding VBT which says %s\n",
				    enableddisabled(bios_lvds_use_ssc),
				    enableddisabled(dev_priv->vbt.lvds_use_ssc));
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}
}
4181
/* Program the PCH transcoder data/link M1/N1 registers for this pipe. */
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* TU size shares the data M register with the M value. */
	intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
		       TU_SIZE(m_n->tu) | m_n->gmch_m);
	intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
4195
4196 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
4197                                  enum transcoder transcoder)
4198 {
4199         if (IS_HASWELL(dev_priv))
4200                 return transcoder == TRANSCODER_EDP;
4201
4202         /*
4203          * Strictly speaking some registers are available before
4204          * gen7, but we only support DRRS on gen7+
4205          */
4206         return DISPLAY_VER(dev_priv) == 7 || IS_CHERRYVIEW(dev_priv);
4207 }
4208
/*
 * Program the CPU transcoder data/link M/N registers. On gen5+ the
 * registers are indexed by transcoder (and the M2/N2 set is written when
 * DRRS is in use); on older (g4x-style) hardware they are indexed by pipe.
 */
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (DISPLAY_VER(dev_priv) >= 5) {
		intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
			       TU_SIZE(m_n->tu) | m_n->gmch_m);
		intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
			       m_n->gmch_n);
		intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
			       m_n->link_m);
		intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
			       m_n->link_n);
		/*
		 * M2_N2 registers are set only if DRRS is supported
		 * (to make sure the registers are not unnecessarily accessed).
		 */
		if (m2_n2 && crtc_state->has_drrs &&
		    transcoder_has_m2_n2(dev_priv, transcoder)) {
			intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
				       TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
				       m2_n2->gmch_n);
			intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
				       m2_n2->link_m);
			intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
				       m2_n2->link_n);
		}
	} else {
		/* Pre-ilk: per-pipe (g4x-style) M/N registers. */
		intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
			       TU_SIZE(m_n->tu) | m_n->gmch_m);
		intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
		intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
4250
/*
 * Program the DP link M/N values, selecting between the M1/N1 and
 * M2/N2 (DRRS downclock) divider sets from the crtc state.
 */
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
{
	const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	if (m_n == M1_N1) {
		dp_m_n = &crtc_state->dp_m_n;
		dp_m2_n2 = &crtc_state->dp_m2_n2;
	} else if (m_n == M2_N2) {

		/*
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
		 * needs to be programmed into M1_N1.
		 */
		dp_m_n = &crtc_state->dp_m2_n2;
	} else {
		drm_err(&i915->drm, "Unsupported divider value\n");
		return;
	}

	/*
	 * NOTE(review): the PCH path always programs dp_m_n regardless of the
	 * divider selected above — presumably M2_N2 is never requested for
	 * PCH encoders; confirm against the callers.
	 */
	if (crtc_state->has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
}
4276
/*
 * Program the transcoder timing registers (H/V total, blank, sync) from
 * the adjusted mode. Each register packs the start/display value minus one
 * in the low 16 bits and the end/total value minus one in the high 16 bits.
 */
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to change the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		/* Keep the shift within [0, htotal). */
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/* VSYNCSHIFT only exists on gen4+. */
	if (DISPLAY_VER(dev_priv) > 3)
		intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
			       vsyncshift);

	intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
	intel_de_write(dev_priv, HBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
	intel_de_write(dev_priv, HSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));

	intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
	intel_de_write(dev_priv, VBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
	intel_de_write(dev_priv, VSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		intel_de_write(dev_priv, VTOTAL(pipe),
			       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));

}
4334
/*
 * Program PIPESRC with the pipe source size. The register packs
 * (width - 1) in the high 16 bits and (height - 1) in the low 16 bits.
 */
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	intel_de_write(dev_priv, PIPESRC(pipe),
		       ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
}
4347
4348 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
4349 {
4350         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4351         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
4352
4353         if (DISPLAY_VER(dev_priv) == 2)
4354                 return false;
4355
4356         if (DISPLAY_VER(dev_priv) >= 9 ||
4357             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
4358                 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
4359         else
4360                 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
4361 }
4362
/*
 * Read the transcoder timing registers back into adjusted_mode. The
 * registers store each value minus one: start/display in the low 16 bits,
 * end/total in the high 16 bits. Blank registers are skipped for DSI
 * transcoders.
 */
static void intel_get_transcoder_timings(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	u32 tmp;

	tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;

	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_hblank_start =
							(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_hblank_end =
						((tmp >> 16) & 0xffff) + 1;
	}
	tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;

	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_vblank_start =
							(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end =
						((tmp >> 16) & 0xffff) + 1;
	}
	tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	/*
	 * Undo the interlace adjustment applied at programming time
	 * (see intel_set_transcoder_timings()).
	 */
	if (intel_pipe_is_interlaced(pipe_config)) {
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
	}
}
4407
/*
 * Read the pipe source size from PIPESRC, which packs (width - 1) in the
 * high 16 bits and (height - 1) in the low 16 bits.
 */
static void intel_get_pipe_src_size(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
}
4419
/*
 * Assemble and program the PIPECONF register for pre-ilk (i9xx-style)
 * pipes: double wide, dithering/bpc, interlace mode, color range, gamma
 * mode and frame start delay.
 */
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 pipeconf;

	pipeconf = 0;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;

	if (crtc_state->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (crtc_state->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/* gen3- and SDVO outputs use the field-indication variant */
		if (DISPLAY_VER(dev_priv) < 4 ||
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else {
		pipeconf |= PIPECONF_PROGRESSIVE;
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	     crtc_state->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	pipeconf |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);

	intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
	intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
}
4480
4481 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
4482 {
4483         if (IS_I830(dev_priv))
4484                 return false;
4485
4486         return DISPLAY_VER(dev_priv) >= 4 ||
4487                 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
4488 }
4489
/*
 * Read the GMCH panel fitter state into the crtc state, but only if the
 * pfit is enabled and attached to this crtc's pipe.
 */
static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	if (!i9xx_has_pfit(dev_priv))
		return;

	tmp = intel_de_read(dev_priv, PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe. */
	if (DISPLAY_VER(dev_priv) < 4) {
		/* Pre-gen4 the pfit is hardwired to pipe B. */
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	crtc_state->gmch_pfit.control = tmp;
	crtc_state->gmch_pfit.pgm_ratios =
		intel_de_read(dev_priv, PFIT_PGM_RATIOS);
}
4516
4517 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
4518                                struct intel_crtc_state *pipe_config)
4519 {
4520         struct drm_device *dev = crtc->base.dev;
4521         struct drm_i915_private *dev_priv = to_i915(dev);
4522         enum pipe pipe = crtc->pipe;
4523         struct dpll clock;
4524         u32 mdiv;
4525         int refclk = 100000;
4526
4527         /* In case of DSI, DPLL will not be used */
4528         if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
4529                 return;
4530
4531         vlv_dpio_get(dev_priv);
4532         mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
4533         vlv_dpio_put(dev_priv);
4534
4535         clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
4536         clock.m2 = mdiv & DPIO_M2DIV_MASK;
4537         clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
4538         clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
4539         clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
4540
4541         pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
4542 }
4543
4544 static void chv_crtc_clock_get(struct intel_crtc *crtc,
4545                                struct intel_crtc_state *pipe_config)
4546 {
4547         struct drm_device *dev = crtc->base.dev;
4548         struct drm_i915_private *dev_priv = to_i915(dev);
4549         enum pipe pipe = crtc->pipe;
4550         enum dpio_channel port = vlv_pipe_to_channel(pipe);
4551         struct dpll clock;
4552         u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
4553         int refclk = 100000;
4554
4555         /* In case of DSI, DPLL will not be used */
4556         if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
4557                 return;
4558
4559         vlv_dpio_get(dev_priv);
4560         cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
4561         pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
4562         pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
4563         pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
4564         pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
4565         vlv_dpio_put(dev_priv);
4566
4567         clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
4568         clock.m2 = (pll_dw0 & 0xff) << 22;
4569         if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
4570                 clock.m2 |= pll_dw2 & 0x3fffff;
4571         clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
4572         clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
4573         clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
4574
4575         pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
4576 }
4577
4578 static enum intel_output_format
4579 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
4580 {
4581         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4582         u32 tmp;
4583
4584         tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
4585
4586         if (tmp & PIPEMISC_YUV420_ENABLE) {
4587                 /* We support 4:2:0 in full blend mode only */
4588                 drm_WARN_ON(&dev_priv->drm,
4589                             (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
4590
4591                 return INTEL_OUTPUT_FORMAT_YCBCR420;
4592         } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
4593                 return INTEL_OUTPUT_FORMAT_YCBCR444;
4594         } else {
4595                 return INTEL_OUTPUT_FORMAT_RGB;
4596         }
4597 }
4598
4599 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
4600 {
4601         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4602         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
4603         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4604         enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
4605         u32 tmp;
4606
4607         tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
4608
4609         if (tmp & DISPPLANE_GAMMA_ENABLE)
4610                 crtc_state->gamma_enable = true;
4611
4612         if (!HAS_GMCH(dev_priv) &&
4613             tmp & DISPPLANE_PIPE_CSC_ENABLE)
4614                 crtc_state->csc_enable = true;
4615 }
4616
/*
 * Read out the current hardware state of @crtc into @pipe_config.
 *
 * Returns true if the pipe is enabled and the readout is valid,
 * false if the pipe (or its power domain) is off.
 *
 * Holds a display power wakeref for the pipe's power domain for the
 * duration of the register reads; released via the "out" label on
 * every path.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	/* Defaults for fields not read out below */
	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* Only these platforms encode the bpc in PIPECONF */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
		PIPECONF_GAMMA_MODE_SHIFT;

	if (IS_CHERRYVIEW(dev_priv))
		pipe_config->cgm_mode = intel_de_read(dev_priv,
						      CGM_PIPE_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	if (DISPLAY_VER(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_transcoder_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(pipe_config);

	/* Read out the pixel multiplier; the encoding varies per gen */
	if (DISPLAY_VER(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
							DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
							       FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
							       FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
4738
/*
 * Program the PCH display reference clock tree (PCH_DREF_CONTROL) on
 * IBX/CPT PCH platforms according to the encoders present: enable the
 * SSC source and/or the CPU (eDP) reference output as needed, or turn
 * them off when nothing uses them. The final register value is
 * precomputed first; each source is then switched one step at a time
 * with 200us settle delays, per the comments below.
 */
static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (encoder->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	/* On IBX an external CK505 clock chip may be present (per VBT);
	 * SSC is only usable when it is. Non-IBX can always do SSC. */
	if (HAS_PCH_IBX(dev_priv)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
		u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	drm_dbg_kms(&dev_priv->drm,
		    "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		    has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else if (using_ssc_source) {
		/* No panel, but a PCH DPLL is already consuming SSC:
		 * keep the SSC source running for it. */
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output  */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				drm_dbg_kms(&dev_priv->drm,
					    "Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);
	} else {
		drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);

		if (!using_ssc_source) {
			drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
			intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
			udelay(200);
		}
	}

	/* The staged writes above must converge on the precomputed value */
	BUG_ON(val != final);
}
4907
4908 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
4909 {
4910         u32 tmp;
4911
4912         tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
4913         tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
4914         intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
4915
4916         if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
4917                         FDI_MPHY_IOSFSB_RESET_STATUS, 100))
4918                 drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
4919
4920         tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
4921         tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
4922         intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
4923
4924         if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
4925                          FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
4926                 drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
4927 }
4928
/*
 * WaMPhyProgramming:hsw
 *
 * FDI mPHY tuning over the sideband (SBI_MPHY) interface. The register
 * offsets and values are magic numbers taken from the workaround;
 * note that every write appears twice at paired 0x20xx/0x21xx offsets,
 * presumably one per FDI channel — not documented here, do not change
 * one side without the other.
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
5003
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	u32 reg, tmp;

	/* Sanitize impossible flag combinations rather than bailing */
	if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
		     "FDI requires downspread\n"))
		with_spread = true;
	if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
		     with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->sb_lock);

	/* Un-disable SSC but keep PATHALT set while it settles */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		/* Clear PATHALT to actually apply the spread */
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* Buffer-enable config lives at a different offset on LP PCH */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
5049
/* Sequence to disable CLKOUT_DP */
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	u32 reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* Buffer-enable config lives at a different offset on LP PCH */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		/* Set PATHALT first (and let it settle) before disabling */
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
5075
/* Map a bend amount (-50..50, in steps of 5) to a table index (0..20) */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * SBI_SSCDIVINTPHASE low-word values for each supported clock bend
 * amount, indexed via BEND_IDX(). Magic values — presumably from the
 * platform programming spec; see lpt_bend_clkout_dp().
 */
static const u16 sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};
5101
5102 /*
5103  * Bend CLKOUT_DP
5104  * steps -50 to 50 inclusive, in steps of 5
5105  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
5106  * change in clock period = -(steps / 10) * 5.787 ps
5107  */
5108 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
5109 {
5110         u32 tmp;
5111         int idx = BEND_IDX(steps);
5112
5113         if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
5114                 return;
5115
5116         if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
5117                 return;
5118
5119         mutex_lock(&dev_priv->sb_lock);
5120
5121         if (steps % 10 != 0)
5122                 tmp = 0xAAAAAAAB;
5123         else
5124                 tmp = 0x00000000;
5125         intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
5126
5127         tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
5128         tmp &= 0xffff0000;
5129         tmp |= sscdivintphase[idx];
5130         intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
5131
5132         mutex_unlock(&dev_priv->sb_lock);
5133 }
5134
5135 #undef BEND_IDX
5136
5137 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
5138 {
5139         u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
5140         u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
5141
5142         if ((ctl & SPLL_PLL_ENABLE) == 0)
5143                 return false;
5144
5145         if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
5146             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
5147                 return true;
5148
5149         if (IS_BROADWELL(dev_priv) &&
5150             (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
5151                 return true;
5152
5153         return false;
5154 }
5155
5156 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
5157                                enum intel_dpll_id id)
5158 {
5159         u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
5160         u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
5161
5162         if ((ctl & WRPLL_PLL_ENABLE) == 0)
5163                 return false;
5164
5165         if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
5166                 return true;
5167
5168         if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
5169             (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
5170             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
5171                 return true;
5172
5173         return false;
5174 }
5175
5176 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
5177 {
5178         struct intel_encoder *encoder;
5179         bool has_fdi = false;
5180
5181         for_each_intel_encoder(&dev_priv->drm, encoder) {
5182                 switch (encoder->type) {
5183                 case INTEL_OUTPUT_ANALOG:
5184                         has_fdi = true;
5185                         break;
5186                 default:
5187                         break;
5188                 }
5189         }
5190
5191         /*
5192          * The BIOS may have decided to use the PCH SSC
5193          * reference so we must not disable it until the
5194          * relevant PLLs have stopped relying on it. We'll
5195          * just leave the PCH SSC reference enabled in case
5196          * any active PLL is using it. It will get disabled
5197          * after runtime suspend if we don't have FDI.
5198          *
5199          * TODO: Move the whole reference clock handling
5200          * to the modeset sequence proper so that we can
5201          * actually enable/disable/reconfigure these things
5202          * safely. To do that we need to introduce a real
5203          * clock hierarchy. That would also allow us to do
5204          * clock bending finally.
5205          */
5206         dev_priv->pch_ssc_use = 0;
5207
5208         if (spll_uses_pch_ssc(dev_priv)) {
5209                 drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
5210                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
5211         }
5212
5213         if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
5214                 drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
5215                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
5216         }
5217
5218         if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
5219                 drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
5220                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
5221         }
5222
5223         if (dev_priv->pch_ssc_use)
5224                 return;
5225
5226         if (has_fdi) {
5227                 lpt_bend_clkout_dp(dev_priv, 0);
5228                 lpt_enable_clkout_dp(dev_priv, true, true);
5229         } else {
5230                 lpt_disable_clkout_dp(dev_priv);
5231         }
5232 }
5233
5234 /*
5235  * Initialize reference clocks when the driver loads
5236  */
5237 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
5238 {
5239         if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
5240                 ilk_init_pch_refclk(dev_priv);
5241         else if (HAS_PCH_LPT(dev_priv))
5242                 lpt_init_pch_refclk(dev_priv);
5243 }
5244
5245 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
5246 {
5247         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5248         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5249         enum pipe pipe = crtc->pipe;
5250         u32 val;
5251
5252         val = 0;
5253
5254         switch (crtc_state->pipe_bpp) {
5255         case 18:
5256                 val |= PIPECONF_6BPC;
5257                 break;
5258         case 24:
5259                 val |= PIPECONF_8BPC;
5260                 break;
5261         case 30:
5262                 val |= PIPECONF_10BPC;
5263                 break;
5264         case 36:
5265                 val |= PIPECONF_12BPC;
5266                 break;
5267         default:
5268                 /* Case prevented by intel_choose_pipe_bpp_dither. */
5269                 BUG();
5270         }
5271
5272         if (crtc_state->dither)
5273                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
5274
5275         if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
5276                 val |= PIPECONF_INTERLACED_ILK;
5277         else
5278                 val |= PIPECONF_PROGRESSIVE;
5279
5280         /*
5281          * This would end up with an odd purple hue over
5282          * the entire display. Make sure we don't do it.
5283          */
5284         drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
5285                     crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
5286
5287         if (crtc_state->limited_color_range &&
5288             !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
5289                 val |= PIPECONF_COLOR_RANGE_SELECT;
5290
5291         if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
5292                 val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
5293
5294         val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
5295
5296         val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
5297
5298         intel_de_write(dev_priv, PIPECONF(pipe), val);
5299         intel_de_posting_read(dev_priv, PIPECONF(pipe));
5300 }
5301
5302 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
5303 {
5304         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5305         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5306         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5307         u32 val = 0;
5308
5309         if (IS_HASWELL(dev_priv) && crtc_state->dither)
5310                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
5311
5312         if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
5313                 val |= PIPECONF_INTERLACED_ILK;
5314         else
5315                 val |= PIPECONF_PROGRESSIVE;
5316
5317         if (IS_HASWELL(dev_priv) &&
5318             crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
5319                 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
5320
5321         intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
5322         intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
5323 }
5324
/*
 * Program PIPEMISC for the pipe of @crtc_state on bdw+: bits per
 * color, dithering, YUV output format and related pixel pipeline
 * options.
 */
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;

	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 val = 0;
	int i;

	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPEMISC_6_BPC;
		break;
	case 24:
		val |= PIPEMISC_8_BPC;
		break;
	case 30:
		val |= PIPEMISC_10_BPC;
		break;
	case 36:
		/* Port output 12BPC defined for ADLP+ */
		if (DISPLAY_VER(dev_priv) > 12)
			val |= PIPEMISC_12_BPC_ADLP;
		break;
	default:
		MISSING_CASE(crtc_state->pipe_bpp);
		break;
	}

	if (crtc_state->dither)
		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;

	/* YCbCr 4:2:0 additionally needs the 4:2:0 downsampler enabled. */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		val |= PIPEMISC_YUV420_ENABLE |
			PIPEMISC_YUV420_MODE_FULL_BLEND;

	/*
	 * icl+: set the HDR mode precision bit only when every active
	 * plane is either an HDR-capable plane or the cursor.
	 */
	if (DISPLAY_VER(dev_priv) >= 11 &&
	    (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
					   BIT(PLANE_CURSOR))) == 0)
		val |= PIPEMISC_HDR_MODE_PRECISION;

	if (DISPLAY_VER(dev_priv) >= 12)
		val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;

	if (IS_ALDERLAKE_P(dev_priv)) {
		bool scaler_in_use = false;

		/* Is any scaler of this pipe currently in use? */
		for (i = 0; i < crtc->num_scalers; i++) {
			if (!scaler_state->scalers[i].in_use)
				continue;

			scaler_in_use = true;
			break;
		}

		/* Select the underrun bubble counter based on scaler usage. */
		intel_de_rmw(dev_priv, PIPE_MISC2(crtc->pipe),
			     PIPE_MISC2_UNDERRUN_BUBBLE_COUNTER_MASK,
			     scaler_in_use ? PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN :
			     PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS);
	}

	intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
}
5393
/*
 * Read back the pipe bpp (3 color components x bits per color) from
 * the PIPEMISC BPC field of @crtc's pipe.
 *
 * Returns the bpp value, or 0 for an unexpected/unsupported BPC
 * encoding (after logging a MISSING_CASE).
 */
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));

	switch (tmp & PIPEMISC_BPC_MASK) {
	case PIPEMISC_6_BPC:
		return 18;
	case PIPEMISC_8_BPC:
		return 24;
	case PIPEMISC_10_BPC:
		return 30;
	/*
	 * PORT OUTPUT 12 BPC defined for ADLP+.
	 *
	 * TODO:
	 * For previous platforms with DSI interface, bits 5:7
	 * are used for storing pipe_bpp irrespective of dithering.
	 * Since the value of 12 BPC is not defined for these bits
	 * on older platforms, need to find a workaround for 12 BPC
	 * MIPI DSI HW readout.
	 */
	case PIPEMISC_12_BPC_ADLP:
		if (DISPLAY_VER(dev_priv) > 12)
			return 36;
		fallthrough;
	default:
		MISSING_CASE(tmp);
		return 0;
	}
}
5427
5428 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
5429 {
5430         /*
5431          * Account for spread spectrum to avoid
5432          * oversubscribing the link. Max center spread
5433          * is 2.5%; use 5% for safety's sake.
5434          */
5435         u32 bps = target_clock * bpp * 21 / 20;
5436         return DIV_ROUND_UP(bps, link_bw * 8);
5437 }
5438
5439 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
5440                                          struct intel_link_m_n *m_n)
5441 {
5442         struct drm_device *dev = crtc->base.dev;
5443         struct drm_i915_private *dev_priv = to_i915(dev);
5444         enum pipe pipe = crtc->pipe;
5445
5446         m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
5447         m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
5448         m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
5449                 & ~TU_SIZE_MASK;
5450         m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
5451         m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
5452                     & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5453 }
5454
5455 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
5456                                          enum transcoder transcoder,
5457                                          struct intel_link_m_n *m_n,
5458                                          struct intel_link_m_n *m2_n2)
5459 {
5460         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5461         enum pipe pipe = crtc->pipe;
5462
5463         if (DISPLAY_VER(dev_priv) >= 5) {
5464                 m_n->link_m = intel_de_read(dev_priv,
5465                                             PIPE_LINK_M1(transcoder));
5466                 m_n->link_n = intel_de_read(dev_priv,
5467                                             PIPE_LINK_N1(transcoder));
5468                 m_n->gmch_m = intel_de_read(dev_priv,
5469                                             PIPE_DATA_M1(transcoder))
5470                         & ~TU_SIZE_MASK;
5471                 m_n->gmch_n = intel_de_read(dev_priv,
5472                                             PIPE_DATA_N1(transcoder));
5473                 m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
5474                             & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5475
5476                 if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
5477                         m2_n2->link_m = intel_de_read(dev_priv,
5478                                                       PIPE_LINK_M2(transcoder));
5479                         m2_n2->link_n = intel_de_read(dev_priv,
5480                                                              PIPE_LINK_N2(transcoder));
5481                         m2_n2->gmch_m = intel_de_read(dev_priv,
5482                                                              PIPE_DATA_M2(transcoder))
5483                                         & ~TU_SIZE_MASK;
5484                         m2_n2->gmch_n = intel_de_read(dev_priv,
5485                                                              PIPE_DATA_N2(transcoder));
5486                         m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
5487                                         & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5488                 }
5489         } else {
5490                 m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
5491                 m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
5492                 m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
5493                         & ~TU_SIZE_MASK;
5494                 m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
5495                 m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
5496                             & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5497         }
5498 }
5499
5500 void intel_dp_get_m_n(struct intel_crtc *crtc,
5501                       struct intel_crtc_state *pipe_config)
5502 {
5503         if (pipe_config->has_pch_encoder)
5504                 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
5505         else
5506                 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
5507                                              &pipe_config->dp_m_n,
5508                                              &pipe_config->dp_m2_n2);
5509 }
5510
/* Read out the FDI link M/N values for @crtc from its CPU transcoder. */
static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
5517
5518 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
5519                                   u32 pos, u32 size)
5520 {
5521         drm_rect_init(&crtc_state->pch_pfit.dst,
5522                       pos >> 16, pos & 0xffff,
5523                       size >> 16, size & 0xffff);
5524 }
5525
/*
 * Read out the panel fitter (pipe scaler) state on skl+.
 *
 * Scans this pipe's scalers for one that is enabled and bound to
 * the pipe output (not to a plane), records its window into the
 * pch_pfit state and updates the scaler bookkeeping accordingly.
 */
static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
	int id = -1;
	int i;

	/* find scaler attached to this pipe */
	for (i = 0; i < crtc->num_scalers; i++) {
		u32 ctl, pos, size;

		ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
		/* Skip scalers that are disabled or assigned to a plane. */
		if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
			continue;

		id = i;
		crtc_state->pch_pfit.enabled = true;

		pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
		size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));

		ilk_get_pfit_pos_size(crtc_state, pos, size);

		scaler_state->scalers[i].in_use = true;
		break;
	}

	/* Record the scaler and mark this CRTC as a user only if one was found. */
	scaler_state->scaler_id = id;
	if (id >= 0)
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	else
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
}
5560
/*
 * Read out the ilk-style panel fitter state for this pipe, if the
 * fitter is enabled, into crtc_state->pch_pfit.
 */
static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 ctl, pos, size;

	ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
	if ((ctl & PF_ENABLE) == 0)
		return;

	crtc_state->pch_pfit.enabled = true;

	pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
	size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));

	ilk_get_pfit_pos_size(crtc_state, pos, size);

	/*
	 * We currently do not free assignments of panel fitters on
	 * ivb/hsw (since we don't use the higher upscaling modes which
	 * differentiates them) so just WARN about this case for now.
	 */
	drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 7 &&
		    (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
}
5586
5587 static bool ilk_get_pipe_config(struct intel_crtc *crtc,
5588                                 struct intel_crtc_state *pipe_config)
5589 {
5590         struct drm_device *dev = crtc->base.dev;
5591         struct drm_i915_private *dev_priv = to_i915(dev);
5592         enum intel_display_power_domain power_domain;
5593         intel_wakeref_t wakeref;
5594         u32 tmp;
5595         bool ret;
5596
5597         power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
5598         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
5599         if (!wakeref)
5600                 return false;
5601
5602         pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
5603         pipe_config->shared_dpll = NULL;
5604
5605         ret = false;
5606         tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
5607         if (!(tmp & PIPECONF_ENABLE))
5608                 goto out;
5609
5610         switch (tmp & PIPECONF_BPC_MASK) {
5611         case PIPECONF_6BPC:
5612                 pipe_config->pipe_bpp = 18;
5613                 break;
5614         case PIPECONF_8BPC:
5615                 pipe_config->pipe_bpp = 24;
5616                 break;
5617         case PIPECONF_10BPC:
5618                 pipe_config->pipe_bpp = 30;
5619                 break;
5620         case PIPECONF_12BPC:
5621                 pipe_config->pipe_bpp = 36;
5622                 break;
5623         default:
5624                 break;
5625         }
5626
5627         if (tmp & PIPECONF_COLOR_RANGE_SELECT)
5628                 pipe_config->limited_color_range = true;
5629
5630         switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
5631         case PIPECONF_OUTPUT_COLORSPACE_YUV601:
5632         case PIPECONF_OUTPUT_COLORSPACE_YUV709:
5633                 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
5634                 break;
5635         default:
5636                 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
5637                 break;
5638         }
5639
5640         pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
5641                 PIPECONF_GAMMA_MODE_SHIFT;
5642
5643         pipe_config->csc_mode = intel_de_read(dev_priv,
5644                                               PIPE_CSC_MODE(crtc->pipe));
5645
5646         i9xx_get_pipe_color_config(pipe_config);
5647         intel_color_get_config(pipe_config);
5648
5649         if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
5650                 struct intel_shared_dpll *pll;
5651                 enum intel_dpll_id pll_id;
5652                 bool pll_active;
5653
5654                 pipe_config->has_pch_encoder = true;
5655
5656                 tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe));
5657                 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
5658                                           FDI_DP_PORT_WIDTH_SHIFT) + 1;
5659
5660                 ilk_get_fdi_m_n_config(crtc, pipe_config);
5661
5662                 if (HAS_PCH_IBX(dev_priv)) {
5663                         /*
5664                          * The pipe->pch transcoder and pch transcoder->pll
5665                          * mapping is fixed.
5666                          */
5667                         pll_id = (enum intel_dpll_id) crtc->pipe;
5668                 } else {
5669                         tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
5670                         if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
5671                                 pll_id = DPLL_ID_PCH_PLL_B;
5672                         else
5673                                 pll_id= DPLL_ID_PCH_PLL_A;
5674                 }
5675
5676                 pipe_config->shared_dpll =
5677                         intel_get_shared_dpll_by_id(dev_priv, pll_id);
5678                 pll = pipe_config->shared_dpll;
5679
5680                 pll_active = intel_dpll_get_hw_state(dev_priv, pll,
5681                                                      &pipe_config->dpll_hw_state);
5682                 drm_WARN_ON(dev, !pll_active);
5683
5684                 tmp = pipe_config->dpll_hw_state.dpll;
5685                 pipe_config->pixel_multiplier =
5686                         ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
5687                          >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
5688
5689                 ilk_pch_clock_get(crtc, pipe_config);
5690         } else {
5691                 pipe_config->pixel_multiplier = 1;
5692         }
5693
5694         intel_get_transcoder_timings(crtc, pipe_config);
5695         intel_get_pipe_src_size(crtc, pipe_config);
5696
5697         ilk_get_pfit_config(pipe_config);
5698
5699         ret = true;
5700
5701 out:
5702         intel_display_power_put(dev_priv, power_domain, wakeref);
5703
5704         return ret;
5705 }
5706
/*
 * Determine which CPU transcoder drives @crtc and whether that
 * transcoder is enabled.
 *
 * The pipe->transcoder mapping is normally fixed, but the eDP and
 * (on DISPLAY_VER >= 11) DSI transcoders can be routed to different
 * pipes, so their DDI function control registers are inspected
 * first. On success, a power domain reference for the selected
 * transcoder is added to @power_domain_set (the caller releases it).
 *
 * Returns true if the selected transcoder is enabled.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
	unsigned long enabled_panel_transcoders = 0;
	enum transcoder panel_transcoder;
	u32 tmp;

	if (DISPLAY_VER(dev_priv) >= 11)
		panel_transcoder_mask |=
			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * and DSI transcoders handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	for_each_cpu_transcoder_masked(dev_priv, panel_transcoder,
				       panel_transcoder_mask) {
		bool force_thru = false;
		enum pipe trans_pipe;

		tmp = intel_de_read(dev_priv,
				    TRANS_DDI_FUNC_CTL(panel_transcoder));
		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			continue;

		/*
		 * Log all enabled ones, only use the first one.
		 *
		 * FIXME: This won't work for two separate DSI displays.
		 */
		enabled_panel_transcoders |= BIT(panel_transcoder);
		if (enabled_panel_transcoders != BIT(panel_transcoder))
			continue;

		/* Map the transcoder's EDP input selection back to a pipe. */
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			drm_WARN(dev, 1,
				 "unknown pipe linked to transcoder %s\n",
				 transcoder_name(panel_transcoder));
			fallthrough;
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
			force_thru = true;
			fallthrough;
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_pipe = PIPE_C;
			break;
		case TRANS_DDI_EDP_INPUT_D_ONOFF:
			trans_pipe = PIPE_D;
			break;
		}

		if (trans_pipe == crtc->pipe) {
			pipe_config->cpu_transcoder = panel_transcoder;
			pipe_config->pch_pfit.force_thru = force_thru;
		}
	}

	/*
	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
	 */
	drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
		    enabled_panel_transcoders != BIT(TRANSCODER_EDP));

	if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
						       POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
		return false;

	tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
5794
/*
 * Check whether @crtc is driven by one of the BXT/GLK DSI
 * transcoders and, if so, record it in @pipe_config.
 *
 * Power domain references taken for probed transcoders are added to
 * @power_domain_set (released by the caller). Returns true if a DSI
 * transcoder is in use by this crtc.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder;
	enum port port;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		/* Fixed port->DSI transcoder mapping on these platforms. */
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
							       POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
			continue;

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
5840
/*
 * Figure out which DDI port the transcoder of @pipe_config is
 * driving, and read out the PCH/FDI state when that port is DDI E
 * on pre-gen9 hardware.
 */
static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	enum port port;
	u32 tmp;

	if (transcoder_is_dsi(cpu_transcoder)) {
		/* The DSI transcoder->port mapping is hardcoded. */
		port = (cpu_transcoder == TRANSCODER_DSI_A) ?
						PORT_A : PORT_B;
	} else {
		tmp = intel_de_read(dev_priv,
				    TRANS_DDI_FUNC_CTL(cpu_transcoder));
		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			return;
		if (DISPLAY_VER(dev_priv) >= 12)
			port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
		else
			port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
	}

	/*
	 * Haswell has only one FDI/PCH transcoder A, which is connected
	 * to DDI E. So just check whether this pipe is wired to DDI E and
	 * whether the PCH transcoder is on.
	 */
	if (DISPLAY_VER(dev_priv) < 9 &&
	    (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ilk_get_fdi_m_n_config(crtc, pipe_config);
	}
}
5879
/*
 * Read out the full hw state of a hsw+ pipe (crtc) into
 * @pipe_config: transcoder selection, DSC/bigjoiner state, timings,
 * output format, color state, linetimes, panel fitter, IPS and
 * pixel multiplier.
 *
 * Returns true if the pipe is active. All power domain references
 * taken along the way are dropped before returning.
 */
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_display_power_domain_set power_domain_set = { };
	bool active;
	u32 tmp;

	if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
						       POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);

	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
		/* A pipe cannot be both DDI- and DSI-driven. */
		drm_WARN_ON(&dev_priv->drm, active);
		active = true;
	}

	intel_dsc_get_config(pipe_config);
	if (DISPLAY_VER(dev_priv) >= 13 && !pipe_config->dsc.compression_enable)
		intel_uncompressed_joiner_get_config(pipe_config);

	if (!active) {
		/* bigjoiner slave doesn't enable transcoder */
		if (!pipe_config->bigjoiner_slave)
			goto out;

		active = true;
		pipe_config->pixel_multiplier = 1;

		/* we cannot read out most state, so don't bother.. */
		pipe_config->quirks |= PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE;
	} else if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
	    DISPLAY_VER(dev_priv) >= 11) {
		hsw_get_ddi_port_state(crtc, pipe_config);
		intel_get_transcoder_timings(crtc, pipe_config);
	}

	if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
		intel_vrr_get_config(crtc, pipe_config);

	intel_get_pipe_src_size(crtc, pipe_config);

	if (IS_HASWELL(dev_priv)) {
		/* NOTE(review): this inner tmp shadows the outer tmp above */
		u32 tmp = intel_de_read(dev_priv,
					PIPECONF(pipe_config->cpu_transcoder));

		if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		else
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	} else {
		pipe_config->output_format =
			bdw_get_pipemisc_output_format(crtc);
	}

	pipe_config->gamma_mode = intel_de_read(dev_priv,
						GAMMA_MODE(crtc->pipe));

	pipe_config->csc_mode = intel_de_read(dev_priv,
					      PIPE_CSC_MODE(crtc->pipe));

	if (DISPLAY_VER(dev_priv) >= 9) {
		tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));

		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
			pipe_config->gamma_enable = true;

		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
			pipe_config->csc_enable = true;
	} else {
		i9xx_get_pipe_color_config(pipe_config);
	}

	intel_color_get_config(pipe_config);

	tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
	pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		pipe_config->ips_linetime =
			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);

	if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
						      POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
		if (DISPLAY_VER(dev_priv) >= 9)
			skl_get_pfit_config(pipe_config);
		else
			ilk_get_pfit_config(pipe_config);
	}

	if (hsw_crtc_supports_ips(crtc)) {
		if (IS_HASWELL(dev_priv))
			pipe_config->ips_enabled = intel_de_read(dev_priv,
								 IPS_CTL) & IPS_ENABLE;
		else {
			/*
			 * We cannot readout IPS state on broadwell, set to
			 * true so we can set it to a defined state on first
			 * commit.
			 */
			pipe_config->ips_enabled = true;
		}
	}

	if (pipe_config->bigjoiner_slave) {
		/* Cannot be read out as a slave, set to 0. */
		pipe_config->pixel_multiplier = 0;
	} else if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			intel_de_read(dev_priv,
				      PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

out:
	intel_display_power_put_all_in_set(dev_priv, &power_domain_set);

	return active;
}
6005
6006 static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
6007 {
6008         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6009         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
6010
6011         if (!i915->display.get_pipe_config(crtc, crtc_state))
6012                 return false;
6013
6014         crtc_state->hw.active = true;
6015
6016         intel_crtc_readout_derived_state(crtc_state);
6017
6018         return true;
6019 }
6020
/* VESA 640x480x72Hz mode to set on the pipe for load detection */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
6026
6027 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
6028                                         struct drm_crtc *crtc)
6029 {
6030         struct drm_plane *plane;
6031         struct drm_plane_state *plane_state;
6032         int ret, i;
6033
6034         ret = drm_atomic_add_affected_planes(state, crtc);
6035         if (ret)
6036                 return ret;
6037
6038         for_each_new_plane_in_state(state, plane, plane_state, i) {
6039                 if (plane_state->crtc != crtc)
6040                         continue;
6041
6042                 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
6043                 if (ret)
6044                         return ret;
6045
6046                 drm_atomic_set_fb_for_plane(plane_state, NULL);
6047         }
6048
6049         return 0;
6050 }
6051
/*
 * intel_get_load_detect_pipe - force a pipe on for load detection
 * @connector: connector whose attached encoder should be driven
 * @old: storage for the state needed by intel_release_load_detect_pipe()
 * @ctx: modeset acquire context held by the caller
 *
 * Finds a CRTC for @connector (its currently assigned one, or the first
 * unused CRTC the encoder can drive), commits the fixed 640x480 mode on
 * it with all planes disabled, and stashes a duplicated copy of the old
 * state in @old so the previous configuration can be restored later.
 *
 * Returns true (1) on success, false (0) if no pipe could be set up,
 * or -EDEADLK if the caller must back off and restart its locking.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
                               struct intel_load_detect_pipe *old,
                               struct drm_modeset_acquire_ctx *ctx)
{
        struct intel_encoder *encoder =
                intel_attached_encoder(to_intel_connector(connector));
        struct intel_crtc *possible_crtc;
        struct intel_crtc *crtc = NULL;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_mode_config *config = &dev->mode_config;
        struct drm_atomic_state *state = NULL, *restore_state = NULL;
        struct drm_connector_state *connector_state;
        struct intel_crtc_state *crtc_state;
        int ret;

        drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
                    connector->base.id, connector->name,
                    encoder->base.base.id, encoder->base.name);

        /* Cleared so a failure leaves nothing for the release path to undo. */
        old->restore_state = NULL;

        drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));

        /*
         * Algorithm gets a little messy:
         *
         *   - if the connector already has an assigned crtc, use it (but make
         *     sure it's on first)
         *
         *   - try to find the first unused crtc that can drive this connector,
         *     and use that if we find one
         */

        /* See if we already have a CRTC for this connector */
        if (connector->state->crtc) {
                crtc = to_intel_crtc(connector->state->crtc);

                ret = drm_modeset_lock(&crtc->base.mutex, ctx);
                if (ret)
                        goto fail;

                /* Make sure the crtc and connector are running */
                goto found;
        }

        /* Find an unused one (if possible) */
        for_each_intel_crtc(dev, possible_crtc) {
                if (!(encoder->base.possible_crtcs &
                      drm_crtc_mask(&possible_crtc->base)))
                        continue;

                ret = drm_modeset_lock(&possible_crtc->base.mutex, ctx);
                if (ret)
                        goto fail;

                /* Already enabled CRTCs are skipped; drop the lock again. */
                if (possible_crtc->base.state->enable) {
                        drm_modeset_unlock(&possible_crtc->base.mutex);
                        continue;
                }

                crtc = possible_crtc;
                break;
        }

        /*
         * If we didn't find an unused CRTC, don't use any.
         */
        if (!crtc) {
                drm_dbg_kms(&dev_priv->drm,
                            "no pipe available for load-detect\n");
                ret = -ENODEV;
                goto fail;
        }

found:
        /* Two states: one to commit the load-detect mode, one to restore. */
        state = drm_atomic_state_alloc(dev);
        restore_state = drm_atomic_state_alloc(dev);
        if (!state || !restore_state) {
                ret = -ENOMEM;
                goto fail;
        }

        state->acquire_ctx = ctx;
        restore_state->acquire_ctx = ctx;

        connector_state = drm_atomic_get_connector_state(state, connector);
        if (IS_ERR(connector_state)) {
                ret = PTR_ERR(connector_state);
                goto fail;
        }

        ret = drm_atomic_set_crtc_for_connector(connector_state, &crtc->base);
        if (ret)
                goto fail;

        crtc_state = intel_atomic_get_crtc_state(state, crtc);
        if (IS_ERR(crtc_state)) {
                ret = PTR_ERR(crtc_state);
                goto fail;
        }

        crtc_state->uapi.active = true;

        ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
                                           &load_detect_mode);
        if (ret)
                goto fail;

        /* Load detection only needs the pipe itself, no planes. */
        ret = intel_modeset_disable_planes(state, &crtc->base);
        if (ret)
                goto fail;

        /* Duplicate the pre-commit state so it can be restored later. */
        ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
        if (!ret)
                ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, &crtc->base));
        if (!ret)
                ret = drm_atomic_add_affected_planes(restore_state, &crtc->base);
        if (ret) {
                drm_dbg_kms(&dev_priv->drm,
                            "Failed to create a copy of old state to restore: %i\n",
                            ret);
                goto fail;
        }

        ret = drm_atomic_commit(state);
        if (ret) {
                drm_dbg_kms(&dev_priv->drm,
                            "failed to set mode on load-detect pipe\n");
                goto fail;
        }

        old->restore_state = restore_state;
        drm_atomic_state_put(state);

        /* let the connector get through one full cycle before testing */
        intel_wait_for_vblank(dev_priv, crtc->pipe);
        return true;

fail:
        if (state) {
                drm_atomic_state_put(state);
                state = NULL;
        }
        if (restore_state) {
                drm_atomic_state_put(restore_state);
                restore_state = NULL;
        }

        /* -EDEADLK is passed through so the caller can back off and retry. */
        if (ret == -EDEADLK)
                return ret;

        return false;
}
6206
6207 void intel_release_load_detect_pipe(struct drm_connector *connector,
6208                                     struct intel_load_detect_pipe *old,
6209                                     struct drm_modeset_acquire_ctx *ctx)
6210 {
6211         struct intel_encoder *intel_encoder =
6212                 intel_attached_encoder(to_intel_connector(connector));
6213         struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
6214         struct drm_encoder *encoder = &intel_encoder->base;
6215         struct drm_atomic_state *state = old->restore_state;
6216         int ret;
6217
6218         drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6219                     connector->base.id, connector->name,
6220                     encoder->base.id, encoder->name);
6221
6222         if (!state)
6223                 return;
6224
6225         ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
6226         if (ret)
6227                 drm_dbg_kms(&i915->drm,
6228                             "Couldn't release load detect pipe: %i\n", ret);
6229         drm_atomic_state_put(state);
6230 }
6231
6232 static int i9xx_pll_refclk(struct drm_device *dev,
6233                            const struct intel_crtc_state *pipe_config)
6234 {
6235         struct drm_i915_private *dev_priv = to_i915(dev);
6236         u32 dpll = pipe_config->dpll_hw_state.dpll;
6237
6238         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
6239                 return dev_priv->vbt.lvds_ssc_freq;
6240         else if (HAS_PCH_SPLIT(dev_priv))
6241                 return 120000;
6242         else if (DISPLAY_VER(dev_priv) != 2)
6243                 return 96000;
6244         else
6245                 return 48000;
6246 }
6247
/*
 * Returns the clock of the currently programmed mode of the given pipe.
 *
 * Decodes the M/N/P dividers from the already read-out DPLL/FP register
 * values in @pipe_config->dpll_hw_state and computes port_clock from
 * them and the platform reference clock. Divider encodings differ
 * between gen2, Pineview and other platforms, hence the branching.
 */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
                                struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        u32 dpll = pipe_config->dpll_hw_state.dpll;
        u32 fp;
        struct dpll clock;
        int port_clock;
        int refclk = i9xx_pll_refclk(dev, pipe_config);

        /* FP0/FP1 hold alternate divider sets; DPLL selects which is live. */
        if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
                fp = pipe_config->dpll_hw_state.fp0;
        else
                fp = pipe_config->dpll_hw_state.fp1;

        clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
        if (IS_PINEVIEW(dev_priv)) {
                /* Pineview's N field is treated as a bit position (ffs). */
                clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
                clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
        } else {
                clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
                clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
        }

        if (DISPLAY_VER(dev_priv) != 2) {
                /* P1 is stored as a set bit, decoded with ffs(). */
                if (IS_PINEVIEW(dev_priv))
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
                                DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
                else
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
                               DPLL_FPA01_P1_POST_DIV_SHIFT);

                /* P2 depends on the DPLL operating mode. */
                switch (dpll & DPLL_MODE_MASK) {
                case DPLLB_MODE_DAC_SERIAL:
                        clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
                                5 : 10;
                        break;
                case DPLLB_MODE_LVDS:
                        clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
                                7 : 14;
                        break;
                default:
                        /* Unknown mode: leave port_clock untouched. */
                        drm_dbg_kms(&dev_priv->drm,
                                    "Unknown DPLL mode %08x in programmed "
                                    "mode\n", (int)(dpll & DPLL_MODE_MASK));
                        return;
                }

                if (IS_PINEVIEW(dev_priv))
                        port_clock = pnv_calc_dpll_params(refclk, &clock);
                else
                        port_clock = i9xx_calc_dpll_params(refclk, &clock);
        } else {
                /* gen2: different P1/P2 encodings, plus an i85x LVDS quirk. */
                enum pipe lvds_pipe;

                if (IS_I85X(dev_priv) &&
                    intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
                    lvds_pipe == crtc->pipe) {
                        u32 lvds = intel_de_read(dev_priv, LVDS);

                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
                                       DPLL_FPA01_P1_POST_DIV_SHIFT);

                        if (lvds & LVDS_CLKB_POWER_UP)
                                clock.p2 = 7;
                        else
                                clock.p2 = 14;
                } else {
                        if (dpll & PLL_P1_DIVIDE_BY_TWO)
                                clock.p1 = 2;
                        else {
                                clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
                                            DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
                        }
                        if (dpll & PLL_P2_DIVIDE_BY_4)
                                clock.p2 = 4;
                        else
                                clock.p2 = 2;
                }

                port_clock = i9xx_calc_dpll_params(refclk, &clock);
        }

        /*
         * This value includes pixel_multiplier. We will use
         * port_clock to compute adjusted_mode.crtc_clock in the
         * encoder's get_config() function.
         */
        pipe_config->port_clock = port_clock;
}
6340
6341 int intel_dotclock_calculate(int link_freq,
6342                              const struct intel_link_m_n *m_n)
6343 {
6344         /*
6345          * The calculation for the data clock is:
6346          * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
6347          * But we want to avoid losing precison if possible, so:
6348          * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
6349          *
6350          * and the link clock is simpler:
6351          * link_clock = (m * link_clock) / n
6352          */
6353
6354         if (!m_n->link_n)
6355                 return 0;
6356
6357         return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
6358 }
6359
6360 static void ilk_pch_clock_get(struct intel_crtc *crtc,
6361                               struct intel_crtc_state *pipe_config)
6362 {
6363         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6364
6365         /* read out port_clock from the DPLL */
6366         i9xx_crtc_clock_get(crtc, pipe_config);
6367
6368         /*
6369          * In case there is an active pipe without active ports,
6370          * we may need some idea for the dotclock anyway.
6371          * Calculate one based on the FDI configuration.
6372          */
6373         pipe_config->hw.adjusted_mode.crtc_clock =
6374                 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
6375                                          &pipe_config->fdi_m_n);
6376 }
6377
6378 /* Returns the currently programmed mode of the given encoder. */
6379 struct drm_display_mode *
6380 intel_encoder_current_mode(struct intel_encoder *encoder)
6381 {
6382         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
6383         struct intel_crtc_state *crtc_state;
6384         struct drm_display_mode *mode;
6385         struct intel_crtc *crtc;
6386         enum pipe pipe;
6387
6388         if (!encoder->get_hw_state(encoder, &pipe))
6389                 return NULL;
6390
6391         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
6392
6393         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
6394         if (!mode)
6395                 return NULL;
6396
6397         crtc_state = intel_crtc_state_alloc(crtc);
6398         if (!crtc_state) {
6399                 kfree(mode);
6400                 return NULL;
6401         }
6402
6403         if (!intel_crtc_get_pipe_config(crtc_state)) {
6404                 kfree(crtc_state);
6405                 kfree(mode);
6406                 return NULL;
6407         }
6408
6409         intel_encoder_get_config(encoder, crtc_state);
6410
6411         intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
6412
6413         kfree(crtc_state);
6414
6415         return mode;
6416 }
6417
6418 /**
6419  * intel_wm_need_update - Check whether watermarks need updating
6420  * @cur: current plane state
6421  * @new: new plane state
6422  *
6423  * Check current plane state versus the new one to determine whether
6424  * watermarks need to be recalculated.
6425  *
6426  * Returns true or false.
6427  */
6428 static bool intel_wm_need_update(const struct intel_plane_state *cur,
6429                                  struct intel_plane_state *new)
6430 {
6431         /* Update watermarks on tiling or size changes. */
6432         if (new->uapi.visible != cur->uapi.visible)
6433                 return true;
6434
6435         if (!cur->hw.fb || !new->hw.fb)
6436                 return false;
6437
6438         if (cur->hw.fb->modifier != new->hw.fb->modifier ||
6439             cur->hw.rotation != new->hw.rotation ||
6440             drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
6441             drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
6442             drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
6443             drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
6444                 return true;
6445
6446         return false;
6447 }
6448
6449 static bool needs_scaling(const struct intel_plane_state *state)
6450 {
6451         int src_w = drm_rect_width(&state->uapi.src) >> 16;
6452         int src_h = drm_rect_height(&state->uapi.src) >> 16;
6453         int dst_w = drm_rect_width(&state->uapi.dst);
6454         int dst_h = drm_rect_height(&state->uapi.dst);
6455
6456         return (src_w != dst_w || src_h != dst_h);
6457 }
6458
/*
 * intel_plane_atomic_calc_changes - compute derived plane-change state
 * @old_crtc_state: CRTC state before the update
 * @crtc_state: new CRTC state; wm/cxsr/fb_bits flags are set here
 * @old_plane_state: plane state before the update
 * @plane_state: new plane state
 *
 * From old/new plane visibility and geometry, works out which watermark
 * updates (pre/post), cxsr disables, LP watermark disables and
 * frontbuffer bits this commit requires, and records them in
 * @crtc_state. Also runs scaler setup on gen9+.
 *
 * Returns 0 on success or a negative error code from scaler setup.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
                                    struct intel_crtc_state *crtc_state,
                                    const struct intel_plane_state *old_plane_state,
                                    struct intel_plane_state *plane_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        bool mode_changed = intel_crtc_needs_modeset(crtc_state);
        bool was_crtc_enabled = old_crtc_state->hw.active;
        bool is_crtc_enabled = crtc_state->hw.active;
        bool turn_off, turn_on, visible, was_visible;
        int ret;

        /* gen9+ non-cursor planes go through the scaler setup. */
        if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
                ret = skl_update_scaler_plane(crtc_state, plane_state);
                if (ret)
                        return ret;
        }

        was_visible = old_plane_state->uapi.visible;
        visible = plane_state->uapi.visible;

        /* A plane cannot have been visible on an inactive CRTC. */
        if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
                was_visible = false;

        /*
         * Visibility is calculated as if the crtc was on, but
         * after scaler setup everything depends on it being off
         * when the crtc isn't active.
         *
         * FIXME this is wrong for watermarks. Watermarks should also
         * be computed as if the pipe would be active. Perhaps move
         * per-plane wm computation to the .check_plane() hook, and
         * only combine the results from all planes in the current place?
         */
        if (!is_crtc_enabled) {
                intel_plane_set_invisible(crtc_state, plane_state);
                visible = false;
        }

        /* Invisible before and after: nothing to do. */
        if (!was_visible && !visible)
                return 0;

        turn_off = was_visible && (!visible || mode_changed);
        turn_on = visible && (!was_visible || mode_changed);

        drm_dbg_atomic(&dev_priv->drm,
                       "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
                       crtc->base.base.id, crtc->base.name,
                       plane->base.base.id, plane->base.name,
                       was_visible, visible,
                       turn_off, turn_on, mode_changed);

        if (turn_on) {
                if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
                        crtc_state->update_wm_pre = true;

                /* must disable cxsr around plane enable/disable */
                if (plane->id != PLANE_CURSOR)
                        crtc_state->disable_cxsr = true;
        } else if (turn_off) {
                if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
                        crtc_state->update_wm_post = true;

                /* must disable cxsr around plane enable/disable */
                if (plane->id != PLANE_CURSOR)
                        crtc_state->disable_cxsr = true;
        } else if (intel_wm_need_update(old_plane_state, plane_state)) {
                if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
                        /* FIXME bollocks */
                        crtc_state->update_wm_pre = true;
                        crtc_state->update_wm_post = true;
                }
        }

        if (visible || was_visible)
                crtc_state->fb_bits |= plane->frontbuffer_bit;

        /*
         * ILK/SNB DVSACNTR/Sprite Enable
         * IVB SPR_CTL/Sprite Enable
         * "When in Self Refresh Big FIFO mode, a write to enable the
         *  plane will be internally buffered and delayed while Big FIFO
         *  mode is exiting."
         *
         * Which means that enabling the sprite can take an extra frame
         * when we start in big FIFO mode (LP1+). Thus we need to drop
         * down to LP0 and wait for vblank in order to make sure the
         * sprite gets enabled on the next vblank after the register write.
         * Doing otherwise would risk enabling the sprite one frame after
         * we've already signalled flip completion. We can resume LP1+
         * once the sprite has been enabled.
         *
         *
         * WaCxSRDisabledForSpriteScaling:ivb
         * IVB SPR_SCALE/Scaling Enable
         * "Low Power watermarks must be disabled for at least one
         *  frame before enabling sprite scaling, and kept disabled
         *  until sprite scaling is disabled."
         *
         * ILK/SNB DVSASCALE/Scaling Enable
         * "When in Self Refresh Big FIFO mode, scaling enable will be
         *  masked off while Big FIFO mode is exiting."
         *
         * Despite the w/a only being listed for IVB we assume that
         * the ILK/SNB note has similar ramifications, hence we apply
         * the w/a on all three platforms.
         *
         * With experimental results seems this is needed also for primary
         * plane, not only sprite plane.
         */
        if (plane->id != PLANE_CURSOR &&
            (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) ||
             IS_IVYBRIDGE(dev_priv)) &&
            (turn_on || (!needs_scaling(old_plane_state) &&
                         needs_scaling(plane_state))))
                crtc_state->disable_lp_wm = true;

        return 0;
}
6580
6581 static bool encoders_cloneable(const struct intel_encoder *a,
6582                                const struct intel_encoder *b)
6583 {
6584         /* masks could be asymmetric, so check both ways */
6585         return a == b || (a->cloneable & (1 << b->type) &&
6586                           b->cloneable & (1 << a->type));
6587 }
6588
6589 static bool check_single_encoder_cloning(struct intel_atomic_state *state,
6590                                          struct intel_crtc *crtc,
6591                                          struct intel_encoder *encoder)
6592 {
6593         struct intel_encoder *source_encoder;
6594         struct drm_connector *connector;
6595         struct drm_connector_state *connector_state;
6596         int i;
6597
6598         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
6599                 if (connector_state->crtc != &crtc->base)
6600                         continue;
6601
6602                 source_encoder =
6603                         to_intel_encoder(connector_state->best_encoder);
6604                 if (!encoders_cloneable(encoder, source_encoder))
6605                         return false;
6606         }
6607
6608         return true;
6609 }
6610
6611 static int icl_add_linked_planes(struct intel_atomic_state *state)
6612 {
6613         struct intel_plane *plane, *linked;
6614         struct intel_plane_state *plane_state, *linked_plane_state;
6615         int i;
6616
6617         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
6618                 linked = plane_state->planar_linked_plane;
6619
6620                 if (!linked)
6621                         continue;
6622
6623                 linked_plane_state = intel_atomic_get_plane_state(state, linked);
6624                 if (IS_ERR(linked_plane_state))
6625                         return PTR_ERR(linked_plane_state);
6626
6627                 drm_WARN_ON(state->base.dev,
6628                             linked_plane_state->planar_linked_plane != plane);
6629                 drm_WARN_ON(state->base.dev,
6630                             linked_plane_state->planar_slave == plane_state->planar_slave);
6631         }
6632
6633         return 0;
6634 }
6635
/*
 * icl_check_nv12_planes - (re)assign slave Y planes for planar YUV
 * @crtc_state: new CRTC state
 *
 * On display version 11+, each plane scanning out planar (NV12-style)
 * content needs a second, otherwise-unused Y-capable plane linked to
 * it. This tears down all existing links on the CRTC and then pairs
 * every plane in crtc_state->nv12_planes with a free Y plane, copying
 * the relevant parameters to the slave.
 *
 * Returns 0 on success, -EINVAL if not enough Y planes are free, or a
 * negative error from pulling a plane into the atomic state.
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
        struct intel_plane *plane, *linked;
        struct intel_plane_state *plane_state;
        int i;

        /* Plane linking only exists on display version 11+. */
        if (DISPLAY_VER(dev_priv) < 11)
                return 0;

        /*
         * Destroy all old plane links and make the slave plane invisible
         * in the crtc_state->active_planes mask.
         */
        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
                if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
                        continue;

                plane_state->planar_linked_plane = NULL;
                if (plane_state->planar_slave && !plane_state->uapi.visible) {
                        crtc_state->enabled_planes &= ~BIT(plane->id);
                        crtc_state->active_planes &= ~BIT(plane->id);
                        crtc_state->update_planes |= BIT(plane->id);
                }

                plane_state->planar_slave = false;
        }

        /* No planar planes on this CRTC: nothing left to link. */
        if (!crtc_state->nv12_planes)
                return 0;

        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
                struct intel_plane_state *linked_state = NULL;

                if (plane->pipe != crtc->pipe ||
                    !(crtc_state->nv12_planes & BIT(plane->id)))
                        continue;

                /* Find a free Y-capable plane on this CRTC to act as slave. */
                for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
                        if (!icl_is_nv12_y_plane(dev_priv, linked->id))
                                continue;

                        if (crtc_state->active_planes & BIT(linked->id))
                                continue;

                        linked_state = intel_atomic_get_plane_state(state, linked);
                        if (IS_ERR(linked_state))
                                return PTR_ERR(linked_state);

                        break;
                }

                if (!linked_state) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "Need %d free Y planes for planar YUV\n",
                                    hweight8(crtc_state->nv12_planes));

                        return -EINVAL;
                }

                plane_state->planar_linked_plane = linked;

                linked_state->planar_slave = true;
                linked_state->planar_linked_plane = plane;
                crtc_state->enabled_planes |= BIT(linked->id);
                crtc_state->active_planes |= BIT(linked->id);
                crtc_state->update_planes |= BIT(linked->id);
                drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
                            linked->base.name, plane->base.name);

                /* Copy parameters to slave plane */
                linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
                linked_state->color_ctl = plane_state->color_ctl;
                linked_state->view = plane_state->view;

                intel_plane_copy_hw_state(linked_state, plane_state);
                linked_state->uapi.src = plane_state->uapi.src;
                linked_state->uapi.dst = plane_state->uapi.dst;

                /* HDR planes select the chroma-upsampler partner by id. */
                if (icl_is_hdr_plane(dev_priv, plane->id)) {
                        if (linked->id == PLANE_SPRITE5)
                                plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
                        else if (linked->id == PLANE_SPRITE4)
                                plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
                        else if (linked->id == PLANE_SPRITE3)
                                plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL;
                        else if (linked->id == PLANE_SPRITE2)
                                plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL;
                        else
                                MISSING_CASE(linked->id);
                }
        }

        return 0;
}
6733
6734 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
6735 {
6736         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
6737         struct intel_atomic_state *state =
6738                 to_intel_atomic_state(new_crtc_state->uapi.state);
6739         const struct intel_crtc_state *old_crtc_state =
6740                 intel_atomic_get_old_crtc_state(state, crtc);
6741
6742         return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
6743 }
6744
6745 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
6746 {
6747         const struct drm_display_mode *pipe_mode =
6748                 &crtc_state->hw.pipe_mode;
6749         int linetime_wm;
6750
6751         if (!crtc_state->hw.enable)
6752                 return 0;
6753
6754         linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
6755                                         pipe_mode->crtc_clock);
6756
6757         return min(linetime_wm, 0x1ff);
6758 }
6759
6760 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
6761                                const struct intel_cdclk_state *cdclk_state)
6762 {
6763         const struct drm_display_mode *pipe_mode =
6764                 &crtc_state->hw.pipe_mode;
6765         int linetime_wm;
6766
6767         if (!crtc_state->hw.enable)
6768                 return 0;
6769
6770         linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
6771                                         cdclk_state->logical.cdclk);
6772
6773         return min(linetime_wm, 0x1ff);
6774 }
6775
/*
 * Compute the skl+ LINETIME watermark: line scanout time in 1/8 us
 * units based on the pipe's pixel rate, clamped to the 9-bit field.
 * A disabled crtc reports 0.
 */
static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *pipe_mode =
		&crtc_state->hw.pipe_mode;
	int linetime_wm;

	if (!crtc_state->hw.enable)
		return 0;

	/* Note: rounds up, unlike the hsw variants which round to closest */
	linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
				   crtc_state->pixel_rate);

	/* Display WA #1135: BXT:ALL GLK:ALL */
	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
	    dev_priv->ipc_enabled)
		linetime_wm /= 2;

	return min(linetime_wm, 0x1ff);
}
6797
/*
 * Fill in crtc_state->linetime, and crtc_state->ips_linetime on crtcs
 * that support IPS. Acquiring the cdclk state can fail (it may need to
 * be added to the atomic state), hence the error return.
 */
static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_cdclk_state *cdclk_state;

	if (DISPLAY_VER(dev_priv) >= 9)
		crtc_state->linetime = skl_linetime_wm(crtc_state);
	else
		crtc_state->linetime = hsw_linetime_wm(crtc_state);

	/* ips_linetime only matters where IPS can actually be used */
	if (!hsw_crtc_supports_ips(crtc))
		return 0;

	cdclk_state = intel_atomic_get_cdclk_state(state);
	if (IS_ERR(cdclk_state))
		return PTR_ERR(cdclk_state);

	crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
						       cdclk_state);

	return 0;
}
6823
/*
 * Per-crtc atomic check: validate the new crtc state and compute the
 * derived state (clocks, color management, watermarks, scalers, IPS,
 * linetime, PSR2 selective fetch). Returns 0 on success or a negative
 * error code to fail the atomic commit.
 */
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool mode_changed = intel_crtc_needs_modeset(crtc_state);
	int ret;

	/* pre-gen5 (except g4x): flag a post-disable watermark update */
	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
	    mode_changed && !crtc_state->hw.active)
		crtc_state->update_wm_post = true;

	/*
	 * Compute the pixel clock / DPLL state. Bigjoiner slaves are
	 * skipped (they follow their master), and no shared DPLL may
	 * already be assigned at this point.
	 */
	if (mode_changed && crtc_state->hw.enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !crtc_state->bigjoiner_slave &&
	    !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(crtc_state);
		if (ret)
			return ret;
	}

	/*
	 * May need to update pipe gamma enable bits
	 * when C8 planes are getting enabled/disabled.
	 */
	if (c8_planes_changed(crtc_state))
		crtc_state->uapi.color_mgmt_changed = true;

	if (mode_changed || crtc_state->update_pipe ||
	    crtc_state->uapi.color_mgmt_changed) {
		ret = intel_color_check(crtc_state);
		if (ret)
			return ret;
	}

	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(state, crtc);
		if (ret) {
			drm_dbg_kms(&dev_priv->drm,
				    "Target pipe watermarks are invalid\n");
			return ret;
		}

	}

	if (dev_priv->display.compute_intermediate_wm) {
		/* intermediate wm requires pipe wm to have been computed */
		if (drm_WARN_ON(&dev_priv->drm,
				!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state.  We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(state, crtc);
		if (ret) {
			drm_dbg_kms(&dev_priv->drm,
				    "No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	}

	if (DISPLAY_VER(dev_priv) >= 9) {
		if (mode_changed || crtc_state->update_pipe) {
			ret = skl_update_scaler_crtc(crtc_state);
			if (ret)
				return ret;
		}

		ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
		if (ret)
			return ret;
	}

	if (HAS_IPS(dev_priv)) {
		ret = hsw_compute_ips_config(crtc_state);
		if (ret)
			return ret;
	}

	if (DISPLAY_VER(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		ret = hsw_compute_linetime_wm(state, crtc);
		if (ret)
			return ret;

	}

	/* PSR2 selective fetch is only updated outside a full modeset */
	if (!mode_changed) {
		ret = intel_psr2_sel_fetch_update(state, crtc);
		if (ret)
			return ret;
	}

	return 0;
}
6922
/*
 * Sync each connector's atomic state (best_encoder, crtc, max_bpc) with
 * the current encoder/crtc pointers, balancing the connector reference
 * counts: the reference held for a previous crtc binding is dropped,
 * and a new one is taken when the connector is (re)bound to a crtc.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state = connector->base.state;
		struct intel_encoder *encoder =
			to_intel_encoder(connector->base.encoder);

		/* drop the reference held for the previous crtc binding */
		if (conn_state->crtc)
			drm_connector_put(&connector->base);

		if (encoder) {
			struct intel_crtc *crtc =
				to_intel_crtc(encoder->base.crtc);
			const struct intel_crtc_state *crtc_state =
				to_intel_crtc_state(crtc->base.state);

			conn_state->best_encoder = &encoder->base;
			conn_state->crtc = &crtc->base;
			/* pipe_bpp == 0 falls back to 24 (8 bpc) */
			conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3;

			drm_connector_get(&connector->base);
		} else {
			conn_state->best_encoder = NULL;
			conn_state->crtc = NULL;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
6955
6956 static int
6957 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
6958                       struct intel_crtc_state *pipe_config)
6959 {
6960         struct drm_connector *connector = conn_state->connector;
6961         struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
6962         const struct drm_display_info *info = &connector->display_info;
6963         int bpp;
6964
6965         switch (conn_state->max_bpc) {
6966         case 6 ... 7:
6967                 bpp = 6 * 3;
6968                 break;
6969         case 8 ... 9:
6970                 bpp = 8 * 3;
6971                 break;
6972         case 10 ... 11:
6973                 bpp = 10 * 3;
6974                 break;
6975         case 12 ... 16:
6976                 bpp = 12 * 3;
6977                 break;
6978         default:
6979                 MISSING_CASE(conn_state->max_bpc);
6980                 return -EINVAL;
6981         }
6982
6983         if (bpp < pipe_config->pipe_bpp) {
6984                 drm_dbg_kms(&i915->drm,
6985                             "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
6986                             "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
6987                             connector->base.id, connector->name,
6988                             bpp, 3 * info->bpc,
6989                             3 * conn_state->max_requested_bpc,
6990                             pipe_config->pipe_bpp);
6991
6992                 pipe_config->pipe_bpp = bpp;
6993         }
6994
6995         return 0;
6996 }
6997
6998 static int
6999 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
7000                           struct intel_crtc_state *pipe_config)
7001 {
7002         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7003         struct drm_atomic_state *state = pipe_config->uapi.state;
7004         struct drm_connector *connector;
7005         struct drm_connector_state *connector_state;
7006         int bpp, i;
7007
7008         if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
7009             IS_CHERRYVIEW(dev_priv)))
7010                 bpp = 10*3;
7011         else if (DISPLAY_VER(dev_priv) >= 5)
7012                 bpp = 12*3;
7013         else
7014                 bpp = 8*3;
7015
7016         pipe_config->pipe_bpp = bpp;
7017
7018         /* Clamp display bpp to connector max bpp */
7019         for_each_new_connector_in_state(state, connector, connector_state, i) {
7020                 int ret;
7021
7022                 if (connector_state->crtc != &crtc->base)
7023                         continue;
7024
7025                 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
7026                 if (ret)
7027                         return ret;
7028         }
7029
7030         return 0;
7031 }
7032
/* Log the crtc_* (hardware) timings of a mode for debugging. */
static void intel_dump_crtc_timings(struct drm_i915_private *i915,
				    const struct drm_display_mode *mode)
{
	drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
		    "type: 0x%x flags: 0x%x\n",
		    mode->crtc_clock,
		    mode->crtc_hdisplay, mode->crtc_hsync_start,
		    mode->crtc_hsync_end, mode->crtc_htotal,
		    mode->crtc_vdisplay, mode->crtc_vsync_start,
		    mode->crtc_vsync_end, mode->crtc_vtotal,
		    mode->type, mode->flags);
}
7045
/* Log one link M/N configuration; @id names which set (e.g. "fdi"). */
static void
intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
		      const char *id, unsigned int lane_count,
		      const struct intel_link_m_n *m_n)
{
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);

	drm_dbg_kms(&i915->drm,
		    "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		    id, lane_count,
		    m_n->gmch_m, m_n->gmch_n,
		    m_n->link_m, m_n->link_n, m_n->tu);
}
7059
/*
 * Log an HDMI infoframe; skip the formatting work entirely when KMS
 * debugging output is disabled.
 */
static void
intel_dump_infoframe(struct drm_i915_private *dev_priv,
		     const union hdmi_infoframe *frame)
{
	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
}
7069
/*
 * Log a DP VSC SDP; skip the formatting work entirely when KMS
 * debugging output is disabled.
 */
static void
intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
		      const struct drm_dp_vsc_sdp *vsc)
{
	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
}
7079
/* Map INTEL_OUTPUT_* enum values to their names for debug output. */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
7098
/*
 * Format @output_types as a comma-separated list of INTEL_OUTPUT_*
 * names into @buf (at most @len bytes, always NUL-terminated). Output
 * is truncated if the buffer is too small.
 */
static void snprintf_output_types(char *buf, size_t len,
				  unsigned int output_types)
{
	char *str = buf;
	int i;

	str[0] = '\0';

	for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
		int r;

		if ((output_types & BIT(i)) == 0)
			continue;

		r = snprintf(str, len, "%s%s",
			     str != buf ? "," : "", output_type_str[i]);
		/* stop on truncation; the entry was not fully written */
		if (r >= len)
			break;
		str += r;
		len -= r;

		output_types &= ~BIT(i);
	}

	/* warn if bits had no name, or entries were lost to truncation */
	WARN_ON_ONCE(output_types != 0);
}
7125
/* Human-readable names for enum intel_output_format values. */
static const char * const output_format_str[] = {
	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
};

/* Map an output format to its name, guarding against bogus values. */
static const char *output_formats(enum intel_output_format format)
{
	if (format >= ARRAY_SIZE(output_format_str))
		return "invalid";
	return output_format_str[format];
}
7138
/*
 * Log a plane's state: framebuffer (if any), visibility, rotation,
 * scaler assignment, and — when visible — the src/dst rectangles.
 */
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;

	/* no fb: only the visibility is worth reporting */
	if (!fb) {
		drm_dbg_kms(&i915->drm,
			    "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
			    plane->base.base.id, plane->base.name,
			    yesno(plane_state->uapi.visible));
		return;
	}

	drm_dbg_kms(&i915->drm,
		    "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
		    plane->base.base.id, plane->base.name,
		    fb->base.id, fb->width, fb->height, &fb->format->format,
		    fb->modifier, yesno(plane_state->uapi.visible));
	drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
		    plane_state->hw.rotation, plane_state->scaler_id);
	if (plane_state->uapi.visible)
		drm_dbg_kms(&i915->drm,
			    "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
			    DRM_RECT_FP_ARG(&plane_state->uapi.src),
			    DRM_RECT_ARG(&plane_state->uapi.dst));
}
7166
/*
 * Dump the full contents of a crtc state to the debug log. @context
 * describes why the dump is happening. If @state is non-NULL, the
 * states of all planes on this crtc are dumped as well.
 */
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
				   struct intel_atomic_state *state,
				   const char *context)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	char buf[64];
	int i;

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
		    crtc->base.base.id, crtc->base.name,
		    yesno(pipe_config->hw.enable), context);

	/* most of the state is meaningless for a disabled crtc */
	if (!pipe_config->hw.enable)
		goto dump_planes;

	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	drm_dbg_kms(&dev_priv->drm,
		    "active: %s, output_types: %s (0x%x), output format: %s\n",
		    yesno(pipe_config->hw.active),
		    buf, pipe_config->output_types,
		    output_formats(pipe_config->output_format));

	drm_dbg_kms(&dev_priv->drm,
		    "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		    transcoder_name(pipe_config->cpu_transcoder),
		    pipe_config->pipe_bpp, pipe_config->dither);

	drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
		    transcoder_name(pipe_config->mst_master_transcoder));

	drm_dbg_kms(&dev_priv->drm,
		    "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
		    transcoder_name(pipe_config->master_transcoder),
		    pipe_config->sync_mode_slaves_mask);

	drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s\n",
		    pipe_config->bigjoiner_slave ? "slave" :
		    pipe_config->bigjoiner ? "master" : "no");

	drm_dbg_kms(&dev_priv->drm, "splitter: %s, link count %d, overlap %d\n",
		    enableddisabled(pipe_config->splitter.enable),
		    pipe_config->splitter.link_count,
		    pipe_config->splitter.pixel_overlap);

	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
		    pipe_config->has_audio, pipe_config->has_infoframe,
		    pipe_config->infoframes.enable);

	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
		drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
			    pipe_config->infoframes.gcp);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
	/*
	 * NOTE(review): the GMP case below also dumps infoframes.drm;
	 * presumably the DRM infoframe is transported in the GMP packet
	 * on some platforms — confirm against the HDMI infoframe code.
	 */
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(DP_SDP_VSC))
		intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);

	drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
		    yesno(pipe_config->vrr.enable),
		    pipe_config->vrr.vmin, pipe_config->vrr.vmax,
		    pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
		    pipe_config->vrr.flipline,
		    intel_vrr_vmin_vblank_start(pipe_config),
		    intel_vrr_vmax_vblank_start(pipe_config));

	drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
	drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
	drm_dbg_kms(&dev_priv->drm, "pipe mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.pipe_mode);
	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.pipe_mode);
	drm_dbg_kms(&dev_priv->drm,
		    "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		    pipe_config->port_clock,
		    pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		    pipe_config->pixel_rate);

	drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
		    pipe_config->linetime, pipe_config->ips_linetime);

	if (DISPLAY_VER(dev_priv) >= 9)
		drm_dbg_kms(&dev_priv->drm,
			    "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			    crtc->num_scalers,
			    pipe_config->scaler_state.scaler_users,
			    pipe_config->scaler_state.scaler_id);

	if (HAS_GMCH(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			    pipe_config->gmch_pfit.control,
			    pipe_config->gmch_pfit.pgm_ratios,
			    pipe_config->gmch_pfit.lvds_border_bits);
	else
		drm_dbg_kms(&dev_priv->drm,
			    "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
			    DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
			    enableddisabled(pipe_config->pch_pfit.enabled),
			    yesno(pipe_config->pch_pfit.force_thru));

	drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
		    pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	if (IS_CHERRYVIEW(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			    pipe_config->cgm_mode, pipe_config->gamma_mode,
			    pipe_config->gamma_enable, pipe_config->csc_enable);
	else
		drm_dbg_kms(&dev_priv->drm,
			    "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			    pipe_config->csc_mode, pipe_config->gamma_mode,
			    pipe_config->gamma_enable, pipe_config->csc_enable);

	drm_dbg_kms(&dev_priv->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
		    pipe_config->hw.degamma_lut ?
		    drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
		    pipe_config->hw.gamma_lut ?
		    drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);

dump_planes:
	if (!state)
		return;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe)
			intel_dump_plane_state(plane_state);
	}
}
7332
7333 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
7334 {
7335         struct drm_device *dev = state->base.dev;
7336         struct drm_connector *connector;
7337         struct drm_connector_list_iter conn_iter;
7338         unsigned int used_ports = 0;
7339         unsigned int used_mst_ports = 0;
7340         bool ret = true;
7341
7342         /*
7343          * We're going to peek into connector->state,
7344          * hence connection_mutex must be held.
7345          */
7346         drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
7347
7348         /*
7349          * Walk the connector list instead of the encoder
7350          * list to detect the problem on ddi platforms
7351          * where there's just one encoder per digital port.
7352          */
7353         drm_connector_list_iter_begin(dev, &conn_iter);
7354         drm_for_each_connector_iter(connector, &conn_iter) {
7355                 struct drm_connector_state *connector_state;
7356                 struct intel_encoder *encoder;
7357
7358                 connector_state =
7359                         drm_atomic_get_new_connector_state(&state->base,
7360                                                            connector);
7361                 if (!connector_state)
7362                         connector_state = connector->state;
7363
7364                 if (!connector_state->best_encoder)
7365                         continue;
7366
7367                 encoder = to_intel_encoder(connector_state->best_encoder);
7368
7369                 drm_WARN_ON(dev, !connector_state->crtc);
7370
7371                 switch (encoder->type) {
7372                 case INTEL_OUTPUT_DDI:
7373                         if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
7374                                 break;
7375                         fallthrough;
7376                 case INTEL_OUTPUT_DP:
7377                 case INTEL_OUTPUT_HDMI:
7378                 case INTEL_OUTPUT_EDP:
7379                         /* the same port mustn't appear more than once */
7380                         if (used_ports & BIT(encoder->port))
7381                                 ret = false;
7382
7383                         used_ports |= BIT(encoder->port);
7384                         break;
7385                 case INTEL_OUTPUT_DP_MST:
7386                         used_mst_ports |=
7387                                 1 << encoder->port;
7388                         break;
7389                 default:
7390                         break;
7391                 }
7392         }
7393         drm_connector_list_iter_end(&conn_iter);
7394
7395         /* can't mix MST and SST/HDMI on the same port */
7396         if (used_ports & used_mst_ports)
7397                 return false;
7398
7399         return ret;
7400 }
7401
/*
 * Copy the uapi color blobs into the hw state for the non-modeset
 * path. A bigjoiner slave copies from its master's new state instead;
 * if the master state is unchanged there is nothing to do.
 */
static void
intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
					   struct intel_crtc_state *crtc_state)
{
	const struct intel_crtc_state *from_crtc_state = crtc_state;

	if (crtc_state->bigjoiner_slave) {
		from_crtc_state = intel_atomic_get_new_crtc_state(state,
								  crtc_state->bigjoiner_linked_crtc);

		/* No need to copy state if the master state is unchanged */
		if (!from_crtc_state)
			return;
	}

	intel_crtc_copy_color_blobs(crtc_state, from_crtc_state);
}
7419
/*
 * Seed the hw state (enable/active/modes/scaling filter and, via the
 * nomodeset helper, the color blobs) from the uapi state.
 */
static void
intel_crtc_copy_uapi_to_hw_state(struct intel_atomic_state *state,
				 struct intel_crtc_state *crtc_state)
{
	crtc_state->hw.enable = crtc_state->uapi.enable;
	crtc_state->hw.active = crtc_state->uapi.active;
	crtc_state->hw.mode = crtc_state->uapi.mode;
	crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
	crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;

	intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc_state);
}
7432
/*
 * Copy the hw state back into the uapi state, including the color
 * management blobs. Bigjoiner slaves keep their uapi state untouched.
 */
static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
{
	if (crtc_state->bigjoiner_slave)
		return;

	crtc_state->uapi.enable = crtc_state->hw.enable;
	crtc_state->uapi.active = crtc_state->hw.active;
	/* setting the mode should not fail here; warn if it somehow does */
	drm_WARN_ON(crtc_state->uapi.crtc->dev,
		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);

	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
	crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;

	/* copy color blobs to uapi */
	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
				  crtc_state->hw.degamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
				  crtc_state->hw.gamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.ctm,
				  crtc_state->hw.ctm);
}
7454
/*
 * Overwrite the bigjoiner slave's crtc_state with a copy of the
 * master's state, while preserving the slave's own uapi state, scaler
 * state, DPLL assignment and CRC enable, then re-initializing the hw
 * members and the slave-specific fields. Returns 0 or -ENOMEM.
 */
static int
copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state,
			  const struct intel_crtc_state *from_crtc_state)
{
	struct intel_crtc_state *saved_state;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL);
	if (!saved_state)
		return -ENOMEM;

	/* keep the slave's own versions of these fields */
	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	saved_state->crc_enabled = crtc_state->crc_enabled;

	/* free old hw members before they are clobbered by the memcpy */
	intel_crtc_free_hw_state(crtc_state);
	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	/* Re-init hw state */
	memset(&crtc_state->hw, 0, sizeof(saved_state->hw));
	crtc_state->hw.enable = from_crtc_state->hw.enable;
	crtc_state->hw.active = from_crtc_state->hw.active;
	crtc_state->hw.pipe_mode = from_crtc_state->hw.pipe_mode;
	crtc_state->hw.adjusted_mode = from_crtc_state->hw.adjusted_mode;

	/* Some fixups */
	crtc_state->uapi.mode_changed = from_crtc_state->uapi.mode_changed;
	crtc_state->uapi.connectors_changed = from_crtc_state->uapi.connectors_changed;
	crtc_state->uapi.active_changed = from_crtc_state->uapi.active_changed;
	crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0;
	crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc);
	crtc_state->bigjoiner_slave = true;
	crtc_state->cpu_transcoder = (enum transcoder)crtc->pipe;
	crtc_state->has_audio = false;

	return 0;
}
7495
/*
 * Reset crtc_state to freshly-allocated defaults ahead of computing a
 * new configuration, preserving only the fields explicitly copied into
 * saved_state below, then re-seed the hw state from the uapi state.
 * Returns 0 or -ENOMEM.
 */
static int
intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
				 struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *saved_state;

	saved_state = intel_crtc_state_alloc(crtc);
	if (!saved_state)
		return -ENOMEM;

	/* free the old crtc_state->hw members */
	intel_crtc_free_hw_state(crtc_state);

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	/* these platforms program watermarks outside the atomic state */
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;

	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	intel_crtc_copy_uapi_to_hw_state(state, crtc_state);

	return 0;
}
7534
/*
 * Compute the full pipe configuration for @pipe_config: sanitize sync
 * polarity flags, compute the baseline pipe bpp and the pipe source
 * size, let every encoder on the CRTC adjust the state through its
 * ->compute_config() hook, and finally run the CRTC-level fixups.
 * The encoder pass is retried once if the CRTC compute step returns
 * I915_DISPLAY_CONFIG_RETRY (e.g. when bandwidth-constrained).
 *
 * Returns 0 on success, -EDEADLK if the caller must back off and retry
 * the modeset locking, or another negative error code on failure.
 */
static int
intel_modeset_pipe_config(struct intel_atomic_state *state,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret, i;
	bool retry = true;

	/* Default transcoder assignment: transcoder == pipe. */
	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					pipe_config);
	if (ret)
		return ret;

	/* Remember the pre-encoder bpp for the debug message at the end. */
	base_bpp = pipe_config->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->hw.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		/* Only consider encoders feeding this CRTC. */
		if (connector_state->crtc != crtc)
			continue;

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			drm_dbg_kms(&i915->drm,
				    "rejecting invalid cloning configuration\n");
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != crtc)
			continue;

		ret = encoder->compute_config(encoder, pipe_config,
					      connector_state);
		if (ret < 0) {
			/* -EDEADLK is expected flow control, not worth logging. */
			if (ret != -EDEADLK)
				drm_dbg_kms(&i915->drm,
					    "Encoder config failure: %d\n",
					    ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret == -EDEADLK)
		return ret;
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
		return ret;
	}

	/*
	 * A positive I915_DISPLAY_CONFIG_RETRY return means the encoder pass
	 * must be redone with the adjusted constraints — at most once, to
	 * guard against a non-converging configuration loop.
	 */
	if (ret == I915_DISPLAY_CONFIG_RETRY) {
		if (drm_WARN(&i915->drm, !retry,
			     "loop in pipe configuration computation\n"))
			return -EINVAL;

		drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	drm_dbg_kms(&i915->drm,
		    "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		    base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	return 0;
}
7673
7674 static int
7675 intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
7676 {
7677         struct intel_atomic_state *state =
7678                 to_intel_atomic_state(crtc_state->uapi.state);
7679         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7680         struct drm_connector_state *conn_state;
7681         struct drm_connector *connector;
7682         int i;
7683
7684         for_each_new_connector_in_state(&state->base, connector,
7685                                         conn_state, i) {
7686                 struct intel_encoder *encoder =
7687                         to_intel_encoder(conn_state->best_encoder);
7688                 int ret;
7689
7690                 if (conn_state->crtc != &crtc->base ||
7691                     !encoder->compute_config_late)
7692                         continue;
7693
7694                 ret = encoder->compute_config_late(encoder, crtc_state,
7695                                                    conn_state);
7696                 if (ret)
7697                         return ret;
7698         }
7699
7700         return 0;
7701 }
7702
/*
 * Compare two clock values allowing for a small relative difference:
 * the clocks match when they differ by less than roughly 5% of their
 * sum. Identical clocks always match; a zero clock only matches
 * another zero clock.
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int delta, sum;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	delta = abs(clock1 - clock2);
	sum = clock1 + clock2;

	/* 100 + 100 * delta / sum < 105  <=>  delta below ~5% of the sum */
	return (delta + sum) * 100 / sum < 105;
}
7720
7721 static bool
7722 intel_compare_m_n(unsigned int m, unsigned int n,
7723                   unsigned int m2, unsigned int n2,
7724                   bool exact)
7725 {
7726         if (m == m2 && n == n2)
7727                 return true;
7728
7729         if (exact || !m || !n || !m2 || !n2)
7730                 return false;
7731
7732         BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
7733
7734         if (n > n2) {
7735                 while (n > n2) {
7736                         m2 <<= 1;
7737                         n2 <<= 1;
7738                 }
7739         } else if (n < n2) {
7740                 while (n < n2) {
7741                         m <<= 1;
7742                         n <<= 1;
7743                 }
7744         }
7745
7746         if (n != n2)
7747                 return false;
7748
7749         return intel_fuzzy_clock_check(m, m2);
7750 }
7751
7752 static bool
7753 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
7754                        const struct intel_link_m_n *m2_n2,
7755                        bool exact)
7756 {
7757         return m_n->tu == m2_n2->tu &&
7758                 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
7759                                   m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
7760                 intel_compare_m_n(m_n->link_m, m_n->link_n,
7761                                   m2_n2->link_m, m2_n2->link_n, exact);
7762 }
7763
7764 static bool
7765 intel_compare_infoframe(const union hdmi_infoframe *a,
7766                         const union hdmi_infoframe *b)
7767 {
7768         return memcmp(a, b, sizeof(*a)) == 0;
7769 }
7770
7771 static bool
7772 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
7773                          const struct drm_dp_vsc_sdp *b)
7774 {
7775         return memcmp(a, b, sizeof(*a)) == 0;
7776 }
7777
7778 static void
7779 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
7780                                bool fastset, const char *name,
7781                                const union hdmi_infoframe *a,
7782                                const union hdmi_infoframe *b)
7783 {
7784         if (fastset) {
7785                 if (!drm_debug_enabled(DRM_UT_KMS))
7786                         return;
7787
7788                 drm_dbg_kms(&dev_priv->drm,
7789                             "fastset mismatch in %s infoframe\n", name);
7790                 drm_dbg_kms(&dev_priv->drm, "expected:\n");
7791                 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
7792                 drm_dbg_kms(&dev_priv->drm, "found:\n");
7793                 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
7794         } else {
7795                 drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
7796                 drm_err(&dev_priv->drm, "expected:\n");
7797                 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
7798                 drm_err(&dev_priv->drm, "found:\n");
7799                 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
7800         }
7801 }
7802
7803 static void
7804 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
7805                                 bool fastset, const char *name,
7806                                 const struct drm_dp_vsc_sdp *a,
7807                                 const struct drm_dp_vsc_sdp *b)
7808 {
7809         if (fastset) {
7810                 if (!drm_debug_enabled(DRM_UT_KMS))
7811                         return;
7812
7813                 drm_dbg_kms(&dev_priv->drm,
7814                             "fastset mismatch in %s dp sdp\n", name);
7815                 drm_dbg_kms(&dev_priv->drm, "expected:\n");
7816                 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
7817                 drm_dbg_kms(&dev_priv->drm, "found:\n");
7818                 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
7819         } else {
7820                 drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
7821                 drm_err(&dev_priv->drm, "expected:\n");
7822                 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
7823                 drm_err(&dev_priv->drm, "found:\n");
7824                 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
7825         }
7826 }
7827
7828 static void __printf(4, 5)
7829 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
7830                      const char *name, const char *format, ...)
7831 {
7832         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
7833         struct va_format vaf;
7834         va_list args;
7835
7836         va_start(args, format);
7837         vaf.fmt = format;
7838         vaf.va = &args;
7839
7840         if (fastset)
7841                 drm_dbg_kms(&i915->drm,
7842                             "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
7843                             crtc->base.base.id, crtc->base.name, name, &vaf);
7844         else
7845                 drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
7846                         crtc->base.base.id, crtc->base.name, name, &vaf);
7847
7848         va_end(args);
7849 }
7850
7851 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
7852 {
7853         if (dev_priv->params.fastboot != -1)
7854                 return dev_priv->params.fastboot;
7855
7856         /* Enable fastboot by default on Skylake and newer */
7857         if (DISPLAY_VER(dev_priv) >= 9)
7858                 return true;
7859
7860         /* Enable fastboot by default on VLV and CHV */
7861         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
7862                 return true;
7863
7864         /* Disabled by default on all others */
7865         return false;
7866 }
7867
7868 static bool
7869 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
7870                           const struct intel_crtc_state *pipe_config,
7871                           bool fastset)
7872 {
7873         struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
7874         struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
7875         bool ret = true;
7876         u32 bp_gamma = 0;
7877         bool fixup_inherited = fastset &&
7878                 current_config->inherited && !pipe_config->inherited;
7879
7880         if (fixup_inherited && !fastboot_enabled(dev_priv)) {
7881                 drm_dbg_kms(&dev_priv->drm,
7882                             "initial modeset and fastboot not set\n");
7883                 ret = false;
7884         }
7885
7886 #define PIPE_CONF_CHECK_X(name) do { \
7887         if (current_config->name != pipe_config->name) { \
7888                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
7889                                      "(expected 0x%08x, found 0x%08x)", \
7890                                      current_config->name, \
7891                                      pipe_config->name); \
7892                 ret = false; \
7893         } \
7894 } while (0)
7895
7896 #define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \
7897         if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
7898                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
7899                                      "(expected 0x%08x, found 0x%08x)", \
7900                                      current_config->name & (mask), \
7901                                      pipe_config->name & (mask)); \
7902                 ret = false; \
7903         } \
7904 } while (0)
7905
7906 #define PIPE_CONF_CHECK_I(name) do { \
7907         if (current_config->name != pipe_config->name) { \
7908                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
7909                                      "(expected %i, found %i)", \
7910                                      current_config->name, \
7911                                      pipe_config->name); \
7912                 ret = false; \
7913         } \
7914 } while (0)
7915
7916 #define PIPE_CONF_CHECK_BOOL(name) do { \
7917         if (current_config->name != pipe_config->name) { \
7918                 pipe_config_mismatch(fastset, crtc,  __stringify(name), \
7919                                      "(expected %s, found %s)", \
7920                                      yesno(current_config->name), \
7921                                      yesno(pipe_config->name)); \
7922                 ret = false; \
7923         } \
7924 } while (0)
7925
7926 /*
7927  * Checks state where we only read out the enabling, but not the entire
7928  * state itself (like full infoframes or ELD for audio). These states
7929  * require a full modeset on bootup to fix up.
7930  */
7931 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
7932         if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
7933                 PIPE_CONF_CHECK_BOOL(name); \
7934         } else { \
7935                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
7936                                      "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
7937                                      yesno(current_config->name), \
7938                                      yesno(pipe_config->name)); \
7939                 ret = false; \
7940         } \
7941 } while (0)
7942
7943 #define PIPE_CONF_CHECK_P(name) do { \
7944         if (current_config->name != pipe_config->name) { \
7945                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
7946                                      "(expected %p, found %p)", \
7947                                      current_config->name, \
7948                                      pipe_config->name); \
7949                 ret = false; \
7950         } \
7951 } while (0)
7952
7953 #define PIPE_CONF_CHECK_M_N(name) do { \
7954         if (!intel_compare_link_m_n(&current_config->name, \
7955                                     &pipe_config->name,\
7956                                     !fastset)) { \
7957                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
7958                                      "(expected tu %i gmch %i/%i link %i/%i, " \
7959                                      "found tu %i, gmch %i/%i link %i/%i)", \
7960                                      current_config->name.tu, \
7961                                      current_config->name.gmch_m, \
7962                                      current_config->name.gmch_n, \
7963                                      current_config->name.link_m, \
7964                                      current_config->name.link_n, \
7965                                      pipe_config->name.tu, \
7966                                      pipe_config->name.gmch_m, \
7967                                      pipe_config->name.gmch_n, \
7968                                      pipe_config->name.link_m, \
7969                                      pipe_config->name.link_n); \
7970                 ret = false; \
7971         } \
7972 } while (0)
7973
7974 /* This is required for BDW+ where there is only one set of registers for
7975  * switching between high and low RR.
7976  * This macro can be used whenever a comparison has to be made between one
7977  * hw state and multiple sw state variables.
7978  */
7979 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
7980         if (!intel_compare_link_m_n(&current_config->name, \
7981                                     &pipe_config->name, !fastset) && \
7982             !intel_compare_link_m_n(&current_config->alt_name, \
7983                                     &pipe_config->name, !fastset)) { \
7984                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
7985                                      "(expected tu %i gmch %i/%i link %i/%i, " \
7986                                      "or tu %i gmch %i/%i link %i/%i, " \
7987                                      "found tu %i, gmch %i/%i link %i/%i)", \
7988                                      current_config->name.tu, \
7989                                      current_config->name.gmch_m, \
7990                                      current_config->name.gmch_n, \
7991                                      current_config->name.link_m, \
7992                                      current_config->name.link_n, \
7993                                      current_config->alt_name.tu, \
7994                                      current_config->alt_name.gmch_m, \
7995                                      current_config->alt_name.gmch_n, \
7996                                      current_config->alt_name.link_m, \
7997                                      current_config->alt_name.link_n, \
7998                                      pipe_config->name.tu, \
7999                                      pipe_config->name.gmch_m, \
8000                                      pipe_config->name.gmch_n, \
8001                                      pipe_config->name.link_m, \
8002                                      pipe_config->name.link_n); \
8003                 ret = false; \
8004         } \
8005 } while (0)
8006
8007 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
8008         if ((current_config->name ^ pipe_config->name) & (mask)) { \
8009                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8010                                      "(%x) (expected %i, found %i)", \
8011                                      (mask), \
8012                                      current_config->name & (mask), \
8013                                      pipe_config->name & (mask)); \
8014                 ret = false; \
8015         } \
8016 } while (0)
8017
8018 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
8019         if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
8020                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8021                                      "(expected %i, found %i)", \
8022                                      current_config->name, \
8023                                      pipe_config->name); \
8024                 ret = false; \
8025         } \
8026 } while (0)
8027
8028 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
8029         if (!intel_compare_infoframe(&current_config->infoframes.name, \
8030                                      &pipe_config->infoframes.name)) { \
8031                 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
8032                                                &current_config->infoframes.name, \
8033                                                &pipe_config->infoframes.name); \
8034                 ret = false; \
8035         } \
8036 } while (0)
8037
8038 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
8039         if (!current_config->has_psr && !pipe_config->has_psr && \
8040             !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
8041                                       &pipe_config->infoframes.name)) { \
8042                 pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
8043                                                 &current_config->infoframes.name, \
8044                                                 &pipe_config->infoframes.name); \
8045                 ret = false; \
8046         } \
8047 } while (0)
8048
8049 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
8050         if (current_config->name1 != pipe_config->name1) { \
8051                 pipe_config_mismatch(fastset, crtc, __stringify(name1), \
8052                                 "(expected %i, found %i, won't compare lut values)", \
8053                                 current_config->name1, \
8054                                 pipe_config->name1); \
8055                 ret = false;\
8056         } else { \
8057                 if (!intel_color_lut_equal(current_config->name2, \
8058                                         pipe_config->name2, pipe_config->name1, \
8059                                         bit_precision)) { \
8060                         pipe_config_mismatch(fastset, crtc, __stringify(name2), \
8061                                         "hw_state doesn't match sw_state"); \
8062                         ret = false; \
8063                 } \
8064         } \
8065 } while (0)
8066
8067 #define PIPE_CONF_QUIRK(quirk) \
8068         ((current_config->quirks | pipe_config->quirks) & (quirk))
8069
8070         PIPE_CONF_CHECK_I(cpu_transcoder);
8071
8072         PIPE_CONF_CHECK_BOOL(has_pch_encoder);
8073         PIPE_CONF_CHECK_I(fdi_lanes);
8074         PIPE_CONF_CHECK_M_N(fdi_m_n);
8075
8076         PIPE_CONF_CHECK_I(lane_count);
8077         PIPE_CONF_CHECK_X(lane_lat_optim_mask);
8078
8079         if (DISPLAY_VER(dev_priv) < 8) {
8080                 PIPE_CONF_CHECK_M_N(dp_m_n);
8081
8082                 if (current_config->has_drrs)
8083                         PIPE_CONF_CHECK_M_N(dp_m2_n2);
8084         } else
8085                 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
8086
8087         PIPE_CONF_CHECK_X(output_types);
8088
8089         /* FIXME do the readout properly and get rid of this quirk */
8090         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
8091                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
8092                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
8093                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
8094                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
8095                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
8096                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);
8097
8098                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
8099                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
8100                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
8101                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
8102                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
8103                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);
8104
8105                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
8106                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
8107                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
8108                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
8109                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
8110                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
8111
8112                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
8113                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
8114                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
8115                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
8116                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
8117                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
8118
8119                 PIPE_CONF_CHECK_I(pixel_multiplier);
8120
8121                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8122                                       DRM_MODE_FLAG_INTERLACE);
8123
8124                 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
8125                         PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8126                                               DRM_MODE_FLAG_PHSYNC);
8127                         PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8128                                               DRM_MODE_FLAG_NHSYNC);
8129                         PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8130                                               DRM_MODE_FLAG_PVSYNC);
8131                         PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8132                                               DRM_MODE_FLAG_NVSYNC);
8133                 }
8134         }
8135
8136         PIPE_CONF_CHECK_I(output_format);
8137         PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
8138         if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
8139             IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
8140                 PIPE_CONF_CHECK_BOOL(limited_color_range);
8141
8142         PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
8143         PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
8144         PIPE_CONF_CHECK_BOOL(has_infoframe);
8145         /* FIXME do the readout properly and get rid of this quirk */
8146         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
8147                 PIPE_CONF_CHECK_BOOL(fec_enable);
8148
8149         PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
8150
8151         PIPE_CONF_CHECK_X(gmch_pfit.control);
8152         /* pfit ratios are autocomputed by the hw on gen4+ */
8153         if (DISPLAY_VER(dev_priv) < 4)
8154                 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
8155         PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
8156
8157         /*
8158          * Changing the EDP transcoder input mux
8159          * (A_ONOFF vs. A_ON) requires a full modeset.
8160          */
8161         PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
8162
8163         if (!fastset) {
8164                 PIPE_CONF_CHECK_I(pipe_src_w);
8165                 PIPE_CONF_CHECK_I(pipe_src_h);
8166
8167                 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
8168                 if (current_config->pch_pfit.enabled) {
8169                         PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
8170                         PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
8171                         PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
8172                         PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
8173                 }
8174
8175                 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
8176                 /* FIXME do the readout properly and get rid of this quirk */
8177                 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
8178                         PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
8179
8180                 PIPE_CONF_CHECK_X(gamma_mode);
8181                 if (IS_CHERRYVIEW(dev_priv))
8182                         PIPE_CONF_CHECK_X(cgm_mode);
8183                 else
8184                         PIPE_CONF_CHECK_X(csc_mode);
8185                 PIPE_CONF_CHECK_BOOL(gamma_enable);
8186                 PIPE_CONF_CHECK_BOOL(csc_enable);
8187
8188                 PIPE_CONF_CHECK_I(linetime);
8189                 PIPE_CONF_CHECK_I(ips_linetime);
8190
8191                 bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
8192                 if (bp_gamma)
8193                         PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
8194
8195                 PIPE_CONF_CHECK_BOOL(has_psr);
8196                 PIPE_CONF_CHECK_BOOL(has_psr2);
8197                 PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
8198                 PIPE_CONF_CHECK_I(dc3co_exitline);
8199         }
8200
8201         PIPE_CONF_CHECK_BOOL(double_wide);
8202
8203         if (dev_priv->dpll.mgr)
8204                 PIPE_CONF_CHECK_P(shared_dpll);
8205
8206         /* FIXME do the readout properly and get rid of this quirk */
8207         if (dev_priv->dpll.mgr && !PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
8208                 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
8209                 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
8210                 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
8211                 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
8212                 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
8213                 PIPE_CONF_CHECK_X(dpll_hw_state.spll);
8214                 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
8215                 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
8216                 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
8217                 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
8218                 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
8219                 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
8220                 PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
8221                 PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
8222                 PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
8223                 PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
8224                 PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
8225                 PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
8226                 PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
8227                 PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
8228                 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
8229                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
8230                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
8231                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
8232                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
8233                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
8234                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
8235                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
8236                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
8237                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
8238                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
8239         }
8240
8241         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
8242                 PIPE_CONF_CHECK_X(dsi_pll.ctrl);
8243                 PIPE_CONF_CHECK_X(dsi_pll.div);
8244
8245                 if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
8246                         PIPE_CONF_CHECK_I(pipe_bpp);
8247
8248                 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
8249                 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
8250                 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
8251
8252                 PIPE_CONF_CHECK_I(min_voltage_level);
8253         }
8254
8255         if (fastset && (current_config->has_psr || pipe_config->has_psr))
8256                 PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
8257                                             ~intel_hdmi_infoframe_enable(DP_SDP_VSC));
8258         else
8259                 PIPE_CONF_CHECK_X(infoframes.enable);
8260
8261         PIPE_CONF_CHECK_X(infoframes.gcp);
8262         PIPE_CONF_CHECK_INFOFRAME(avi);
8263         PIPE_CONF_CHECK_INFOFRAME(spd);
8264         PIPE_CONF_CHECK_INFOFRAME(hdmi);
8265         PIPE_CONF_CHECK_INFOFRAME(drm);
8266         PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
8267
8268         PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
8269         PIPE_CONF_CHECK_I(master_transcoder);
8270         PIPE_CONF_CHECK_BOOL(bigjoiner);
8271         PIPE_CONF_CHECK_BOOL(bigjoiner_slave);
8272         PIPE_CONF_CHECK_P(bigjoiner_linked_crtc);
8273
8274         PIPE_CONF_CHECK_I(dsc.compression_enable);
8275         PIPE_CONF_CHECK_I(dsc.dsc_split);
8276         PIPE_CONF_CHECK_I(dsc.compressed_bpp);
8277
8278         PIPE_CONF_CHECK_BOOL(splitter.enable);
8279         PIPE_CONF_CHECK_I(splitter.link_count);
8280         PIPE_CONF_CHECK_I(splitter.pixel_overlap);
8281
8282         PIPE_CONF_CHECK_I(mst_master_transcoder);
8283
8284         PIPE_CONF_CHECK_BOOL(vrr.enable);
8285         PIPE_CONF_CHECK_I(vrr.vmin);
8286         PIPE_CONF_CHECK_I(vrr.vmax);
8287         PIPE_CONF_CHECK_I(vrr.flipline);
8288         PIPE_CONF_CHECK_I(vrr.pipeline_full);
8289         PIPE_CONF_CHECK_I(vrr.guardband);
8290
8291 #undef PIPE_CONF_CHECK_X
8292 #undef PIPE_CONF_CHECK_I
8293 #undef PIPE_CONF_CHECK_BOOL
8294 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
8295 #undef PIPE_CONF_CHECK_P
8296 #undef PIPE_CONF_CHECK_FLAGS
8297 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
8298 #undef PIPE_CONF_CHECK_COLOR_LUT
8299 #undef PIPE_CONF_QUIRK
8300
8301         return ret;
8302 }
8303
8304 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
8305                                            const struct intel_crtc_state *pipe_config)
8306 {
8307         if (pipe_config->has_pch_encoder) {
8308                 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
8309                                                             &pipe_config->fdi_m_n);
8310                 int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
8311
8312                 /*
8313                  * FDI already provided one idea for the dotclock.
8314                  * Yell if the encoder disagrees.
8315                  */
8316                 drm_WARN(&dev_priv->drm,
8317                          !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
8318                          "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
8319                          fdi_dotclock, dotclock);
8320         }
8321 }
8322
/*
 * Compare the software-computed SKL+ watermark and DDB state of @crtc
 * against what is actually programmed in the hardware, logging every
 * mismatch with drm_err(). No-op before display version 9 and on
 * inactive crtcs.
 */
static void verify_wm_state(struct intel_crtc *crtc,
			    struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	/* Heap-allocated hw snapshot: the arrays are too large for the stack. */
	struct skl_hw_state {
		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
		struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
		struct skl_pipe_wm wm;
	} *hw;
	const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
	int level, max_level = ilk_wm_max_level(dev_priv);
	struct intel_plane *plane;
	u8 hw_enabled_slices;

	if (DISPLAY_VER(dev_priv) < 9 || !new_crtc_state->hw.active)
		return;

	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		return;

	/* Read back the programmed watermark and DDB allocation state. */
	skl_pipe_wm_get_hw_state(crtc, &hw->wm);

	skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);

	hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);

	/* DBUF slice bookkeeping is only checked on display version 11+. */
	if (DISPLAY_VER(dev_priv) >= 11 &&
	    hw_enabled_slices != dev_priv->dbuf.enabled_slices)
		drm_err(&dev_priv->drm,
			"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
			dev_priv->dbuf.enabled_slices,
			hw_enabled_slices);

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
		const struct skl_wm_level *hw_wm_level, *sw_wm_level;

		/* Watermarks */
		for (level = 0; level <= max_level; level++) {
			hw_wm_level = &hw->wm.planes[plane->id].wm[level];
			sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);

			if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
				continue;

			drm_err(&dev_priv->drm,
				"[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				plane->base.base.id, plane->base.name, level,
				sw_wm_level->enable,
				sw_wm_level->blocks,
				sw_wm_level->lines,
				hw_wm_level->enable,
				hw_wm_level->blocks,
				hw_wm_level->lines);
		}

		/* Transition watermark. */
		hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
		sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);

		if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
			drm_err(&dev_priv->drm,
				"[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				plane->base.base.id, plane->base.name,
				sw_wm_level->enable,
				sw_wm_level->blocks,
				sw_wm_level->lines,
				hw_wm_level->enable,
				hw_wm_level->blocks,
				hw_wm_level->lines);
		}

		/* SAGV WM0: only checked where the hw has dedicated SAGV registers. */
		hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
		sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;

		if (HAS_HW_SAGV_WM(dev_priv) &&
		    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
			drm_err(&dev_priv->drm,
				"[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				plane->base.base.id, plane->base.name,
				sw_wm_level->enable,
				sw_wm_level->blocks,
				sw_wm_level->lines,
				hw_wm_level->enable,
				hw_wm_level->blocks,
				hw_wm_level->lines);
		}

		/* SAGV transition watermark. */
		hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
		sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;

		if (HAS_HW_SAGV_WM(dev_priv) &&
		    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
			drm_err(&dev_priv->drm,
				"[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				plane->base.base.id, plane->base.name,
				sw_wm_level->enable,
				sw_wm_level->blocks,
				sw_wm_level->lines,
				hw_wm_level->enable,
				hw_wm_level->blocks,
				hw_wm_level->lines);
		}

		/* DDB */
		hw_ddb_entry = &hw->ddb_y[plane->id];
		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane->id];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			drm_err(&dev_priv->drm,
				"[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
				plane->base.base.id, plane->base.name,
				sw_ddb_entry->start, sw_ddb_entry->end,
				hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	kfree(hw);
}
8442
/*
 * Verify every connector in @state assigned to @crtc: the sw connector
 * state must be consistent with the crtc state, and the atomic
 * best_encoder must match the legacy encoder pointer.
 */
static void
verify_connector_state(struct intel_atomic_state *state,
		       struct intel_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;

	for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
		struct drm_encoder *encoder = connector->encoder;
		struct intel_crtc_state *crtc_state = NULL;

		/*
		 * NOTE(review): callers also pass @crtc == NULL (see
		 * intel_modeset_verify_disabled()), in which case
		 * &crtc->base is computed from a NULL pointer and this
		 * presumably selects connectors with no crtc assigned -
		 * confirm 'base' is the first member of struct intel_crtc.
		 */
		if (new_conn_state->crtc != &crtc->base)
			continue;

		if (crtc)
			crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

		intel_connector_verify_state(crtc_state, new_conn_state);

		I915_STATE_WARN(new_conn_state->best_encoder != encoder,
		     "connector's atomic encoder doesn't match legacy encoder\n");
	}
}
8467
/*
 * For every encoder, cross-check the sw enabled/crtc tracking against
 * the connector states in @state and against the hw. Only encoders
 * referenced by at least one connector in @state (before or after the
 * commit) are verified.
 */
static void
verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
{
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	int i;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		/*
		 * found: some connector in @state referenced this encoder
		 * (old or new state); enabled: a connector still uses it
		 * in the new state.
		 */
		bool enabled = false, found = false;
		enum pipe pipe;

		drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
			    encoder->base.base.id,
			    encoder->base.name);

		for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
						   new_conn_state, i) {
			if (old_conn_state->best_encoder == &encoder->base)
				found = true;

			if (new_conn_state->best_encoder != &encoder->base)
				continue;
			found = enabled = true;

			I915_STATE_WARN(new_conn_state->crtc !=
					encoder->base.crtc,
			     "connector's crtc doesn't match encoder crtc\n");
		}

		/* Encoder not touched by this commit; nothing to verify. */
		if (!found)
			continue;

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);

		if (!encoder->base.crtc) {
			bool active;

			/* A detached encoder must also be off in the hw. */
			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
			     "encoder detached but still enabled on pipe %c.\n",
			     pipe_name(pipe));
		}
	}
}
8516
/*
 * Read the crtc configuration back from the hardware and compare it
 * against the committed sw state, warning on any mismatch.
 * @old_crtc_state is no longer needed after the commit and is recycled
 * as scratch storage for the hw readout.
 */
static void
verify_crtc_state(struct intel_crtc *crtc,
		  struct intel_crtc_state *old_crtc_state,
		  struct intel_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc_state *pipe_config = old_crtc_state;
	struct drm_atomic_state *state = old_crtc_state->uapi.state;
	struct intel_crtc *master = crtc;

	/*
	 * Wipe the old state so it can hold the hw readout; only the
	 * uapi.state back-pointer is preserved across the reset.
	 */
	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
	intel_crtc_free_hw_state(old_crtc_state);
	intel_crtc_state_reset(old_crtc_state, crtc);
	old_crtc_state->uapi.state = state;

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
		    crtc->base.name);

	pipe_config->hw.enable = new_crtc_state->hw.enable;

	intel_crtc_get_pipe_config(pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv) && pipe_config->hw.active)
		pipe_config->hw.active = new_crtc_state->hw.active;

	I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
			"crtc active state doesn't match with hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, pipe_config->hw.active);

	I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
			"transitional active state does not match atomic hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, crtc->active);

	/* A bigjoiner slave has no encoders of its own; use the master's. */
	if (new_crtc_state->bigjoiner_slave)
		master = new_crtc_state->bigjoiner_linked_crtc;

	for_each_encoder_on_crtc(dev, &master->base, encoder) {
		enum pipe pipe;
		bool active;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->hw.active,
				"[ENCODER:%i] active %i with crtc active %i\n",
				encoder->base.base.id, active,
				new_crtc_state->hw.active);

		I915_STATE_WARN(active && master->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		if (active)
			intel_encoder_get_config(encoder, pipe_config);
	}

	if (!new_crtc_state->hw.active)
		return;

	if (new_crtc_state->bigjoiner_slave)
		/* No PLLs set for slave */
		pipe_config->shared_dpll = NULL;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	/* Full (non-fastset) comparison of sw state vs. the hw readout. */
	if (!intel_pipe_config_compare(new_crtc_state,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
		intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
	}
}
8592
8593 static void
8594 intel_verify_planes(struct intel_atomic_state *state)
8595 {
8596         struct intel_plane *plane;
8597         const struct intel_plane_state *plane_state;
8598         int i;
8599
8600         for_each_new_intel_plane_in_state(state, plane,
8601                                           plane_state, i)
8602                 assert_plane(plane, plane_state->planar_slave ||
8603                              plane_state->uapi.visible);
8604 }
8605
/*
 * Cross-check the sw bookkeeping of @pll (on/active_mask/pipe_mask and
 * the cached hw state) against the hardware. With @crtc == NULL only
 * the global pll accounting is checked; otherwise the pll's masks are
 * also verified against the given crtc's pipe.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	u8 pipe_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);

	active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on PLLs are exempt from the on/active consistency checks. */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
		     "pll is on but not used by any active pipe\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);
	}

	if (!crtc) {
		/* No crtc given: only check global user/reference accounting. */
		I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
				"more active pll users than references: 0x%x vs 0x%x\n",
				pll->active_mask, pll->state.pipe_mask);

		return;
	}

	pipe_mask = BIT(crtc->pipe);

	/* An active crtc must appear in the pll's active mask, and vice versa. */
	if (new_crtc_state->hw.active)
		I915_STATE_WARN(!(pll->active_mask & pipe_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & pipe_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);

	I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
			pipe_mask, pll->state.pipe_mask);

	/* The cached sw hw-state must match what is actually programmed. */
	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
8660
8661 static void
8662 verify_shared_dpll_state(struct intel_crtc *crtc,
8663                          struct intel_crtc_state *old_crtc_state,
8664                          struct intel_crtc_state *new_crtc_state)
8665 {
8666         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8667
8668         if (new_crtc_state->shared_dpll)
8669                 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
8670
8671         if (old_crtc_state->shared_dpll &&
8672             old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
8673                 u8 pipe_mask = BIT(crtc->pipe);
8674                 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
8675
8676                 I915_STATE_WARN(pll->active_mask & pipe_mask,
8677                                 "pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
8678                                 pipe_name(crtc->pipe), pll->active_mask);
8679                 I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
8680                                 "pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
8681                                 pipe_name(crtc->pipe), pll->state.pipe_mask);
8682         }
8683 }
8684
/*
 * On DG2, read the MPLLB registers back through the crtc's encoder and
 * compare them against the sw-computed state, reporting mismatches via
 * pipe_config_mismatch(). Skipped for inactive crtcs and bigjoiner
 * slaves (which have no PLL of their own).
 */
static void
verify_mpllb_state(struct intel_atomic_state *state,
		   struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_mpllb_state mpllb_hw_state = { 0 };
	struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->mpllb_state;
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct intel_encoder *encoder;

	if (!IS_DG2(i915))
		return;

	if (!new_crtc_state->hw.active)
		return;

	if (new_crtc_state->bigjoiner_slave)
		return;

	encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
	intel_mpllb_readout_hw_state(encoder, &mpllb_hw_state);

/* Compare one sw field against its hw readout and report any mismatch. */
#define MPLLB_CHECK(name) do { \
	if (mpllb_sw_state->name != mpllb_hw_state.name) { \
		pipe_config_mismatch(false, crtc, "MPLLB:" __stringify(name), \
				     "(expected 0x%08x, found 0x%08x)", \
				     mpllb_sw_state->name, \
				     mpllb_hw_state.name); \
	} \
} while (0)

	MPLLB_CHECK(mpllb_cp);
	MPLLB_CHECK(mpllb_div);
	MPLLB_CHECK(mpllb_div2);
	MPLLB_CHECK(mpllb_fracn1);
	MPLLB_CHECK(mpllb_fracn2);
	MPLLB_CHECK(mpllb_sscen);
	MPLLB_CHECK(mpllb_sscstep);

	/*
	 * ref_control is handled by the hardware/firmware and never
	 * programmed by the software, but the proper values are supplied
	 * in the bspec for verification purposes.
	 */
	MPLLB_CHECK(ref_control);

#undef MPLLB_CHECK
}
8733
8734 static void
8735 intel_modeset_verify_crtc(struct intel_crtc *crtc,
8736                           struct intel_atomic_state *state,
8737                           struct intel_crtc_state *old_crtc_state,
8738                           struct intel_crtc_state *new_crtc_state)
8739 {
8740         if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
8741                 return;
8742
8743         verify_wm_state(crtc, new_crtc_state);
8744         verify_connector_state(state, crtc);
8745         verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
8746         verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
8747         verify_mpllb_state(state, new_crtc_state);
8748 }
8749
8750 static void
8751 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
8752 {
8753         int i;
8754
8755         for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
8756                 verify_single_dpll_state(dev_priv,
8757                                          &dev_priv->dpll.shared_dplls[i],
8758                                          NULL, NULL);
8759 }
8760
/*
 * Run the hw/sw state cross-checks that are not tied to a specific
 * crtc: encoders, connectors without a crtc, and all shared DPLLs.
 */
static void
intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
			      struct intel_atomic_state *state)
{
	verify_encoder_state(dev_priv, state);
	verify_connector_state(state, NULL);
	verify_disabled_dpll_state(dev_priv);
}
8769
/*
 * intel_modeset_all_pipes - force a full modeset on all active pipes
 * @state: atomic state to add the crtcs to
 *
 * Adds every crtc to @state and flags each active one (that is not
 * already doing a modeset) for a full modeset, pulling in its affected
 * connectors and planes as well.
 *
 * Returns: 0 on success, or a negative error code on failure.
 */
int intel_modeset_all_pipes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	/*
	 * Add all pipes to the state, and force
	 * a modeset on all the active ones.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		/* Inactive or already-modesetting crtcs need no extra forcing. */
		if (!crtc_state->hw.active ||
		    drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
			continue;

		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base,
							 &crtc->base);
		if (ret)
			return ret;

		ret = intel_atomic_add_affected_planes(state, crtc);
		if (ret)
			return ret;

		/* Force all currently-active planes to be reprogrammed. */
		crtc_state->update_planes |= crtc_state->active_planes;
	}

	return 0;
}
8807
/*
 * Update the crtc's vblank timestamping constants, mode flags and
 * scanline counter offset from @crtc_state. When VRR is enabled, the
 * vtotal/vblank values are adjusted to the VRR limits before computing
 * the timestamping constants.
 */
static void
intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	/* Local copy: the VRR adjustments below must not touch the state. */
	struct drm_display_mode adjusted_mode =
		crtc_state->hw.adjusted_mode;

	if (crtc_state->vrr.enable) {
		adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
		adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
		adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
		crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
	}

	drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);

	crtc->mode_flags = crtc_state->mode_flags;

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 *
	 * On VLV/CHV DSI the scanline counter would appear to increment
	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
	 * that means we can't tell whether we're in vblank or not while
	 * we're on that particular line. We must still set scanline_offset
	 * to 1 so that the vblank timestamps come out correct when we query
	 * the scanline counter from within the vblank interrupt handler.
	 * However if queried just before the start of vblank we'll get an
	 * answer that's slightly in the future.
	 */
	if (DISPLAY_VER(dev_priv) == 2) {
		int vtotal;

		vtotal = adjusted_mode.crtc_vtotal;
		if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev_priv) &&
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else {
		crtc->scanline_offset = 1;
	}
}
8869
8870 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
8871 {
8872         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8873         struct intel_crtc_state *new_crtc_state;
8874         struct intel_crtc *crtc;
8875         int i;
8876
8877         if (!dev_priv->display.crtc_compute_clock)
8878                 return;
8879
8880         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8881                 if (!intel_crtc_needs_modeset(new_crtc_state))
8882                         continue;
8883
8884                 intel_release_shared_dplls(state, crtc);
8885         }
8886 }
8887
8888 /*
8889  * This implements the workaround described in the "notes" section of the mode
8890  * set sequence documentation. When going from no pipes or single pipe to
8891  * multiple pipes, and planes are enabled after the pipe, we need to wait at
8892  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
8893  */
static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled during the modeset */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->hw.active ||
		    !intel_crtc_needs_modeset(crtc_state))
			continue;

		/* remember only the first two crtcs being enabled */
		if (first_crtc_state) {
			other_crtc_state = crtc_state;
			break;
		} else {
			first_crtc_state = crtc_state;
			first_pipe = crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->base.dev, crtc) {
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		/* Clear any stale w/a pipe from a previous commit. */
		crtc_state->hsw_workaround_pipe = INVALID_PIPE;

		if (!crtc_state->hw.active ||
		    intel_crtc_needs_modeset(crtc_state))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = crtc->pipe;
	}

	/*
	 * Exactly one pipe already enabled: the first crtc being enabled
	 * must wait on it. Zero pipes enabled: the second crtc being
	 * enabled (if any) waits on the first one instead.
	 */
	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
8948
8949 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
8950                            u8 active_pipes)
8951 {
8952         const struct intel_crtc_state *crtc_state;
8953         struct intel_crtc *crtc;
8954         int i;
8955
8956         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
8957                 if (crtc_state->hw.active)
8958                         active_pipes |= BIT(crtc->pipe);
8959                 else
8960                         active_pipes &= ~BIT(crtc->pipe);
8961         }
8962
8963         return active_pipes;
8964 }
8965
8966 static int intel_modeset_checks(struct intel_atomic_state *state)
8967 {
8968         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8969
8970         state->modeset = true;
8971
8972         if (IS_HASWELL(dev_priv))
8973                 return hsw_mode_set_planes_workaround(state);
8974
8975         return 0;
8976 }
8977
8978 /*
8979  * Handle calculation of various watermark data at the end of the atomic check
8980  * phase.  The code here should be run after the per-crtc and per-plane 'check'
8981  * handlers to ensure that all derived state has been updated.
8982  */
8983 static int calc_watermark_data(struct intel_atomic_state *state)
8984 {
8985         struct drm_device *dev = state->base.dev;
8986         struct drm_i915_private *dev_priv = to_i915(dev);
8987
8988         /* Is there platform-specific watermark information to calculate? */
8989         if (dev_priv->display.compute_global_watermarks)
8990                 return dev_priv->display.compute_global_watermarks(state);
8991
8992         return 0;
8993 }
8994
8995 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
8996                                      struct intel_crtc_state *new_crtc_state)
8997 {
8998         if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
8999                 return;
9000
9001         new_crtc_state->uapi.mode_changed = false;
9002         new_crtc_state->update_pipe = true;
9003 }
9004
9005 static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
9006                                     struct intel_crtc_state *new_crtc_state)
9007 {
9008         /*
9009          * If we're not doing the full modeset we want to
9010          * keep the current M/N values as they may be
9011          * sufficiently different to the computed values
9012          * to cause problems.
9013          *
9014          * FIXME: should really copy more fuzzy state here
9015          */
9016         new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
9017         new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
9018         new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
9019         new_crtc_state->has_drrs = old_crtc_state->has_drrs;
9020 }
9021
9022 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
9023                                           struct intel_crtc *crtc,
9024                                           u8 plane_ids_mask)
9025 {
9026         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9027         struct intel_plane *plane;
9028
9029         for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
9030                 struct intel_plane_state *plane_state;
9031
9032                 if ((plane_ids_mask & BIT(plane->id)) == 0)
9033                         continue;
9034
9035                 plane_state = intel_atomic_get_plane_state(state, plane);
9036                 if (IS_ERR(plane_state))
9037                         return PTR_ERR(plane_state);
9038         }
9039
9040         return 0;
9041 }
9042
9043 int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
9044                                      struct intel_crtc *crtc)
9045 {
9046         const struct intel_crtc_state *old_crtc_state =
9047                 intel_atomic_get_old_crtc_state(state, crtc);
9048         const struct intel_crtc_state *new_crtc_state =
9049                 intel_atomic_get_new_crtc_state(state, crtc);
9050
9051         return intel_crtc_add_planes_to_state(state, crtc,
9052                                               old_crtc_state->enabled_planes |
9053                                               new_crtc_state->enabled_planes);
9054 }
9055
9056 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
9057 {
9058         /* See {hsw,vlv,ivb}_plane_ratio() */
9059         return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
9060                 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
9061                 IS_IVYBRIDGE(dev_priv);
9062 }
9063
9064 static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
9065                                            struct intel_crtc *crtc,
9066                                            struct intel_crtc *other)
9067 {
9068         const struct intel_plane_state *plane_state;
9069         struct intel_plane *plane;
9070         u8 plane_ids = 0;
9071         int i;
9072
9073         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
9074                 if (plane->pipe == crtc->pipe)
9075                         plane_ids |= BIT(plane->id);
9076         }
9077
9078         return intel_crtc_add_planes_to_state(state, other, plane_ids);
9079 }
9080
9081 static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
9082 {
9083         const struct intel_crtc_state *crtc_state;
9084         struct intel_crtc *crtc;
9085         int i;
9086
9087         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9088                 int ret;
9089
9090                 if (!crtc_state->bigjoiner)
9091                         continue;
9092
9093                 ret = intel_crtc_add_bigjoiner_planes(state, crtc,
9094                                                       crtc_state->bigjoiner_linked_crtc);
9095                 if (ret)
9096                         return ret;
9097         }
9098
9099         return 0;
9100 }
9101
/*
 * Run the per-plane atomic checks, after first pulling into the state
 * any extra planes those checks depend on: ICL linked Y/UV planes,
 * bigjoiner slave planes, and (on some platforms) planes whose minimum
 * cdclk depends on the number of active planes.
 */
static int intel_atomic_check_planes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i, ret;

	ret = icl_add_linked_planes(state);
	if (ret)
		return ret;

	ret = intel_bigjoiner_add_affected_planes(state);
	if (ret)
		return ret;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_atomic_check(state, plane);
		if (ret) {
			drm_dbg_atomic(&dev_priv->drm,
				       "[PLANE:%d:%s] atomic driver check failed\n",
				       plane->base.base.id, plane->base.name);
			return ret;
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		u8 old_active_planes, new_active_planes;

		ret = icl_check_nv12_planes(new_crtc_state);
		if (ret)
			return ret;

		/*
		 * On some platforms the number of active planes affects
		 * the planes' minimum cdclk calculation. Add such planes
		 * to the state before we compute the minimum cdclk.
		 */
		if (!active_planes_affects_min_cdclk(dev_priv))
			continue;

		/* The cursor plane is excluded from the count here. */
		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

		/* Only the count matters, not which planes changed. */
		if (hweight8(old_active_planes) == hweight8(new_active_planes))
			continue;

		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
		if (ret)
			return ret;
	}

	return 0;
}
9158
/*
 * Decide whether the cdclk needs to be recomputed for this commit,
 * based on per-plane minimum cdclk, a changed force_min_cdclk, and the
 * bandwidth-derived minimum cdclk. Only ever sets *need_cdclk_calc to
 * true; it never clears a value set by an earlier check.
 */
static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
				    bool *need_cdclk_calc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_cdclk_state *old_cdclk_state;
	const struct intel_cdclk_state *new_cdclk_state;
	struct intel_plane_state *plane_state;
	struct intel_bw_state *new_bw_state;
	struct intel_plane *plane;
	int min_cdclk = 0;
	enum pipe pipe;
	int ret;
	int i;
	/*
	 * active_planes bitmask has been updated, and potentially
	 * affected planes are part of the state. We can now
	 * compute the minimum cdclk for each plane.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
		if (ret)
			return ret;
	}

	/* old/new cdclk state are both NULL or both valid here. */
	old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
	new_cdclk_state = intel_atomic_get_new_cdclk_state(state);

	if (new_cdclk_state &&
	    old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
		*need_cdclk_calc = true;

	ret = dev_priv->display.bw_calc_min_cdclk(state);
	if (ret)
		return ret;

	new_bw_state = intel_atomic_get_new_bw_state(state);

	/* Without both cdclk and bw state there is nothing to compare. */
	if (!new_cdclk_state || !new_bw_state)
		return 0;

	for_each_pipe(dev_priv, pipe) {
		/* Track the highest per-pipe minimum seen so far. */
		min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk);

		/*
		 * Currently do this change only if we need to increase
		 */
		if (new_bw_state->min_cdclk > min_cdclk)
			*need_cdclk_calc = true;
	}

	return 0;
}
9211
9212 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
9213 {
9214         struct intel_crtc_state *crtc_state;
9215         struct intel_crtc *crtc;
9216         int i;
9217
9218         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9219                 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
9220                 int ret;
9221
9222                 ret = intel_crtc_atomic_check(state, crtc);
9223                 if (ret) {
9224                         drm_dbg_atomic(&i915->drm,
9225                                        "[CRTC:%d:%s] atomic driver check failed\n",
9226                                        crtc->base.base.id, crtc->base.name);
9227                         return ret;
9228                 }
9229         }
9230
9231         return 0;
9232 }
9233
9234 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
9235                                                u8 transcoders)
9236 {
9237         const struct intel_crtc_state *new_crtc_state;
9238         struct intel_crtc *crtc;
9239         int i;
9240
9241         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
9242                 if (new_crtc_state->hw.enable &&
9243                     transcoders & BIT(new_crtc_state->cpu_transcoder) &&
9244                     intel_crtc_needs_modeset(new_crtc_state))
9245                         return true;
9246         }
9247
9248         return false;
9249 }
9250
/*
 * Validate and establish the bigjoiner master/slave link for @crtc.
 * Returns 0, -EINVAL when the required slave crtc is missing or already
 * claimed, or an error from acquiring the slave crtc state.
 */
static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
					struct intel_crtc *crtc,
					struct intel_crtc_state *old_crtc_state,
					struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
	struct intel_crtc *slave, *master;

	/*
	 * This crtc was a bigjoiner slave: unless its old master is also
	 * in the state and doing a full modeset, the master still claims
	 * this crtc and enabling it independently is not allowed.
	 */
	if (old_crtc_state->bigjoiner_slave) {
		slave = crtc;
		master = old_crtc_state->bigjoiner_linked_crtc;
		master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
		if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
			goto claimed;
	}

	/* Nothing more to do unless the new state wants bigjoiner. */
	if (!new_crtc_state->bigjoiner)
		return 0;

	slave = intel_dsc_get_bigjoiner_secondary(crtc);
	if (!slave) {
		DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires "
			      "CRTC + 1 to be used, doesn't exist\n",
			      crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	new_crtc_state->bigjoiner_linked_crtc = slave;
	slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave);
	master = crtc;
	if (IS_ERR(slave_crtc_state))
		return PTR_ERR(slave_crtc_state);

	/* master being enabled, slave was already configured? */
	if (slave_crtc_state->uapi.enable)
		goto claimed;

	DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n",
		      slave->base.base.id, slave->base.name);

	return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state);

claimed:
	DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
		      "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
		      slave->base.base.id, slave->base.name,
		      master->base.base.id, master->base.name);
	return -EINVAL;
}
9301
9302 static void kill_bigjoiner_slave(struct intel_atomic_state *state,
9303                                  struct intel_crtc_state *master_crtc_state)
9304 {
9305         struct intel_crtc_state *slave_crtc_state =
9306                 intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc);
9307
9308         slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
9309         slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
9310         slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
9311         intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
9312 }
9313
9314 /**
9315  * DOC: asynchronous flip implementation
9316  *
9317  * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
9318  * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
9319  * Correspondingly, support is currently added for primary plane only.
9320  *
9321  * Async flip can only change the plane surface address, so anything else
9322  * changing is rejected from the intel_atomic_check_async() function.
9323  * Once this check is cleared, flip done interrupt is enabled using
9324  * the intel_crtc_enable_flip_done() function.
9325  *
9326  * As soon as the surface address register is written, flip done interrupt is
 * generated and the requested events are sent to userspace in the interrupt
9328  * handler itself. The timestamp and sequence sent during the flip done event
9329  * correspond to the last vblank and have no relation to the actual time when
9330  * the flip done event was sent.
9331  */
/*
 * Validate that @state qualifies as an async flip: effectively only the
 * plane surface address may change. Any modeset, inactive crtc, change
 * to the active plane set, or change to stride/modifier/format/rotation/
 * size/position/alpha/blend/color properties is rejected with -EINVAL.
 */
static int intel_atomic_check_async(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	const struct intel_plane_state *new_plane_state, *old_plane_state;
	struct intel_crtc *crtc;
	struct intel_plane *plane;
	int i;

	/* Per-crtc restrictions first. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (intel_crtc_needs_modeset(new_crtc_state)) {
			drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
			return -EINVAL;
		}

		if (!new_crtc_state->hw.active) {
			drm_dbg_kms(&i915->drm, "CRTC inactive\n");
			return -EINVAL;
		}
		if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
			drm_dbg_kms(&i915->drm,
				    "Active planes cannot be changed during async flip\n");
			return -EINVAL;
		}
	}

	/* Then per-plane restrictions. */
	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		/*
		 * TODO: Async flip is only supported through the page flip IOCTL
		 * as of now. So support currently added for primary plane only.
		 * Support for other planes on platforms on which supports
		 * this(vlv/chv and icl+) should be added when async flip is
		 * enabled in the atomic IOCTL path.
		 */
		if (!plane->async_flip)
			return -EINVAL;

		/*
		 * FIXME: This check is kept generic for all platforms.
		 * Need to verify this for all gen9 platforms to enable
		 * this selectively if required.
		 */
		switch (new_plane_state->hw.fb->modifier) {
		case I915_FORMAT_MOD_X_TILED:
		case I915_FORMAT_MOD_Y_TILED:
		case I915_FORMAT_MOD_Yf_TILED:
			break;
		default:
			drm_dbg_kms(&i915->drm,
				    "Linear memory/CCS does not support async flips\n");
			return -EINVAL;
		}

		if (old_plane_state->view.color_plane[0].stride !=
		    new_plane_state->view.color_plane[0].stride) {
			drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.fb->modifier !=
		    new_plane_state->hw.fb->modifier) {
			drm_dbg_kms(&i915->drm,
				    "Framebuffer modifiers cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.fb->format !=
		    new_plane_state->hw.fb->format) {
			drm_dbg_kms(&i915->drm,
				    "Framebuffer format cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.rotation !=
		    new_plane_state->hw.rotation) {
			drm_dbg_kms(&i915->drm, "Rotation cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
		    !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
			drm_dbg_kms(&i915->drm,
				    "Plane size/co-ordinates cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
			drm_dbg_kms(&i915->drm, "Alpha value cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.pixel_blend_mode !=
		    new_plane_state->hw.pixel_blend_mode) {
			drm_dbg_kms(&i915->drm,
				    "Pixel blend mode cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
			drm_dbg_kms(&i915->drm,
				    "Color encoding cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
			drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n");
			return -EINVAL;
		}
	}

	return 0;
}
9446
/*
 * Pull the linked crtc of every bigjoiner pair into the state, force a
 * modeset on it when its partner needs one, and tear down stale links
 * (they may be re-established later in the check phase).
 */
static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc_state *linked_crtc_state;
		struct intel_crtc *linked_crtc;
		int ret;

		if (!crtc_state->bigjoiner)
			continue;

		/* The linked crtc is always added to the state... */
		linked_crtc = crtc_state->bigjoiner_linked_crtc;
		linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc);
		if (IS_ERR(linked_crtc_state))
			return PTR_ERR(linked_crtc_state);

		/* ...but only forced to a modeset when this crtc needs one. */
		if (!intel_crtc_needs_modeset(crtc_state))
			continue;

		linked_crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base,
							 &linked_crtc->base);
		if (ret)
			return ret;

		ret = intel_atomic_add_affected_planes(state, linked_crtc);
		if (ret)
			return ret;
	}

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		/* Kill old bigjoiner link, we may re-establish afterwards */
		if (intel_crtc_needs_modeset(crtc_state) &&
		    crtc_state->bigjoiner && !crtc_state->bigjoiner_slave)
			kill_bigjoiner_slave(state, crtc_state);
	}

	return 0;
}
9490
9491 /**
9492  * intel_atomic_check - validate state object
9493  * @dev: drm device
9494  * @_state: state to validate
9495  */
9496 static int intel_atomic_check(struct drm_device *dev,
9497                               struct drm_atomic_state *_state)
9498 {
9499         struct drm_i915_private *dev_priv = to_i915(dev);
9500         struct intel_atomic_state *state = to_intel_atomic_state(_state);
9501         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
9502         struct intel_crtc *crtc;
9503         int ret, i;
9504         bool any_ms = false;
9505
9506         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9507                                             new_crtc_state, i) {
9508                 if (new_crtc_state->inherited != old_crtc_state->inherited)
9509                         new_crtc_state->uapi.mode_changed = true;
9510         }
9511
9512         intel_vrr_check_modeset(state);
9513
9514         ret = drm_atomic_helper_check_modeset(dev, &state->base);
9515         if (ret)
9516                 goto fail;
9517
9518         ret = intel_bigjoiner_add_affected_crtcs(state);
9519         if (ret)
9520                 goto fail;
9521
9522         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9523                                             new_crtc_state, i) {
9524                 if (!intel_crtc_needs_modeset(new_crtc_state)) {
9525                         /* Light copy */
9526                         intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state);
9527
9528                         continue;
9529                 }
9530
9531                 if (!new_crtc_state->uapi.enable) {
9532                         if (!new_crtc_state->bigjoiner_slave) {
9533                                 intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
9534                                 any_ms = true;
9535                         }
9536                         continue;
9537                 }
9538
9539                 ret = intel_crtc_prepare_cleared_state(state, new_crtc_state);
9540                 if (ret)
9541                         goto fail;
9542
9543                 ret = intel_modeset_pipe_config(state, new_crtc_state);
9544                 if (ret)
9545                         goto fail;
9546
9547                 ret = intel_atomic_check_bigjoiner(state, crtc, old_crtc_state,
9548                                                    new_crtc_state);
9549                 if (ret)
9550                         goto fail;
9551         }
9552
9553         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9554                                             new_crtc_state, i) {
9555                 if (!intel_crtc_needs_modeset(new_crtc_state))
9556                         continue;
9557
9558                 ret = intel_modeset_pipe_config_late(new_crtc_state);
9559                 if (ret)
9560                         goto fail;
9561
9562                 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
9563         }
9564
9565         /**
9566          * Check if fastset is allowed by external dependencies like other
9567          * pipes and transcoders.
9568          *
9569          * Right now it only forces a fullmodeset when the MST master
9570          * transcoder did not changed but the pipe of the master transcoder
9571          * needs a fullmodeset so all slaves also needs to do a fullmodeset or
9572          * in case of port synced crtcs, if one of the synced crtcs
9573          * needs a full modeset, all other synced crtcs should be
9574          * forced a full modeset.
9575          */
9576         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
9577                 if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
9578                         continue;
9579
9580                 if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
9581                         enum transcoder master = new_crtc_state->mst_master_transcoder;
9582
9583                         if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
9584                                 new_crtc_state->uapi.mode_changed = true;
9585                                 new_crtc_state->update_pipe = false;
9586                         }
9587                 }
9588
9589                 if (is_trans_port_sync_mode(new_crtc_state)) {
9590                         u8 trans = new_crtc_state->sync_mode_slaves_mask;
9591
9592                         if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
9593                                 trans |= BIT(new_crtc_state->master_transcoder);
9594
9595                         if (intel_cpu_transcoders_need_modeset(state, trans)) {
9596                                 new_crtc_state->uapi.mode_changed = true;
9597                                 new_crtc_state->update_pipe = false;
9598                         }
9599                 }
9600
9601                 if (new_crtc_state->bigjoiner) {
9602                         struct intel_crtc_state *linked_crtc_state =
9603                                 intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc);
9604
9605                         if (intel_crtc_needs_modeset(linked_crtc_state)) {
9606                                 new_crtc_state->uapi.mode_changed = true;
9607                                 new_crtc_state->update_pipe = false;
9608                         }
9609                 }
9610         }
9611
9612         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9613                                             new_crtc_state, i) {
9614                 if (intel_crtc_needs_modeset(new_crtc_state)) {
9615                         any_ms = true;
9616                         continue;
9617                 }
9618
9619                 if (!new_crtc_state->update_pipe)
9620                         continue;
9621
9622                 intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
9623         }
9624
9625         if (any_ms && !check_digital_port_conflicts(state)) {
9626                 drm_dbg_kms(&dev_priv->drm,
9627                             "rejecting conflicting digital port configuration\n");
9628                 ret = -EINVAL;
9629                 goto fail;
9630         }
9631
9632         ret = drm_dp_mst_atomic_check(&state->base);
9633         if (ret)
9634                 goto fail;
9635
9636         ret = intel_atomic_check_planes(state);
9637         if (ret)
9638                 goto fail;
9639
9640         intel_fbc_choose_crtc(dev_priv, state);
9641         ret = calc_watermark_data(state);
9642         if (ret)
9643                 goto fail;
9644
9645         ret = intel_bw_atomic_check(state);
9646         if (ret)
9647                 goto fail;
9648
9649         ret = intel_atomic_check_cdclk(state, &any_ms);
9650         if (ret)
9651                 goto fail;
9652
9653         if (intel_any_crtc_needs_modeset(state))
9654                 any_ms = true;
9655
9656         if (any_ms) {
9657                 ret = intel_modeset_checks(state);
9658                 if (ret)
9659                         goto fail;
9660
9661                 ret = intel_modeset_calc_cdclk(state);
9662                 if (ret)
9663                         return ret;
9664
9665                 intel_modeset_clear_plls(state);
9666         }
9667
9668         ret = intel_atomic_check_crtcs(state);
9669         if (ret)
9670                 goto fail;
9671
9672         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9673                                             new_crtc_state, i) {
9674                 if (new_crtc_state->uapi.async_flip) {
9675                         ret = intel_atomic_check_async(state);
9676                         if (ret)
9677                                 goto fail;
9678                 }
9679
9680                 if (!intel_crtc_needs_modeset(new_crtc_state) &&
9681                     !new_crtc_state->update_pipe)
9682                         continue;
9683
9684                 intel_dump_pipe_config(new_crtc_state, state,
9685                                        intel_crtc_needs_modeset(new_crtc_state) ?
9686                                        "[modeset]" : "[fastset]");
9687         }
9688
9689         return 0;
9690
9691  fail:
9692         if (ret == -EDEADLK)
9693                 return ret;
9694
9695         /*
9696          * FIXME would probably be nice to know which crtc specifically
9697          * caused the failure, in cases where we can pinpoint it.
9698          */
9699         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9700                                             new_crtc_state, i)
9701                 intel_dump_pipe_config(new_crtc_state, state, "[failed]");
9702
9703         return ret;
9704 }
9705
9706 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
9707 {
9708         struct intel_crtc_state *crtc_state;
9709         struct intel_crtc *crtc;
9710         int i, ret;
9711
9712         ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
9713         if (ret < 0)
9714                 return ret;
9715
9716         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9717                 bool mode_changed = intel_crtc_needs_modeset(crtc_state);
9718
9719                 if (mode_changed || crtc_state->update_pipe ||
9720                     crtc_state->uapi.color_mgmt_changed) {
9721                         intel_dsb_prepare(crtc_state);
9722                 }
9723         }
9724
9725         return 0;
9726 }
9727
9728 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
9729                                   struct intel_crtc_state *crtc_state)
9730 {
9731         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9732
9733         if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes)
9734                 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
9735
9736         if (crtc_state->has_pch_encoder) {
9737                 enum pipe pch_transcoder =
9738                         intel_crtc_pch_transcoder(crtc);
9739
9740                 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
9741         }
9742 }
9743
/*
 * Apply the pipe-level pieces of a fastset (no full modeset): pipe
 * source size, panel fitter, linetime watermark and (icl+) the pipe
 * chicken register.  Called from commit_pipe_pre_planes() when
 * update_pipe is set.
 */
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */
	intel_set_pipe_src_size(new_crtc_state);

	/* on skylake this is done by detaching scalers */
	if (DISPLAY_VER(dev_priv) >= 9) {
		if (new_crtc_state->pch_pfit.enabled)
			skl_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		if (new_crtc_state->pch_pfit.enabled)
			ilk_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ilk_pfit_disable(old_crtc_state);
	}

	/*
	 * The register is supposedly single buffered so perhaps
	 * not 100% correct to do this here. But SKL+ calculate
	 * this based on the adjust pixel rate so pfit changes do
	 * affect it and so it must be updated for fastsets.
	 * HSW/BDW only really need this here for fastboot, after
	 * that the value should not change without a full modeset.
	 */
	if (DISPLAY_VER(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		hsw_set_linetime_wm(new_crtc_state);

	if (DISPLAY_VER(dev_priv) >= 11)
		icl_set_pipe_chicken(new_crtc_state);
}
9786
/*
 * Pipe-level programming that must happen before the plane updates,
 * inside the vblank evasion critical section of intel_update_crtc().
 * For a fastset this (re)commits color management, pipe misc, pipe
 * size/pfit and PSR2 manual tracking state; watermarks are updated
 * unconditionally.
 */
static void commit_pipe_pre_planes(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	/*
	 * During modesets pipe configuration was programmed as the
	 * CRTC was enabled.
	 */
	if (!modeset) {
		if (new_crtc_state->uapi.color_mgmt_changed ||
		    new_crtc_state->update_pipe)
			intel_color_commit(new_crtc_state);

		if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
			bdw_set_pipemisc(new_crtc_state);

		if (new_crtc_state->update_pipe)
			intel_pipe_fastset(old_crtc_state, new_crtc_state);

		intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
	}

	/* Platform hook may be absent; watermark update is optional. */
	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(state, crtc);
}
9818
9819 static void commit_pipe_post_planes(struct intel_atomic_state *state,
9820                                     struct intel_crtc *crtc)
9821 {
9822         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9823         const struct intel_crtc_state *new_crtc_state =
9824                 intel_atomic_get_new_crtc_state(state, crtc);
9825
9826         /*
9827          * Disable the scaler(s) after the plane(s) so that we don't
9828          * get a catastrophic underrun even if the two operations
9829          * end up happening in two different frames.
9830          */
9831         if (DISPLAY_VER(dev_priv) >= 9 &&
9832             !intel_crtc_needs_modeset(new_crtc_state))
9833                 skl_detach_scalers(new_crtc_state);
9834 }
9835
9836 static void intel_enable_crtc(struct intel_atomic_state *state,
9837                               struct intel_crtc *crtc)
9838 {
9839         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9840         const struct intel_crtc_state *new_crtc_state =
9841                 intel_atomic_get_new_crtc_state(state, crtc);
9842
9843         if (!intel_crtc_needs_modeset(new_crtc_state))
9844                 return;
9845
9846         intel_crtc_update_active_timings(new_crtc_state);
9847
9848         dev_priv->display.crtc_enable(state, crtc);
9849
9850         if (new_crtc_state->bigjoiner_slave)
9851                 return;
9852
9853         /* vblanks work again, re-enable pipe CRC. */
9854         intel_crtc_enable_pipe_crc(crtc);
9855 }
9856
/*
 * Program a CRTC for this commit: pre-plane updates, FBC, then the
 * pipe and plane programming inside the vblank evasion critical
 * section, then post-pipe work.  Used for both fastsets and the
 * update half of modesets (the CRTC itself is brought up separately
 * in intel_enable_crtc()).
 */
static void intel_update_crtc(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	if (!modeset) {
		/* Load the new LUTs ahead of the critical section below. */
		if (new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);

		intel_pre_plane_update(state, crtc);

		if (new_crtc_state->update_pipe)
			intel_encoders_update_pipe(state, crtc);
	}

	intel_fbc_update(state, crtc);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(new_crtc_state);

	commit_pipe_pre_planes(state, crtc);

	if (DISPLAY_VER(dev_priv) >= 9)
		skl_update_planes_on_crtc(state, crtc);
	else
		i9xx_update_planes_on_crtc(state, crtc);

	commit_pipe_post_planes(state, crtc);

	intel_pipe_update_end(new_crtc_state);

	/*
	 * We usually enable FIFO underrun interrupts as part of the
	 * CRTC enable sequence during modesets.  But when we inherit a
	 * valid pipe configuration from the BIOS we need to take care
	 * of enabling them on the CRTC's first fastset.
	 */
	if (new_crtc_state->update_pipe && !modeset &&
	    old_crtc_state->inherited)
		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
9905
/*
 * Tear down a CRTC that needs a full modeset: encoders are
 * pre-disabled, planes go down first, then pipe CRC (before the pipe,
 * to avoid racing vblank off), then the CRTC itself, FBC and its
 * shared DPLL.  Must never be called for a bigjoiner slave; the
 * master handles the slave's planes here.
 */
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
					  struct intel_crtc_state *old_crtc_state,
					  struct intel_crtc_state *new_crtc_state,
					  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	drm_WARN_ON(&dev_priv->drm, old_crtc_state->bigjoiner_slave);

	intel_encoders_pre_disable(state, crtc);

	intel_crtc_disable_planes(state, crtc);

	/*
	 * We still need special handling for disabling bigjoiner master
	 * and slaves since for slave we do not have encoder or plls
	 * so we dont need to disable those.
	 */
	if (old_crtc_state->bigjoiner) {
		intel_crtc_disable_planes(state,
					  old_crtc_state->bigjoiner_linked_crtc);
		old_crtc_state->bigjoiner_linked_crtc->active = false;
	}

	/*
	 * We need to disable pipe CRC before disabling the pipe,
	 * or we race against vblank off.
	 */
	intel_crtc_disable_pipe_crc(crtc);

	dev_priv->display.crtc_disable(state, crtc);
	crtc->active = false;
	intel_fbc_disable(crtc);
	intel_disable_shared_dpll(old_crtc_state);

	/* FIXME unify this for all platforms */
	if (!new_crtc_state->hw.active &&
	    !HAS_GMCH(dev_priv) &&
	    dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
}
9947
/*
 * Disable every CRTC that needs a full modeset, in two passes: first
 * the transcoder port sync and MST slaves (which must go down before
 * their masters, since slave vblanks are masked until the master's),
 * then everything else.  @handled tracks pipes the first pass already
 * disabled.  Bigjoiner slaves are skipped; their master disables them
 * via intel_old_crtc_state_disables().
 */
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u32 handled = 0;
	int i;

	/* Only disable port sync and MST slaves */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner)
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		/* In case of Transcoder port Sync master slave CRTCs can be
		 * assigned in any order and we need to make sure that
		 * slave CRTCs are disabled first and then master CRTC since
		 * Slave vblanks are masked till Master Vblanks.
		 */
		if (!is_trans_port_sync_slave(old_crtc_state) &&
		    !intel_dp_mst_is_slave_trans(old_crtc_state))
			continue;

		intel_pre_plane_update(state, crtc);
		intel_old_crtc_state_disables(state, old_crtc_state,
					      new_crtc_state, crtc);
		handled |= BIT(crtc->pipe);
	}

	/* Disable everything else left on */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state) ||
		    (handled & BIT(crtc->pipe)) ||
		    old_crtc_state->bigjoiner_slave)
			continue;

		intel_pre_plane_update(state, crtc);
		if (old_crtc_state->bigjoiner) {
			struct intel_crtc *slave =
				old_crtc_state->bigjoiner_linked_crtc;

			intel_pre_plane_update(state, slave);
		}

		if (old_crtc_state->hw.active)
			intel_old_crtc_state_disables(state, old_crtc_state,
						      new_crtc_state, crtc);
	}
}
10000
10001 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
10002 {
10003         struct intel_crtc_state *new_crtc_state;
10004         struct intel_crtc *crtc;
10005         int i;
10006
10007         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10008                 if (!new_crtc_state->hw.active)
10009                         continue;
10010
10011                 intel_enable_crtc(state, crtc);
10012                 intel_update_crtc(state, crtc);
10013         }
10014 }
10015
/*
 * skl+ modeset enable path.  DDB allocations of active pipes must
 * never overlap between two CRTC updates, so pipes are updated in an
 * order where each pipe's new allocation only uses space already
 * free in @entries (which tracks the currently-committed allocation
 * per pipe):
 *
 * 1. fastset pipes whose new DDB doesn't overlap anyone's current one
 *    (looping until all are done),
 * 2. modeset pipes with no dependency on other pipes,
 * 3. remaining modeset pipes (MST slaves, port sync masters, bigjoiner
 *    masters),
 * 4. finally the plane updates for all newly enabled pipes.
 */
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
	u8 update_pipes = 0, modeset_pipes = 0;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if (!new_crtc_state->hw.active)
			continue;

		/* ignore allocations for crtc's that have been turned off. */
		if (!intel_crtc_needs_modeset(new_crtc_state)) {
			entries[pipe] = old_crtc_state->wm.skl.ddb;
			update_pipes |= BIT(pipe);
		} else {
			modeset_pipes |= BIT(pipe);
		}
	}

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other between CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 *
	 * So first lets enable all pipes that do not need a fullmodeset as
	 * those don't have any external dependency.
	 */
	while (update_pipes) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
						    new_crtc_state, i) {
			enum pipe pipe = crtc->pipe;

			if ((update_pipes & BIT(pipe)) == 0)
				continue;

			/* Not yet safe: some other pipe still holds this space. */
			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
							entries, I915_MAX_PIPES, pipe))
				continue;

			entries[pipe] = new_crtc_state->wm.skl.ddb;
			update_pipes &= ~BIT(pipe);

			intel_update_crtc(state, crtc);

			/*
			 * If this is an already active pipe, it's DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
						 &old_crtc_state->wm.skl.ddb) &&
			    (update_pipes | modeset_pipes))
				intel_wait_for_vblank(dev_priv, pipe);
		}
	}

	/* Modeset pipes still need their plane update (step 4 below). */
	update_pipes = modeset_pipes;

	/*
	 * Enable all pipes that needs a modeset and do not depends on other
	 * pipes
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
		    is_trans_port_sync_master(new_crtc_state) ||
		    (new_crtc_state->bigjoiner && !new_crtc_state->bigjoiner_slave))
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Then we enable all remaining pipes that depend on other
	 * pipes: MST slaves and port sync masters, big joiner master
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Finally we do the plane updates/etc. for all pipes that got enabled.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((update_pipes & BIT(pipe)) == 0)
			continue;

		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
									entries, I915_MAX_PIPES, pipe));

		entries[pipe] = new_crtc_state->wm.skl.ddb;
		update_pipes &= ~BIT(pipe);

		intel_update_crtc(state, crtc);
	}

	/* Every pipe must have been handled by now. */
	drm_WARN_ON(&dev_priv->drm, modeset_pipes);
	drm_WARN_ON(&dev_priv->drm, update_pipes);
}
10137
10138 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
10139 {
10140         struct intel_atomic_state *state, *next;
10141         struct llist_node *freed;
10142
10143         freed = llist_del_all(&dev_priv->atomic_helper.free_list);
10144         llist_for_each_entry_safe(state, next, freed, freed)
10145                 drm_atomic_state_put(&state->base);
10146 }
10147
10148 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
10149 {
10150         struct drm_i915_private *dev_priv =
10151                 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
10152
10153         intel_atomic_helper_free_state(dev_priv);
10154 }
10155
/*
 * Block until the commit's sw fence signals, while also waking up if a
 * modeset-affecting GPU reset (I915_RESET_MODESET) is flagged, so the
 * wait doesn't stall across a reset.  Waits on both queues
 * simultaneously and exits when either condition holds.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);
	for (;;) {
		/* Queue ourselves on both waitqueues before re-checking. */
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
					      I915_RESET_MODESET),
				&wait_reset, TASK_UNINTERRUPTIBLE);


		if (i915_sw_fence_done(&intel_state->commit_ready) ||
		    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
	finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
				  I915_RESET_MODESET),
		    &wait_reset);
}
10182
/*
 * Clean up the DSBs stashed in the old CRTC states (moved there from
 * the new states at the end of intel_atomic_commit_tail() so they can
 * be released together with the framebuffers).
 */
static void intel_cleanup_dsbs(struct intel_atomic_state *state)
{
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i)
		intel_dsb_cleanup(old_crtc_state);
}
10193
10194 static void intel_atomic_cleanup_work(struct work_struct *work)
10195 {
10196         struct intel_atomic_state *state =
10197                 container_of(work, struct intel_atomic_state, base.commit_work);
10198         struct drm_i915_private *i915 = to_i915(state->base.dev);
10199
10200         intel_cleanup_dsbs(state);
10201         drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
10202         drm_atomic_helper_commit_cleanup_done(&state->base);
10203         drm_atomic_state_put(&state->base);
10204
10205         intel_atomic_helper_free_state(i915);
10206 }
10207
/*
 * For every plane using a gen12 render-compressed fb with a clear
 * color (RC_CCS_CC modifier), read back the native clear color value
 * from the fb's color-control plane and cache it in the plane state
 * for later programming.
 */
static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_plane *plane;
	struct intel_plane_state *plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct drm_framebuffer *fb = plane_state->hw.fb;
		int ret;

		if (!fb ||
		    fb->modifier != I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
			continue;

		/*
		 * The layout of the fast clear color value expected by HW
		 * (the DRM ABI requiring this value to be located in fb at offset 0 of plane#2):
		 * - 4 x 4 bytes per-channel value
		 *   (in surface type specific float/int format provided by the fb user)
		 * - 8 bytes native color value used by the display
		 *   (converted/written by GPU during a fast clear operation using the
		 *    above per-channel values)
		 *
		 * The commit's FB prepare hook already ensured that FB obj is pinned and the
		 * caller made sure that the object is synced wrt. the related color clear value
		 * GPU write on it.
		 */
		/* +16 skips the 4 x 4 byte per-channel values described above. */
		ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
						     fb->offsets[2] + 16,
						     &plane_state->ccval,
						     sizeof(plane_state->ccval));
		/* The above could only fail if the FB obj has an unexpected backing store type. */
		drm_WARN_ON(&i915->drm, ret);
	}
}
10244
10245 static void intel_atomic_commit_tail(struct intel_atomic_state *state)
10246 {
10247         struct drm_device *dev = state->base.dev;
10248         struct drm_i915_private *dev_priv = to_i915(dev);
10249         struct intel_crtc_state *new_crtc_state, *old_crtc_state;
10250         struct intel_crtc *crtc;
10251         u64 put_domains[I915_MAX_PIPES] = {};
10252         intel_wakeref_t wakeref = 0;
10253         int i;
10254
10255         intel_atomic_commit_fence_wait(state);
10256
10257         drm_atomic_helper_wait_for_dependencies(&state->base);
10258
10259         if (state->modeset)
10260                 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
10261
10262         intel_atomic_prepare_plane_clear_colors(state);
10263
10264         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10265                                             new_crtc_state, i) {
10266                 if (intel_crtc_needs_modeset(new_crtc_state) ||
10267                     new_crtc_state->update_pipe) {
10268
10269                         put_domains[crtc->pipe] =
10270                                 modeset_get_crtc_power_domains(new_crtc_state);
10271                 }
10272         }
10273
10274         intel_commit_modeset_disables(state);
10275
10276         /* FIXME: Eventually get rid of our crtc->config pointer */
10277         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
10278                 crtc->config = new_crtc_state;
10279
10280         if (state->modeset) {
10281                 drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
10282
10283                 intel_set_cdclk_pre_plane_update(state);
10284
10285                 intel_modeset_verify_disabled(dev_priv, state);
10286         }
10287
10288         intel_sagv_pre_plane_update(state);
10289
10290         /* Complete the events for pipes that have now been disabled */
10291         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10292                 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
10293
10294                 /* Complete events for now disable pipes here. */
10295                 if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
10296                         spin_lock_irq(&dev->event_lock);
10297                         drm_crtc_send_vblank_event(&crtc->base,
10298                                                    new_crtc_state->uapi.event);
10299                         spin_unlock_irq(&dev->event_lock);
10300
10301                         new_crtc_state->uapi.event = NULL;
10302                 }
10303         }
10304
10305         if (state->modeset)
10306                 intel_encoders_update_prepare(state);
10307
10308         intel_dbuf_pre_plane_update(state);
10309
10310         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10311                 if (new_crtc_state->uapi.async_flip)
10312                         intel_crtc_enable_flip_done(state, crtc);
10313         }
10314
10315         /* Now enable the clocks, plane, pipe, and connectors that we set up. */
10316         dev_priv->display.commit_modeset_enables(state);
10317
10318         if (state->modeset) {
10319                 intel_encoders_update_complete(state);
10320
10321                 intel_set_cdclk_post_plane_update(state);
10322         }
10323
10324         /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
10325          * already, but still need the state for the delayed optimization. To
10326          * fix this:
10327          * - wrap the optimization/post_plane_update stuff into a per-crtc work.
10328          * - schedule that vblank worker _before_ calling hw_done
10329          * - at the start of commit_tail, cancel it _synchrously
10330          * - switch over to the vblank wait helper in the core after that since
10331          *   we don't need out special handling any more.
10332          */
10333         drm_atomic_helper_wait_for_flip_done(dev, &state->base);
10334
10335         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10336                 if (new_crtc_state->uapi.async_flip)
10337                         intel_crtc_disable_flip_done(state, crtc);
10338
10339                 if (new_crtc_state->hw.active &&
10340                     !intel_crtc_needs_modeset(new_crtc_state) &&
10341                     !new_crtc_state->preload_luts &&
10342                     (new_crtc_state->uapi.color_mgmt_changed ||
10343                      new_crtc_state->update_pipe))
10344                         intel_color_load_luts(new_crtc_state);
10345         }
10346
10347         /*
10348          * Now that the vblank has passed, we can go ahead and program the
10349          * optimal watermarks on platforms that need two-step watermark
10350          * programming.
10351          *
10352          * TODO: Move this (and other cleanup) to an async worker eventually.
10353          */
10354         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10355                                             new_crtc_state, i) {
10356                 /*
10357                  * Gen2 reports pipe underruns whenever all planes are disabled.
10358                  * So re-enable underrun reporting after some planes get enabled.
10359                  *
10360                  * We do this before .optimize_watermarks() so that we have a
10361                  * chance of catching underruns with the intermediate watermarks
10362                  * vs. the new plane configuration.
10363                  */
10364                 if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
10365                         intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
10366
10367                 if (dev_priv->display.optimize_watermarks)
10368                         dev_priv->display.optimize_watermarks(state, crtc);
10369         }
10370
10371         intel_dbuf_post_plane_update(state);
10372
10373         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10374                 intel_post_plane_update(state, crtc);
10375
10376                 modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);
10377
10378                 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
10379
10380                 /*
10381                  * DSB cleanup is done in cleanup_work aligning with framebuffer
10382                  * cleanup. So copy and reset the dsb structure to sync with
10383                  * commit_done and later do dsb cleanup in cleanup_work.
10384                  */
10385                 old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
10386         }
10387
10388         /* Underruns don't always raise interrupts, so check manually */
10389         intel_check_cpu_fifo_underruns(dev_priv);
10390         intel_check_pch_fifo_underruns(dev_priv);
10391
10392         if (state->modeset)
10393                 intel_verify_planes(state);
10394
10395         intel_sagv_post_plane_update(state);
10396
10397         drm_atomic_helper_commit_hw_done(&state->base);
10398
10399         if (state->modeset) {
10400                 /* As one of the primary mmio accessors, KMS has a high
10401                  * likelihood of triggering bugs in unclaimed access. After we
10402                  * finish modesetting, see if an error has been flagged, and if
10403                  * so enable debugging for the next modeset - and hope we catch
10404                  * the culprit.
10405                  */
10406                 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
10407                 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
10408         }
10409         intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
10410
10411         /*
10412          * Defer the cleanup of the old state to a separate worker to not
10413          * impede the current task (userspace for blocking modesets) that
10414          * are executed inline. For out-of-line asynchronous modesets/flips,
10415          * deferring to a new worker seems overkill, but we would place a
10416          * schedule point (cond_resched()) here anyway to keep latencies
10417          * down.
10418          */
10419         INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
10420         queue_work(system_highpri_wq, &state->base.commit_work);
10421 }
10422
10423 static void intel_atomic_commit_work(struct work_struct *work)
10424 {
10425         struct intel_atomic_state *state =
10426                 container_of(work, struct intel_atomic_state, base.commit_work);
10427
10428         intel_atomic_commit_tail(state);
10429 }
10430
10431 static int __i915_sw_fence_call
10432 intel_atomic_commit_ready(struct i915_sw_fence *fence,
10433                           enum i915_sw_fence_notify notify)
10434 {
10435         struct intel_atomic_state *state =
10436                 container_of(fence, struct intel_atomic_state, commit_ready);
10437
10438         switch (notify) {
10439         case FENCE_COMPLETE:
10440                 /* we do blocking waits in the worker, nothing to do here */
10441                 break;
10442         case FENCE_FREE:
10443                 {
10444                         struct intel_atomic_helper *helper =
10445                                 &to_i915(state->base.dev)->atomic_helper;
10446
10447                         if (llist_add(&state->freed, &helper->free_list))
10448                                 schedule_work(&helper->free_work);
10449                         break;
10450                 }
10451         }
10452
10453         return NOTIFY_DONE;
10454 }
10455
10456 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
10457 {
10458         struct intel_plane_state *old_plane_state, *new_plane_state;
10459         struct intel_plane *plane;
10460         int i;
10461
10462         for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
10463                                              new_plane_state, i)
10464                 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
10465                                         to_intel_frontbuffer(new_plane_state->hw.fb),
10466                                         plane->frontbuffer_bit);
10467 }
10468
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *_state,
			       bool nonblock)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	/*
	 * Take a runtime pm reference for the duration of the commit;
	 * released at the end of intel_atomic_commit_tail() on success,
	 * or on the error paths below.
	 */
	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	drm_atomic_state_get(&state->base);
	/*
	 * commit_ready gates the start of the commit until the fb/fence
	 * dependencies added in intel_prepare_plane_fb() have signaled.
	 */
	i915_sw_fence_init(&state->commit_ready,
			   intel_atomic_commit_ready);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->base.legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(state);
	if (ret) {
		drm_dbg_atomic(&dev_priv->drm,
			       "Preparing state failed with %i\n", ret);
		/* Release the commit fence (and its FENCE_FREE handling). */
		i915_sw_fence_commit(&state->commit_ready);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}

	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
	if (!ret)
		ret = drm_atomic_helper_swap_state(&state->base, true);
	if (!ret)
		intel_atomic_swap_global_state(state);

	if (ret) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		i915_sw_fence_commit(&state->commit_ready);

		/* The DSBs set up for this commit won't be executed; free them. */
		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			intel_dsb_cleanup(new_crtc_state);

		drm_atomic_helper_cleanup_planes(dev, &state->base);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}
	intel_shared_dpll_swap_state(state);
	intel_atomic_track_fbs(state);

	/*
	 * Extra state reference for the commit work; presumably dropped by
	 * the cleanup worker queued at the end of intel_atomic_commit_tail()
	 * — confirm against intel_atomic_cleanup_work().
	 */
	drm_atomic_state_get(&state->base);
	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

	i915_sw_fence_commit(&state->commit_ready);
	if (nonblock && state->modeset) {
		queue_work(dev_priv->modeset_wq, &state->base.commit_work);
	} else if (nonblock) {
		queue_work(dev_priv->flip_wq, &state->base.commit_work);
	} else {
		/*
		 * A blocking modeset must not run concurrently with any
		 * nonblocking modeset still pending on the dedicated wq.
		 */
		if (state->modeset)
			flush_workqueue(dev_priv->modeset_wq);
		intel_atomic_commit_tail(state);
	}

	return 0;
}
10559
struct wait_rps_boost {
	/* Embedded vblank waitqueue entry; do_rps_boost() is its callback. */
	struct wait_queue_entry wait;

	/* CRTC whose vblank reference is held while the waiter is queued. */
	struct drm_crtc *crtc;
	/* Request to RPS-boost if it hasn't started by the next vblank. */
	struct i915_request *request;
};
10566
10567 static int do_rps_boost(struct wait_queue_entry *_wait,
10568                         unsigned mode, int sync, void *key)
10569 {
10570         struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
10571         struct i915_request *rq = wait->request;
10572
10573         /*
10574          * If we missed the vblank, but the request is already running it
10575          * is reasonable to assume that it will complete before the next
10576          * vblank without our intervention, so leave RPS alone.
10577          */
10578         if (!i915_request_started(rq))
10579                 intel_rps_boost(rq);
10580         i915_request_put(rq);
10581
10582         drm_crtc_vblank_put(wait->crtc);
10583
10584         list_del(&wait->wait.entry);
10585         kfree(wait);
10586         return 1;
10587 }
10588
10589 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
10590                                        struct dma_fence *fence)
10591 {
10592         struct wait_rps_boost *wait;
10593
10594         if (!dma_fence_is_i915(fence))
10595                 return;
10596
10597         if (DISPLAY_VER(to_i915(crtc->dev)) < 6)
10598                 return;
10599
10600         if (drm_crtc_vblank_get(crtc))
10601                 return;
10602
10603         wait = kmalloc(sizeof(*wait), GFP_KERNEL);
10604         if (!wait) {
10605                 drm_crtc_vblank_put(crtc);
10606                 return;
10607         }
10608
10609         wait->request = to_request(dma_fence_get(fence));
10610         wait->crtc = crtc;
10611
10612         wait->wait.func = do_rps_boost;
10613         wait->wait.flags = 0;
10614
10615         add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
10616 }
10617
10618 int intel_plane_pin_fb(struct intel_plane_state *plane_state)
10619 {
10620         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
10621         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10622         struct drm_framebuffer *fb = plane_state->hw.fb;
10623         struct i915_vma *vma;
10624         bool phys_cursor =
10625                 plane->id == PLANE_CURSOR &&
10626                 INTEL_INFO(dev_priv)->display.cursor_needs_physical;
10627
10628         if (!intel_fb_uses_dpt(fb)) {
10629                 vma = intel_pin_and_fence_fb_obj(fb, phys_cursor,
10630                                                  &plane_state->view.gtt,
10631                                                  intel_plane_uses_fence(plane_state),
10632                                                  &plane_state->flags);
10633                 if (IS_ERR(vma))
10634                         return PTR_ERR(vma);
10635
10636                 plane_state->ggtt_vma = vma;
10637         } else {
10638                 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
10639
10640                 vma = intel_dpt_pin(intel_fb->dpt_vm);
10641                 if (IS_ERR(vma))
10642                         return PTR_ERR(vma);
10643
10644                 plane_state->ggtt_vma = vma;
10645
10646                 vma = intel_pin_fb_obj_dpt(fb, &plane_state->view.gtt, false,
10647                                            &plane_state->flags, intel_fb->dpt_vm);
10648                 if (IS_ERR(vma)) {
10649                         intel_dpt_unpin(intel_fb->dpt_vm);
10650                         plane_state->ggtt_vma = NULL;
10651                         return PTR_ERR(vma);
10652                 }
10653
10654                 plane_state->dpt_vma = vma;
10655
10656                 WARN_ON(plane_state->ggtt_vma == plane_state->dpt_vma);
10657         }
10658
10659         return 0;
10660 }
10661
10662 void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
10663 {
10664         struct drm_framebuffer *fb = old_plane_state->hw.fb;
10665         struct i915_vma *vma;
10666
10667         if (!intel_fb_uses_dpt(fb)) {
10668                 vma = fetch_and_zero(&old_plane_state->ggtt_vma);
10669                 if (vma)
10670                         intel_unpin_fb_vma(vma, old_plane_state->flags);
10671         } else {
10672                 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
10673
10674                 vma = fetch_and_zero(&old_plane_state->dpt_vma);
10675                 if (vma)
10676                         intel_unpin_fb_vma(vma, old_plane_state->flags);
10677
10678                 vma = fetch_and_zero(&old_plane_state->ggtt_vma);
10679                 if (vma)
10680                         intel_dpt_unpin(intel_fb->dpt_vm);
10681         }
10682 }
10683
10684 /**
10685  * intel_prepare_plane_fb - Prepare fb for usage on plane
10686  * @_plane: drm plane to prepare for
10687  * @_new_plane_state: the plane state being prepared
10688  *
10689  * Prepares a framebuffer for usage on a display plane.  Generally this
10690  * involves pinning the underlying object and updating the frontbuffer tracking
10691  * bits.  Some older platforms need special physical address handling for
10692  * cursor planes.
10693  *
10694  * Returns 0 on success, negative error code on failure.
10695  */
10696 int
10697 intel_prepare_plane_fb(struct drm_plane *_plane,
10698                        struct drm_plane_state *_new_plane_state)
10699 {
10700         struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY };
10701         struct intel_plane *plane = to_intel_plane(_plane);
10702         struct intel_plane_state *new_plane_state =
10703                 to_intel_plane_state(_new_plane_state);
10704         struct intel_atomic_state *state =
10705                 to_intel_atomic_state(new_plane_state->uapi.state);
10706         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10707         const struct intel_plane_state *old_plane_state =
10708                 intel_atomic_get_old_plane_state(state, plane);
10709         struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
10710         struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
10711         int ret;
10712
10713         if (old_obj) {
10714                 const struct intel_crtc_state *crtc_state =
10715                         intel_atomic_get_new_crtc_state(state,
10716                                                         to_intel_crtc(old_plane_state->hw.crtc));
10717
10718                 /* Big Hammer, we also need to ensure that any pending
10719                  * MI_WAIT_FOR_EVENT inside a user batch buffer on the
10720                  * current scanout is retired before unpinning the old
10721                  * framebuffer. Note that we rely on userspace rendering
10722                  * into the buffer attached to the pipe they are waiting
10723                  * on. If not, userspace generates a GPU hang with IPEHR
10724                  * point to the MI_WAIT_FOR_EVENT.
10725                  *
10726                  * This should only fail upon a hung GPU, in which case we
10727                  * can safely continue.
10728                  */
10729                 if (intel_crtc_needs_modeset(crtc_state)) {
10730                         ret = i915_sw_fence_await_reservation(&state->commit_ready,
10731                                                               old_obj->base.resv, NULL,
10732                                                               false, 0,
10733                                                               GFP_KERNEL);
10734                         if (ret < 0)
10735                                 return ret;
10736                 }
10737         }
10738
10739         if (new_plane_state->uapi.fence) { /* explicit fencing */
10740                 i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
10741                                              &attr);
10742                 ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
10743                                                     new_plane_state->uapi.fence,
10744                                                     i915_fence_timeout(dev_priv),
10745                                                     GFP_KERNEL);
10746                 if (ret < 0)
10747                         return ret;
10748         }
10749
10750         if (!obj)
10751                 return 0;
10752
10753
10754         ret = intel_plane_pin_fb(new_plane_state);
10755         if (ret)
10756                 return ret;
10757
10758         i915_gem_object_wait_priority(obj, 0, &attr);
10759         i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
10760
10761         if (!new_plane_state->uapi.fence) { /* implicit fencing */
10762                 struct dma_fence *fence;
10763
10764                 ret = i915_sw_fence_await_reservation(&state->commit_ready,
10765                                                       obj->base.resv, NULL,
10766                                                       false,
10767                                                       i915_fence_timeout(dev_priv),
10768                                                       GFP_KERNEL);
10769                 if (ret < 0)
10770                         goto unpin_fb;
10771
10772                 fence = dma_resv_get_excl_unlocked(obj->base.resv);
10773                 if (fence) {
10774                         add_rps_boost_after_vblank(new_plane_state->hw.crtc,
10775                                                    fence);
10776                         dma_fence_put(fence);
10777                 }
10778         } else {
10779                 add_rps_boost_after_vblank(new_plane_state->hw.crtc,
10780                                            new_plane_state->uapi.fence);
10781         }
10782
10783         /*
10784          * We declare pageflips to be interactive and so merit a small bias
10785          * towards upclocking to deliver the frame on time. By only changing
10786          * the RPS thresholds to sample more regularly and aim for higher
10787          * clocks we can hopefully deliver low power workloads (like kodi)
10788          * that are not quite steady state without resorting to forcing
10789          * maximum clocks following a vblank miss (see do_rps_boost()).
10790          */
10791         if (!state->rps_interactive) {
10792                 intel_rps_mark_interactive(&dev_priv->gt.rps, true);
10793                 state->rps_interactive = true;
10794         }
10795
10796         return 0;
10797
10798 unpin_fb:
10799         intel_plane_unpin_fb(new_plane_state);
10800
10801         return ret;
10802 }
10803
10804 /**
10805  * intel_cleanup_plane_fb - Cleans up an fb after plane use
10806  * @plane: drm plane to clean up for
10807  * @_old_plane_state: the state from the previous modeset
10808  *
10809  * Cleans up a framebuffer that has just been removed from a plane.
10810  */
10811 void
10812 intel_cleanup_plane_fb(struct drm_plane *plane,
10813                        struct drm_plane_state *_old_plane_state)
10814 {
10815         struct intel_plane_state *old_plane_state =
10816                 to_intel_plane_state(_old_plane_state);
10817         struct intel_atomic_state *state =
10818                 to_intel_atomic_state(old_plane_state->uapi.state);
10819         struct drm_i915_private *dev_priv = to_i915(plane->dev);
10820         struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
10821
10822         if (!obj)
10823                 return;
10824
10825         if (state->rps_interactive) {
10826                 intel_rps_mark_interactive(&dev_priv->gt.rps, false);
10827                 state->rps_interactive = false;
10828         }
10829
10830         /* Should only be called after a successful intel_prepare_plane_fb()! */
10831         intel_plane_unpin_fb(old_plane_state);
10832 }
10833
10834 /**
10835  * intel_plane_destroy - destroy a plane
10836  * @plane: plane to destroy
10837  *
10838  * Common destruction function for all types of planes (primary, cursor,
10839  * sprite).
10840  */
10841 void intel_plane_destroy(struct drm_plane *plane)
10842 {
10843         drm_plane_cleanup(plane);
10844         kfree(to_intel_plane(plane));
10845 }
10846
10847 static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
10848 {
10849         struct intel_plane *plane;
10850
10851         for_each_intel_plane(&dev_priv->drm, plane) {
10852                 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
10853                                                                   plane->pipe);
10854
10855                 plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
10856         }
10857 }
10858
10859
10860 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
10861                                       struct drm_file *file)
10862 {
10863         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
10864         struct drm_crtc *drmmode_crtc;
10865         struct intel_crtc *crtc;
10866
10867         drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
10868         if (!drmmode_crtc)
10869                 return -ENOENT;
10870
10871         crtc = to_intel_crtc(drmmode_crtc);
10872         pipe_from_crtc_id->pipe = crtc->pipe;
10873
10874         return 0;
10875 }
10876
10877 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
10878 {
10879         struct drm_device *dev = encoder->base.dev;
10880         struct intel_encoder *source_encoder;
10881         u32 possible_clones = 0;
10882
10883         for_each_intel_encoder(dev, source_encoder) {
10884                 if (encoders_cloneable(encoder, source_encoder))
10885                         possible_clones |= drm_encoder_mask(&source_encoder->base);
10886         }
10887
10888         return possible_clones;
10889 }
10890
10891 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
10892 {
10893         struct drm_device *dev = encoder->base.dev;
10894         struct intel_crtc *crtc;
10895         u32 possible_crtcs = 0;
10896
10897         for_each_intel_crtc(dev, crtc) {
10898                 if (encoder->pipe_mask & BIT(crtc->pipe))
10899                         possible_crtcs |= drm_crtc_mask(&crtc->base);
10900         }
10901
10902         return possible_crtcs;
10903 }
10904
10905 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
10906 {
10907         if (!IS_MOBILE(dev_priv))
10908                 return false;
10909
10910         if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
10911                 return false;
10912
10913         if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
10914                 return false;
10915
10916         return true;
10917 }
10918
10919 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
10920 {
10921         if (DISPLAY_VER(dev_priv) >= 9)
10922                 return false;
10923
10924         if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
10925                 return false;
10926
10927         if (HAS_PCH_LPT_H(dev_priv) &&
10928             intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
10929                 return false;
10930
10931         /* DDI E can't be used if DDI A requires 4 lanes */
10932         if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
10933                 return false;
10934
10935         if (!dev_priv->vbt.int_crt_support)
10936                 return false;
10937
10938         return true;
10939 }
10940
10941 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
10942 {
10943         struct intel_encoder *encoder;
10944         bool dpd_is_edp = false;
10945
10946         intel_pps_unlock_regs_wa(dev_priv);
10947
10948         if (!HAS_DISPLAY(dev_priv))
10949                 return;
10950
10951         if (IS_DG2(dev_priv)) {
10952                 intel_ddi_init(dev_priv, PORT_A);
10953                 intel_ddi_init(dev_priv, PORT_B);
10954                 intel_ddi_init(dev_priv, PORT_C);
10955                 intel_ddi_init(dev_priv, PORT_D_XELPD);
10956         } else if (IS_ALDERLAKE_P(dev_priv)) {
10957                 intel_ddi_init(dev_priv, PORT_A);
10958                 intel_ddi_init(dev_priv, PORT_B);
10959                 intel_ddi_init(dev_priv, PORT_TC1);
10960                 intel_ddi_init(dev_priv, PORT_TC2);
10961                 intel_ddi_init(dev_priv, PORT_TC3);
10962                 intel_ddi_init(dev_priv, PORT_TC4);
10963         } else if (IS_ALDERLAKE_S(dev_priv)) {
10964                 intel_ddi_init(dev_priv, PORT_A);
10965                 intel_ddi_init(dev_priv, PORT_TC1);
10966                 intel_ddi_init(dev_priv, PORT_TC2);
10967                 intel_ddi_init(dev_priv, PORT_TC3);
10968                 intel_ddi_init(dev_priv, PORT_TC4);
10969         } else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
10970                 intel_ddi_init(dev_priv, PORT_A);
10971                 intel_ddi_init(dev_priv, PORT_B);
10972                 intel_ddi_init(dev_priv, PORT_TC1);
10973                 intel_ddi_init(dev_priv, PORT_TC2);
10974         } else if (DISPLAY_VER(dev_priv) >= 12) {
10975                 intel_ddi_init(dev_priv, PORT_A);
10976                 intel_ddi_init(dev_priv, PORT_B);
10977                 intel_ddi_init(dev_priv, PORT_TC1);
10978                 intel_ddi_init(dev_priv, PORT_TC2);
10979                 intel_ddi_init(dev_priv, PORT_TC3);
10980                 intel_ddi_init(dev_priv, PORT_TC4);
10981                 intel_ddi_init(dev_priv, PORT_TC5);
10982                 intel_ddi_init(dev_priv, PORT_TC6);
10983                 icl_dsi_init(dev_priv);
10984         } else if (IS_JSL_EHL(dev_priv)) {
10985                 intel_ddi_init(dev_priv, PORT_A);
10986                 intel_ddi_init(dev_priv, PORT_B);
10987                 intel_ddi_init(dev_priv, PORT_C);
10988                 intel_ddi_init(dev_priv, PORT_D);
10989                 icl_dsi_init(dev_priv);
10990         } else if (DISPLAY_VER(dev_priv) == 11) {
10991                 intel_ddi_init(dev_priv, PORT_A);
10992                 intel_ddi_init(dev_priv, PORT_B);
10993                 intel_ddi_init(dev_priv, PORT_C);
10994                 intel_ddi_init(dev_priv, PORT_D);
10995                 intel_ddi_init(dev_priv, PORT_E);
10996                 intel_ddi_init(dev_priv, PORT_F);
10997                 icl_dsi_init(dev_priv);
10998         } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
10999                 intel_ddi_init(dev_priv, PORT_A);
11000                 intel_ddi_init(dev_priv, PORT_B);
11001                 intel_ddi_init(dev_priv, PORT_C);
11002                 vlv_dsi_init(dev_priv);
11003         } else if (DISPLAY_VER(dev_priv) >= 9) {
11004                 intel_ddi_init(dev_priv, PORT_A);
11005                 intel_ddi_init(dev_priv, PORT_B);
11006                 intel_ddi_init(dev_priv, PORT_C);
11007                 intel_ddi_init(dev_priv, PORT_D);
11008                 intel_ddi_init(dev_priv, PORT_E);
11009         } else if (HAS_DDI(dev_priv)) {
11010                 u32 found;
11011
11012                 if (intel_ddi_crt_present(dev_priv))
11013                         intel_crt_init(dev_priv);
11014
11015                 /* Haswell uses DDI functions to detect digital outputs. */
11016                 found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
11017                 if (found)
11018                         intel_ddi_init(dev_priv, PORT_A);
11019
11020                 found = intel_de_read(dev_priv, SFUSE_STRAP);
11021                 if (found & SFUSE_STRAP_DDIB_DETECTED)
11022                         intel_ddi_init(dev_priv, PORT_B);
11023                 if (found & SFUSE_STRAP_DDIC_DETECTED)
11024                         intel_ddi_init(dev_priv, PORT_C);
11025                 if (found & SFUSE_STRAP_DDID_DETECTED)
11026                         intel_ddi_init(dev_priv, PORT_D);
11027                 if (found & SFUSE_STRAP_DDIF_DETECTED)
11028                         intel_ddi_init(dev_priv, PORT_F);
11029         } else if (HAS_PCH_SPLIT(dev_priv)) {
11030                 int found;
11031
11032                 /*
11033                  * intel_edp_init_connector() depends on this completing first,
11034                  * to prevent the registration of both eDP and LVDS and the
11035                  * incorrect sharing of the PPS.
11036                  */
11037                 intel_lvds_init(dev_priv);
11038                 intel_crt_init(dev_priv);
11039
11040                 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
11041
11042                 if (ilk_has_edp_a(dev_priv))
11043                         g4x_dp_init(dev_priv, DP_A, PORT_A);
11044
11045                 if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
11046                         /* PCH SDVOB multiplex with HDMIB */
11047                         found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
11048                         if (!found)
11049                                 g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
11050                         if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
11051                                 g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
11052                 }
11053
11054                 if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
11055                         g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
11056
11057                 if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
11058                         g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
11059
11060                 if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
11061                         g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);
11062
11063                 if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
11064                         g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
11065         } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
11066                 bool has_edp, has_port;
11067
11068                 if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
11069                         intel_crt_init(dev_priv);
11070
11071                 /*
11072                  * The DP_DETECTED bit is the latched state of the DDC
11073                  * SDA pin at boot. However since eDP doesn't require DDC
11074                  * (no way to plug in a DP->HDMI dongle) the DDC pins for
11075                  * eDP ports may have been muxed to an alternate function.
11076                  * Thus we can't rely on the DP_DETECTED bit alone to detect
11077                  * eDP ports. Consult the VBT as well as DP_DETECTED to
11078                  * detect eDP ports.
11079                  *
11080                  * Sadly the straps seem to be missing sometimes even for HDMI
11081                  * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
11082                  * and VBT for the presence of the port. Additionally we can't
11083                  * trust the port type the VBT declares as we've seen at least
11084                  * HDMI ports that the VBT claim are DP or eDP.
11085                  */
11086                 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
11087                 has_port = intel_bios_is_port_present(dev_priv, PORT_B);
11088                 if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
11089                         has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
11090                 if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
11091                         g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
11092
11093                 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
11094                 has_port = intel_bios_is_port_present(dev_priv, PORT_C);
11095                 if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
11096                         has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
11097                 if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
11098                         g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
11099
11100                 if (IS_CHERRYVIEW(dev_priv)) {
11101                         /*
11102                          * eDP not supported on port D,
11103                          * so no need to worry about it
11104                          */
11105                         has_port = intel_bios_is_port_present(dev_priv, PORT_D);
11106                         if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
11107                                 g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
11108                         if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
11109                                 g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
11110                 }
11111
11112                 vlv_dsi_init(dev_priv);
11113         } else if (IS_PINEVIEW(dev_priv)) {
11114                 intel_lvds_init(dev_priv);
11115                 intel_crt_init(dev_priv);
11116         } else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
11117                 bool found = false;
11118
11119                 if (IS_MOBILE(dev_priv))
11120                         intel_lvds_init(dev_priv);
11121
11122                 intel_crt_init(dev_priv);
11123
11124                 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
11125                         drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
11126                         found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
11127                         if (!found && IS_G4X(dev_priv)) {
11128                                 drm_dbg_kms(&dev_priv->drm,
11129                                             "probing HDMI on SDVOB\n");
11130                                 g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
11131                         }
11132
11133                         if (!found && IS_G4X(dev_priv))
11134                                 g4x_dp_init(dev_priv, DP_B, PORT_B);
11135                 }
11136
11137                 /* Before G4X SDVOC doesn't have its own detect register */
11138
11139                 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
11140                         drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
11141                         found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
11142                 }
11143
11144                 if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
11145
11146                         if (IS_G4X(dev_priv)) {
11147                                 drm_dbg_kms(&dev_priv->drm,
11148                                             "probing HDMI on SDVOC\n");
11149                                 g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
11150                         }
11151                         if (IS_G4X(dev_priv))
11152                                 g4x_dp_init(dev_priv, DP_C, PORT_C);
11153                 }
11154
11155                 if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
11156                         g4x_dp_init(dev_priv, DP_D, PORT_D);
11157
11158                 if (SUPPORTS_TV(dev_priv))
11159                         intel_tv_init(dev_priv);
11160         } else if (DISPLAY_VER(dev_priv) == 2) {
11161                 if (IS_I85X(dev_priv))
11162                         intel_lvds_init(dev_priv);
11163
11164                 intel_crt_init(dev_priv);
11165                 intel_dvo_init(dev_priv);
11166         }
11167
11168         for_each_intel_encoder(&dev_priv->drm, encoder) {
11169                 encoder->base.possible_crtcs =
11170                         intel_encoder_possible_crtcs(encoder);
11171                 encoder->base.possible_clones =
11172                         intel_encoder_possible_clones(encoder);
11173         }
11174
11175         intel_init_pch_refclk(dev_priv);
11176
11177         drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
11178 }
11179
11180 static enum drm_mode_status
11181 intel_mode_valid(struct drm_device *dev,
11182                  const struct drm_display_mode *mode)
11183 {
11184         struct drm_i915_private *dev_priv = to_i915(dev);
11185         int hdisplay_max, htotal_max;
11186         int vdisplay_max, vtotal_max;
11187
11188         /*
11189          * Can't reject DBLSCAN here because Xorg ddxen can add piles
11190          * of DBLSCAN modes to the output's mode list when they detect
11191          * the scaling mode property on the connector. And they don't
11192          * ask the kernel to validate those modes in any way until
11193          * modeset time at which point the client gets a protocol error.
11194          * So in order to not upset those clients we silently ignore the
11195          * DBLSCAN flag on such connectors. For other connectors we will
11196          * reject modes with the DBLSCAN flag in encoder->compute_config().
11197          * And we always reject DBLSCAN modes in connector->mode_valid()
11198          * as we never want such modes on the connector's mode list.
11199          */
11200
11201         if (mode->vscan > 1)
11202                 return MODE_NO_VSCAN;
11203
11204         if (mode->flags & DRM_MODE_FLAG_HSKEW)
11205                 return MODE_H_ILLEGAL;
11206
11207         if (mode->flags & (DRM_MODE_FLAG_CSYNC |
11208                            DRM_MODE_FLAG_NCSYNC |
11209                            DRM_MODE_FLAG_PCSYNC))
11210                 return MODE_HSYNC;
11211
11212         if (mode->flags & (DRM_MODE_FLAG_BCAST |
11213                            DRM_MODE_FLAG_PIXMUX |
11214                            DRM_MODE_FLAG_CLKDIV2))
11215                 return MODE_BAD;
11216
11217         /* Transcoder timing limits */
11218         if (DISPLAY_VER(dev_priv) >= 11) {
11219                 hdisplay_max = 16384;
11220                 vdisplay_max = 8192;
11221                 htotal_max = 16384;
11222                 vtotal_max = 8192;
11223         } else if (DISPLAY_VER(dev_priv) >= 9 ||
11224                    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
11225                 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
11226                 vdisplay_max = 4096;
11227                 htotal_max = 8192;
11228                 vtotal_max = 8192;
11229         } else if (DISPLAY_VER(dev_priv) >= 3) {
11230                 hdisplay_max = 4096;
11231                 vdisplay_max = 4096;
11232                 htotal_max = 8192;
11233                 vtotal_max = 8192;
11234         } else {
11235                 hdisplay_max = 2048;
11236                 vdisplay_max = 2048;
11237                 htotal_max = 4096;
11238                 vtotal_max = 4096;
11239         }
11240
11241         if (mode->hdisplay > hdisplay_max ||
11242             mode->hsync_start > htotal_max ||
11243             mode->hsync_end > htotal_max ||
11244             mode->htotal > htotal_max)
11245                 return MODE_H_ILLEGAL;
11246
11247         if (mode->vdisplay > vdisplay_max ||
11248             mode->vsync_start > vtotal_max ||
11249             mode->vsync_end > vtotal_max ||
11250             mode->vtotal > vtotal_max)
11251                 return MODE_V_ILLEGAL;
11252
11253         if (DISPLAY_VER(dev_priv) >= 5) {
11254                 if (mode->hdisplay < 64 ||
11255                     mode->htotal - mode->hdisplay < 32)
11256                         return MODE_H_ILLEGAL;
11257
11258                 if (mode->vtotal - mode->vdisplay < 5)
11259                         return MODE_V_ILLEGAL;
11260         } else {
11261                 if (mode->htotal - mode->hdisplay < 32)
11262                         return MODE_H_ILLEGAL;
11263
11264                 if (mode->vtotal - mode->vdisplay < 3)
11265                         return MODE_V_ILLEGAL;
11266         }
11267
11268         return MODE_OK;
11269 }
11270
11271 enum drm_mode_status
11272 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
11273                                 const struct drm_display_mode *mode,
11274                                 bool bigjoiner)
11275 {
11276         int plane_width_max, plane_height_max;
11277
11278         /*
11279          * intel_mode_valid() should be
11280          * sufficient on older platforms.
11281          */
11282         if (DISPLAY_VER(dev_priv) < 9)
11283                 return MODE_OK;
11284
11285         /*
11286          * Most people will probably want a fullscreen
11287          * plane so let's not advertize modes that are
11288          * too big for that.
11289          */
11290         if (DISPLAY_VER(dev_priv) >= 11) {
11291                 plane_width_max = 5120 << bigjoiner;
11292                 plane_height_max = 4320;
11293         } else {
11294                 plane_width_max = 5120;
11295                 plane_height_max = 4096;
11296         }
11297
11298         if (mode->hdisplay > plane_width_max)
11299                 return MODE_H_ILLEGAL;
11300
11301         if (mode->vdisplay > plane_height_max)
11302                 return MODE_V_ILLEGAL;
11303
11304         return MODE_OK;
11305 }
11306
/* i915's device-wide mode config callbacks, plugged into the DRM core. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
11318
11319 /**
11320  * intel_init_display_hooks - initialize the display modesetting hooks
11321  * @dev_priv: device private
11322  */
11323 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
11324 {
11325         if (!HAS_DISPLAY(dev_priv))
11326                 return;
11327
11328         intel_init_cdclk_hooks(dev_priv);
11329         intel_init_audio_hooks(dev_priv);
11330
11331         intel_dpll_init_clock_hook(dev_priv);
11332
11333         if (DISPLAY_VER(dev_priv) >= 9) {
11334                 dev_priv->display.get_pipe_config = hsw_get_pipe_config;
11335                 dev_priv->display.crtc_enable = hsw_crtc_enable;
11336                 dev_priv->display.crtc_disable = hsw_crtc_disable;
11337         } else if (HAS_DDI(dev_priv)) {
11338                 dev_priv->display.get_pipe_config = hsw_get_pipe_config;
11339                 dev_priv->display.crtc_enable = hsw_crtc_enable;
11340                 dev_priv->display.crtc_disable = hsw_crtc_disable;
11341         } else if (HAS_PCH_SPLIT(dev_priv)) {
11342                 dev_priv->display.get_pipe_config = ilk_get_pipe_config;
11343                 dev_priv->display.crtc_enable = ilk_crtc_enable;
11344                 dev_priv->display.crtc_disable = ilk_crtc_disable;
11345         } else if (IS_CHERRYVIEW(dev_priv) ||
11346                    IS_VALLEYVIEW(dev_priv)) {
11347                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
11348                 dev_priv->display.crtc_enable = valleyview_crtc_enable;
11349                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
11350         } else {
11351                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
11352                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
11353                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
11354         }
11355
11356         intel_fdi_init_hook(dev_priv);
11357
11358         if (DISPLAY_VER(dev_priv) >= 9) {
11359                 dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
11360                 dev_priv->display.get_initial_plane_config = skl_get_initial_plane_config;
11361         } else {
11362                 dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
11363                 dev_priv->display.get_initial_plane_config = i9xx_get_initial_plane_config;
11364         }
11365
11366 }
11367
11368 void intel_modeset_init_hw(struct drm_i915_private *i915)
11369 {
11370         struct intel_cdclk_state *cdclk_state;
11371
11372         if (!HAS_DISPLAY(i915))
11373                 return;
11374
11375         cdclk_state = to_intel_cdclk_state(i915->cdclk.obj.state);
11376
11377         intel_update_cdclk(i915);
11378         intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
11379         cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
11380 }
11381
11382 static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
11383 {
11384         struct drm_plane *plane;
11385         struct intel_crtc *crtc;
11386
11387         for_each_intel_crtc(state->dev, crtc) {
11388                 struct intel_crtc_state *crtc_state;
11389
11390                 crtc_state = intel_atomic_get_crtc_state(state, crtc);
11391                 if (IS_ERR(crtc_state))
11392                         return PTR_ERR(crtc_state);
11393
11394                 if (crtc_state->hw.active) {
11395                         /*
11396                          * Preserve the inherited flag to avoid
11397                          * taking the full modeset path.
11398                          */
11399                         crtc_state->inherited = true;
11400                 }
11401         }
11402
11403         drm_for_each_plane(plane, state->dev) {
11404                 struct drm_plane_state *plane_state;
11405
11406                 plane_state = drm_atomic_get_plane_state(state, plane);
11407                 if (IS_ERR(plane_state))
11408                         return PTR_ERR(plane_state);
11409         }
11410
11411         return 0;
11412 }
11413
/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks so that
 * we ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WM's should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
static void sanitize_watermarks(struct drm_i915_private *dev_priv)
{
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.optimize_watermarks)
		return;

	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (drm_WARN_ON(&dev_priv->drm, !state))
		return;

	intel_state = to_intel_atomic_state(state);

	drm_modeset_acquire_init(&ctx, 0);

	/* Everything below here is retried on modeset lock contention. */
retry:
	state->acquire_ctx = &ctx;

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH(dev_priv))
		intel_state->skip_intermediate_wm = true;

	/* Pull all CRTCs and planes into the state. */
	ret = sanitize_watermarks_add_affected(state);
	if (ret)
		goto fail;

	/* Run the atomic check to compute the watermark values. */
	ret = intel_atomic_check(&dev_priv->drm, state);
	if (ret)
		goto fail;

	/* Write calculated watermark values back */
	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
		crtc_state->wm.need_postvbl_update = true;
		dev_priv->display.optimize_watermarks(intel_state, crtc);

		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
	}

fail:
	/* On lock contention, drop the locks, clear the state and retry. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	/*
	 * If we fail here, it means that the hardware appears to be
	 * programmed in a way that shouldn't be possible, given our
	 * understanding of watermark requirements.  This might mean a
	 * mistake in the hardware readout code or a mistake in the
	 * watermark calculations for a given platform.  Raise a WARN
	 * so that this is noticeable.
	 *
	 * If this actually happens, we'll have to just leave the
	 * BIOS-programmed watermarks untouched and hope for the best.
	 */
	drm_WARN(&dev_priv->drm, ret,
		 "Could not determine valid watermarks for inherited state\n");

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
11499
/*
 * Commit the state read out of the hardware back to the hardware so that
 * all active planes recompute their state before the first real commit
 * from userspace. Returns 0 or a negative error code.
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

	/* Everything below here is retried on modeset lock contention. */
retry:
	state->acquire_ctx = &ctx;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			struct intel_encoder *encoder;

			/*
			 * We've not yet detected sink capabilities
			 * (audio,infoframes,etc.) and thus we don't want to
			 * force a full state recomputation yet. We want that to
			 * happen only for the first real commit from userspace.
			 * So preserve the inherited flag for the time being.
			 */
			crtc_state->inherited = true;

			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			/*
			 * Pull in the connectors of any encoder whose
			 * initial fastset check fails, so the commit can
			 * fix up the state.
			 */
			for_each_intel_encoder_mask(dev, encoder,
						    crtc_state->uapi.encoder_mask) {
				if (encoder->initial_fastset_check &&
				    !encoder->initial_fastset_check(encoder, crtc_state)) {
					ret = drm_atomic_add_affected_connectors(state,
										 &crtc->base);
					if (ret)
						goto out;
				}
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	/* On lock contention, drop the locks, clear the state and retry. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
11578
11579 static void intel_mode_config_init(struct drm_i915_private *i915)
11580 {
11581         struct drm_mode_config *mode_config = &i915->drm.mode_config;
11582
11583         drm_mode_config_init(&i915->drm);
11584         INIT_LIST_HEAD(&i915->global_obj_list);
11585
11586         mode_config->min_width = 0;
11587         mode_config->min_height = 0;
11588
11589         mode_config->preferred_depth = 24;
11590         mode_config->prefer_shadow = 1;
11591
11592         mode_config->funcs = &intel_mode_funcs;
11593
11594         mode_config->async_page_flip = HAS_ASYNC_FLIPS(i915);
11595
11596         /*
11597          * Maximum framebuffer dimensions, chosen to match
11598          * the maximum render engine surface size on gen4+.
11599          */
11600         if (DISPLAY_VER(i915) >= 7) {
11601                 mode_config->max_width = 16384;
11602                 mode_config->max_height = 16384;
11603         } else if (DISPLAY_VER(i915) >= 4) {
11604                 mode_config->max_width = 8192;
11605                 mode_config->max_height = 8192;
11606         } else if (DISPLAY_VER(i915) == 3) {
11607                 mode_config->max_width = 4096;
11608                 mode_config->max_height = 4096;
11609         } else {
11610                 mode_config->max_width = 2048;
11611                 mode_config->max_height = 2048;
11612         }
11613
11614         if (IS_I845G(i915) || IS_I865G(i915)) {
11615                 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
11616                 mode_config->cursor_height = 1023;
11617         } else if (IS_I830(i915) || IS_I85X(i915) ||
11618                    IS_I915G(i915) || IS_I915GM(i915)) {
11619                 mode_config->cursor_width = 64;
11620                 mode_config->cursor_height = 64;
11621         } else {
11622                 mode_config->cursor_width = 256;
11623                 mode_config->cursor_height = 256;
11624         }
11625 }
11626
/* Inverse of intel_mode_config_init(): i915 global objects first, then core. */
static void intel_mode_config_cleanup(struct drm_i915_private *i915)
{
	intel_atomic_global_obj_cleanup(i915);
	drm_mode_config_cleanup(&i915->drm);
}
11632
11633 static void plane_config_fini(struct intel_initial_plane_config *plane_config)
11634 {
11635         if (plane_config->fb) {
11636                 struct drm_framebuffer *fb = &plane_config->fb->base;
11637
11638                 /* We may only have the stub and not a full framebuffer */
11639                 if (drm_framebuffer_read_refcount(fb))
11640                         drm_framebuffer_put(fb);
11641                 else
11642                         kfree(fb);
11643         }
11644
11645         if (plane_config->vma)
11646                 i915_vma_put(plane_config->vma);
11647 }
11648
/*
 * part #1: call before irq install
 *
 * Sets up everything display-related that must exist before interrupts
 * are enabled: vblank, VBT, VGA, power domains, DMC firmware, workqueues
 * and the software state objects (cdclk, dbuf, bandwidth).
 * Returns 0 on success or a negative error code.
 */
int intel_modeset_init_noirq(struct drm_i915_private *i915)
{
	int ret;

	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	if (HAS_DISPLAY(i915)) {
		ret = drm_vblank_init(&i915->drm,
				      INTEL_NUM_PIPES(i915));
		if (ret)
			return ret;
	}

	intel_bios_init(i915);

	ret = intel_vga_register(i915);
	if (ret)
		goto cleanup_bios;

	/* FIXME: completely on the wrong abstraction layer */
	intel_power_domains_init_hw(i915, false);

	/* Display-less parts are done once power domains are set up. */
	if (!HAS_DISPLAY(i915))
		return 0;

	intel_dmc_ucode_init(i915);

	/*
	 * NOTE(review): the workqueue allocations below are not checked
	 * for failure — presumably probe-time allocation failure is
	 * treated as not worth handling here; confirm before relying on
	 * these pointers being non-NULL.
	 */
	i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
	i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);

	i915->framestart_delay = 1; /* 1-4 */

	i915->window2_delay = 0; /* No DSB so no window2 delay */

	intel_mode_config_init(i915);

	ret = intel_cdclk_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	ret = intel_dbuf_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	ret = intel_bw_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	init_llist_head(&i915->atomic_helper.free_list);
	INIT_WORK(&i915->atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(i915);

	intel_fbc_init(i915);

	return 0;

	/* Unwind in reverse order of the setup above. */
cleanup_vga_client_pw_domain_dmc:
	intel_dmc_ucode_fini(i915);
	intel_power_domains_driver_remove(i915);
	intel_vga_unregister(i915);
cleanup_bios:
	intel_bios_driver_remove(i915);

	return ret;
}
11719
/*
 * part #2: call after irq install, but before gem init
 *
 * Creates the CRTCs and outputs, reads the hardware state out of the
 * BIOS-programmed hardware, reconstructs the BIOS framebuffer and
 * sanitizes the watermarks. Returns 0 on success or a negative error code.
 */
int intel_modeset_init_nogem(struct drm_i915_private *i915)
{
	struct drm_device *dev = &i915->drm;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	if (!HAS_DISPLAY(i915))
		return 0;

	intel_init_pm(i915);

	intel_panel_sanitize_ssc(i915);

	intel_pps_setup(i915);

	intel_gmbus_setup(i915);

	drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
		    INTEL_NUM_PIPES(i915),
		    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

	/* One CRTC per hardware pipe. */
	for_each_pipe(i915, pipe) {
		ret = intel_crtc_init(i915, pipe);
		if (ret) {
			intel_mode_config_cleanup(i915);
			return ret;
		}
	}

	intel_plane_possible_crtcs_init(i915);
	intel_shared_dpll_init(dev);
	intel_fdi_pll_freq_update(i915);

	intel_update_czclk(i915);
	intel_modeset_init_hw(i915);
	intel_dpll_update_ref_clks(i915);

	intel_hdcp_component_init(i915);

	if (i915->max_cdclk_freq == 0)
		intel_update_max_cdclk(i915);

	/*
	 * If the platform has HTI, we need to find out whether it has reserved
	 * any display resources before we create our display outputs.
	 */
	if (INTEL_INFO(i915)->display.has_hti)
		i915->hti_state = intel_de_read(i915, HDPORT_STATE);

	/* Just disable it once at startup */
	intel_vga_disable(i915);
	intel_setup_outputs(i915);

	/* Read the current hardware state into the atomic/crtc state. */
	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
	drm_modeset_unlock_all(dev);

	/* Reconstruct the BIOS framebuffer for each active CRTC. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top.  This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		i915->display.get_initial_plane_config(crtc, &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);

		plane_config_fini(&plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(i915))
		sanitize_watermarks(i915);

	return 0;
}
11813
/* part #3: call after gem init */
int intel_modeset_init(struct drm_i915_private *i915)
{
	int ret;

	/* Nothing to do on a device without display hardware. */
	if (!HAS_DISPLAY(i915))
		return 0;

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(&i915->drm);
	if (ret)
		drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);
	/* note: a failed initial commit is logged but deliberately not fatal */

	intel_overlay_setup(i915);

	/* fbdev init failure is the only fatal error in this function */
	ret = intel_fbdev_init(&i915->drm);
	if (ret)
		return ret;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(i915);
	intel_hpd_poll_disable(i915);

	intel_init_ipc(i915);

	return 0;
}
11846
/*
 * Force-enable @pipe with a fixed 640x480@60Hz mode, programming the
 * DPLL dividers and pipe timings from scratch. Used by the i830 "force
 * quirk" handling (see the debug message below). The register write
 * ordering in here is deliberate — do not reorder.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* Sanity check: the fixed dividers must yield the expected dotclock. */
	drm_WARN_ON(&dev_priv->drm,
		    i9xx_calc_dpll_params(48000, &clock) != 25154);

	drm_dbg_kms(&dev_priv->drm,
		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		    pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	intel_de_write(dev_priv, FP0(pipe), fp);
	intel_de_write(dev_priv, FP1(pipe), fp);

	/* Standard 640x480@60 timings (800x525 total, registers are 0-based). */
	intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		intel_de_write(dev_priv, DPLL(pipe), dpll);
		intel_de_posting_read(dev_priv, DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	intel_de_write(dev_priv, PIPECONF(pipe),
		       PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	/* Confirm the pipe has actually started scanning out. */
	intel_wait_for_pipe_scanline_moving(crtc);
}
11919
/*
 * Force-disable @pipe (the counterpart to i830_enable_pipe()), first
 * warning if any display plane or cursor is still enabled on the
 * hardware, then shutting down the pipe and its DPLL.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
		    pipe_name(pipe));

	/* All planes (A/B/C) and cursors (A/B) are expected to be off. */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);

	intel_de_write(dev_priv, PIPECONF(pipe), 0);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	/* Wait for the pipe to stop scanning out before killing its clock. */
	intel_wait_for_pipe_scanline_stopped(crtc);

	/* Disable the DPLL, keeping only the VGA mode disable bit set. */
	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}
11949
11950 static void
11951 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
11952 {
11953         struct intel_crtc *crtc;
11954
11955         if (DISPLAY_VER(dev_priv) >= 4)
11956                 return;
11957
11958         for_each_intel_crtc(&dev_priv->drm, crtc) {
11959                 struct intel_plane *plane =
11960                         to_intel_plane(crtc->base.primary);
11961                 struct intel_crtc *plane_crtc;
11962                 enum pipe pipe;
11963
11964                 if (!plane->get_hw_state(plane, &pipe))
11965                         continue;
11966
11967                 if (pipe == crtc->pipe)
11968                         continue;
11969
11970                 drm_dbg_kms(&dev_priv->drm,
11971                             "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
11972                             plane->base.base.id, plane->base.name);
11973
11974                 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
11975                 intel_plane_disable_noatomic(plane_crtc, plane);
11976         }
11977 }
11978
11979 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
11980 {
11981         struct drm_device *dev = crtc->base.dev;
11982         struct intel_encoder *encoder;
11983
11984         for_each_encoder_on_crtc(dev, &crtc->base, encoder)
11985                 return true;
11986
11987         return false;
11988 }
11989
11990 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
11991 {
11992         struct drm_device *dev = encoder->base.dev;
11993         struct intel_connector *connector;
11994
11995         for_each_connector_on_encoder(dev, &encoder->base, connector)
11996                 return connector;
11997
11998         return NULL;
11999 }
12000
12001 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
12002                               enum pipe pch_transcoder)
12003 {
12004         return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
12005                 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
12006 }
12007
12008 static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
12009 {
12010         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
12011         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12012         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
12013
12014         if (DISPLAY_VER(dev_priv) >= 9 ||
12015             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
12016                 i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
12017                 u32 val;
12018
12019                 if (transcoder_is_dsi(cpu_transcoder))
12020                         return;
12021
12022                 val = intel_de_read(dev_priv, reg);
12023                 val &= ~HSW_FRAME_START_DELAY_MASK;
12024                 val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12025                 intel_de_write(dev_priv, reg, val);
12026         } else {
12027                 i915_reg_t reg = PIPECONF(cpu_transcoder);
12028                 u32 val;
12029
12030                 val = intel_de_read(dev_priv, reg);
12031                 val &= ~PIPECONF_FRAME_START_DELAY_MASK;
12032                 val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12033                 intel_de_write(dev_priv, reg, val);
12034         }
12035
12036         if (!crtc_state->has_pch_encoder)
12037                 return;
12038
12039         if (HAS_PCH_IBX(dev_priv)) {
12040                 i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
12041                 u32 val;
12042
12043                 val = intel_de_read(dev_priv, reg);
12044                 val &= ~TRANS_FRAME_START_DELAY_MASK;
12045                 val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12046                 intel_de_write(dev_priv, reg, val);
12047         } else {
12048                 enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
12049                 i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
12050                 u32 val;
12051
12052                 val = intel_de_read(dev_priv, reg);
12053                 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
12054                 val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12055                 intel_de_write(dev_priv, reg, val);
12056         }
12057 }
12058
/*
 * Bring a crtc's BIOS-inherited state in line with what the driver
 * expects: clear frame start delays, disable all non-primary planes
 * and any BIOS-set background color, shut the crtc down if it has no
 * encoders, and mark fifo underrun reporting as initially disabled.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);

	if (crtc_state->hw.active) {
		struct intel_plane *plane;

		/* Clear any frame start delays used for debugging left by the BIOS */
		intel_sanitize_frame_start_delay(crtc_state);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->uapi.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}

		/*
		 * Disable any background color set by the BIOS, but enable the
		 * gamma and CSC to match how we program our planes.
		 */
		if (DISPLAY_VER(dev_priv) >= 9)
			intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
				       SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. Bigjoiner slaves legitimately
	 * have no encoder of their own, so they are left alone here. */
	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
	    !crtc_state->bigjoiner_slave)
		intel_crtc_disable_noatomic(crtc, ctx);

	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH trancoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
12125
12126 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
12127 {
12128         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
12129
12130         /*
12131          * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
12132          * the hardware when a high res displays plugged in. DPLL P
12133          * divider is zero, and the pipe timings are bonkers. We'll
12134          * try to disable everything in that case.
12135          *
12136          * FIXME would be nice to be able to sanitize this state
12137          * without several WARNs, but for now let's take the easy
12138          * road.
12139          */
12140         return IS_SANDYBRIDGE(dev_priv) &&
12141                 crtc_state->hw.active &&
12142                 crtc_state->shared_dpll &&
12143                 crtc_state->port_clock == 0;
12144 }
12145
/*
 * Sanitize a single encoder after hw state readout: if the encoder has
 * active connectors but no active pipe (fallout from resume register
 * restore, or a bogus SNB BIOS DPLL config), run its disable hooks
 * manually and clamp the connector/encoder links to off. Finally notify
 * opregion and, on DDI platforms, sanitize the encoder's PLL mapping.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_crtc_state *crtc_state = crtc ?
		to_intel_crtc_state(crtc->base.state) : NULL;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = crtc_state &&
		crtc_state->hw.active;

	/* Treat a crtc with bogus BIOS DPLL programming as inactive so
	 * the encoder gets disabled below. */
	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
			    pipe_name(crtc->pipe));
		has_active_crtc = false;
	}

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			    encoder->base.base.id,
			    encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (crtc_state) {
			struct drm_encoder *best_encoder;

			drm_dbg_kms(&dev_priv->drm,
				    "[ENCODER:%d:%s] manually disabled\n",
				    encoder->base.base.id,
				    encoder->base.name);

			/* avoid oopsing in case the hooks consult best_encoder */
			best_encoder = connector->base.state->best_encoder;
			connector->base.state->best_encoder = &encoder->base;

			/* FIXME NULL atomic state passed! */
			if (encoder->disable)
				encoder->disable(NULL, encoder, crtc_state,
						 connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(NULL, encoder, crtc_state,
						      connector->base.state);

			/* restore the saved best_encoder pointer */
			connector->base.state->best_encoder = best_encoder;
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	if (HAS_DDI(dev_priv))
		intel_ddi_sanitize_encoder_pll_mapping(encoder);
}
12216
12217 /* FIXME read out full plane state for all planes */
12218 static void readout_plane_state(struct drm_i915_private *dev_priv)
12219 {
12220         struct intel_plane *plane;
12221         struct intel_crtc *crtc;
12222
12223         for_each_intel_plane(&dev_priv->drm, plane) {
12224                 struct intel_plane_state *plane_state =
12225                         to_intel_plane_state(plane->base.state);
12226                 struct intel_crtc_state *crtc_state;
12227                 enum pipe pipe = PIPE_A;
12228                 bool visible;
12229
12230                 visible = plane->get_hw_state(plane, &pipe);
12231
12232                 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12233                 crtc_state = to_intel_crtc_state(crtc->base.state);
12234
12235                 intel_set_plane_visible(crtc_state, plane_state, visible);
12236
12237                 drm_dbg_kms(&dev_priv->drm,
12238                             "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
12239                             plane->base.base.id, plane->base.name,
12240                             enableddisabled(visible), pipe_name(pipe));
12241         }
12242
12243         for_each_intel_crtc(&dev_priv->drm, crtc) {
12244                 struct intel_crtc_state *crtc_state =
12245                         to_intel_crtc_state(crtc->base.state);
12246
12247                 fixup_plane_bitmasks(crtc_state);
12248         }
12249 }
12250
/*
 * Read the current hardware display state into the software/atomic
 * state: crtc pipe configs, plane visibility, encoder->crtc links,
 * DPLLs and connector status, then derive the cdclk/voltage-level and
 * bandwidth bookkeeping (including copying bigjoiner slave state from
 * its master). Must run before any sanitization/takeover logic that
 * relies on an accurate picture of what the BIOS left enabled.
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	u8 active_pipes = 0;

	/* Reset every crtc's state, then read its config from the hw. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
		intel_crtc_free_hw_state(crtc_state);
		intel_crtc_state_reset(crtc_state, crtc);

		intel_crtc_get_pipe_config(crtc_state);

		crtc_state->hw.enable = crtc_state->hw.active;

		crtc->base.enabled = crtc_state->hw.enable;
		crtc->active = crtc_state->hw.active;

		if (crtc_state->hw.active)
			active_pipes |= BIT(crtc->pipe);

		drm_dbg_kms(&dev_priv->drm,
			    "[CRTC:%d:%s] hw state readout: %s\n",
			    crtc->base.base.id, crtc->base.name,
			    enableddisabled(crtc_state->hw.active));
	}

	/* Keep all three active-pipe trackers in agreement. */
	dev_priv->active_pipes = cdclk_state->active_pipes =
		dbuf_state->active_pipes = active_pipes;

	readout_plane_state(dev_priv);

	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			struct intel_crtc_state *crtc_state;

			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			intel_encoder_get_config(encoder, crtc_state);
			if (encoder->sync_state)
				encoder->sync_state(encoder, crtc_state);

			/* read out to slave crtc as well for bigjoiner */
			if (crtc_state->bigjoiner) {
				/* the encoder should be linked to the bigjoiner master */
				WARN_ON(crtc_state->bigjoiner_slave);

				crtc = crtc_state->bigjoiner_linked_crtc;
				crtc_state = to_intel_crtc_state(crtc->base.state);
				intel_encoder_get_config(encoder, crtc_state);
			}
		} else {
			encoder->base.crtc = NULL;
		}

		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			    encoder->base.base.id, encoder->base.name,
			    enableddisabled(encoder->base.crtc),
			    pipe_name(pipe));
	}

	intel_dpll_readout_hw_state(dev_priv);

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			struct intel_crtc_state *crtc_state;
			struct intel_crtc *crtc;

			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = intel_attached_encoder(connector);
			connector->base.encoder = &encoder->base;

			crtc = to_intel_crtc(encoder->base.crtc);
			crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;

			if (crtc_state && crtc_state->hw.active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				crtc_state->uapi.connector_mask |=
					drm_connector_mask(&connector->base);
				crtc_state->uapi.encoder_mask |=
					drm_encoder_mask(&encoder->base);
			}
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		drm_dbg_kms(&dev_priv->drm,
			    "[CONNECTOR:%d:%s] hw state readout: %s\n",
			    connector->base.base.id, connector->base.name,
			    enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Derive per-crtc cdclk/voltage/bandwidth state from the readout. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_bw_state *bw_state =
			to_intel_bw_state(dev_priv->bw_obj.state);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane;
		int min_cdclk = 0;

		/* Slave crtcs are handled below via their master. */
		if (crtc_state->bigjoiner_slave)
			continue;

		if (crtc_state->hw.active) {
			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			crtc_state->inherited = true;

			intel_crtc_update_active_timings(crtc_state);

			intel_crtc_copy_hw_to_uapi_state(crtc_state);
		}

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			/*
			 * FIXME don't have the fb yet, so can't
			 * use intel_plane_data_rate() :(
			 */
			if (plane_state->uapi.visible)
				crtc_state->data_rate[plane->id] =
					4 * crtc_state->pixel_rate;
			/*
			 * FIXME don't have the fb yet, so can't
			 * use plane->min_cdclk() :(
			 */
			if (plane_state->uapi.visible && plane->min_cdclk) {
				if (crtc_state->double_wide || DISPLAY_VER(dev_priv) >= 10)
					crtc_state->min_cdclk[plane->id] =
						DIV_ROUND_UP(crtc_state->pixel_rate, 2);
				else
					crtc_state->min_cdclk[plane->id] =
						crtc_state->pixel_rate;
			}
			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s] min_cdclk %d kHz\n",
				    plane->base.base.id, plane->base.name,
				    crtc_state->min_cdclk[plane->id]);
		}

		if (crtc_state->hw.active) {
			min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
			if (drm_WARN_ON(dev, min_cdclk < 0))
				min_cdclk = 0;
		}

		cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
		cdclk_state->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		intel_bw_crtc_update(bw_state, crtc_state);

		intel_pipe_config_sanity_check(dev_priv, crtc_state);

		/* discard our incomplete slave state, copy it from master */
		if (crtc_state->bigjoiner && crtc_state->hw.active) {
			struct intel_crtc *slave = crtc_state->bigjoiner_linked_crtc;
			struct intel_crtc_state *slave_crtc_state =
				to_intel_crtc_state(slave->base.state);

			copy_bigjoiner_crtc_state(slave_crtc_state, crtc_state);
			slave->base.mode = crtc->base.mode;

			cdclk_state->min_cdclk[slave->pipe] = min_cdclk;
			cdclk_state->min_voltage_level[slave->pipe] =
				crtc_state->min_voltage_level;

			for_each_intel_plane_on_crtc(&dev_priv->drm, slave, plane) {
				const struct intel_plane_state *plane_state =
					to_intel_plane_state(plane->base.state);

				/*
				 * FIXME don't have the fb yet, so can't
				 * use intel_plane_data_rate() :(
				 */
				if (plane_state->uapi.visible)
					crtc_state->data_rate[plane->id] =
						4 * crtc_state->pixel_rate;
				else
					crtc_state->data_rate[plane->id] = 0;
			}

			intel_bw_crtc_update(bw_state, slave_crtc_state);
			drm_calc_timestamping_constants(&slave->base,
							&slave_crtc_state->hw.adjusted_mode);
		}
	}
}
12471
12472 static void
12473 get_encoder_power_domains(struct drm_i915_private *dev_priv)
12474 {
12475         struct intel_encoder *encoder;
12476
12477         for_each_intel_encoder(&dev_priv->drm, encoder) {
12478                 struct intel_crtc_state *crtc_state;
12479
12480                 if (!encoder->get_power_domains)
12481                         continue;
12482
12483                 /*
12484                  * MST-primary and inactive encoders don't have a crtc state
12485                  * and neither of these require any power domain references.
12486                  */
12487                 if (!encoder->base.crtc)
12488                         continue;
12489
12490                 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
12491                 encoder->get_power_domains(encoder, crtc_state);
12492         }
12493 }
12494
/*
 * Apply display workarounds that must be in place early, before the
 * rest of display init/takeover touches the hardware. Each write below
 * is tied to a documented workaround (see the WA numbers).
 */
static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
	/*
	 * Display WA #1185 WaDisableDARBFClkGating:glk,icl,ehl,tgl
	 * Also known as Wa_14010480278.
	 */
	if (IS_DISPLAY_VER(dev_priv, 10, 12))
		intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
			       intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);

	if (IS_HASWELL(dev_priv)) {
		/*
		 * WaRsPkgCStateDisplayPMReq:hsw
		 * System hang if this isn't done before disabling all planes!
		 */
		intel_de_write(dev_priv, CHICKEN_PAR1_1,
			       intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
	}

	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) {
		/* Display WA #1142:kbl,cfl,cml */
		intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
			     KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
		intel_de_rmw(dev_priv, CHICKEN_MISC_2,
			     KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
			     KBL_ARB_FILL_SPARE_14);
	}
}
12523
12524 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
12525                                        enum port port, i915_reg_t hdmi_reg)
12526 {
12527         u32 val = intel_de_read(dev_priv, hdmi_reg);
12528
12529         if (val & SDVO_ENABLE ||
12530             (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
12531                 return;
12532
12533         drm_dbg_kms(&dev_priv->drm,
12534                     "Sanitizing transcoder select for HDMI %c\n",
12535                     port_name(port));
12536
12537         val &= ~SDVO_PIPE_SEL_MASK;
12538         val |= SDVO_PIPE_SEL(PIPE_A);
12539
12540         intel_de_write(dev_priv, hdmi_reg, val);
12541 }
12542
12543 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
12544                                      enum port port, i915_reg_t dp_reg)
12545 {
12546         u32 val = intel_de_read(dev_priv, dp_reg);
12547
12548         if (val & DP_PORT_EN ||
12549             (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
12550                 return;
12551
12552         drm_dbg_kms(&dev_priv->drm,
12553                     "Sanitizing transcoder select for DP %c\n",
12554                     port_name(port));
12555
12556         val &= ~DP_PIPE_SEL_MASK;
12557         val |= DP_PIPE_SEL(PIPE_A);
12558
12559         intel_de_write(dev_priv, dp_reg, val);
12560 }
12561
12562 static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
12563 {
12564         /*
12565          * The BIOS may select transcoder B on some of the PCH
12566          * ports even it doesn't enable the port. This would trip
12567          * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
12568          * Sanitize the transcoder select bits to prevent that. We
12569          * assume that the BIOS never actually enabled the port,
12570          * because if it did we'd actually have to toggle the port
12571          * on and back off to make the transcoder A select stick
12572          * (see. intel_dp_link_down(), intel_disable_hdmi(),
12573          * intel_disable_sdvo()).
12574          */
12575         ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
12576         ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
12577         ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
12578
12579         /* PCH SDVOB multiplex with HDMIB */
12580         ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
12581         ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
12582         ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
12583 }
12584
/*
 * Scan out the current hw modeset state,
 * and sanitizes it to the current state.
 *
 * Reads back what the hardware is actually doing and then fixes up
 * anything inconsistent so software state matches hardware state.
 * The order of the sanitization steps below is significant.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;

	/* Hold the INIT power domain for the whole readout/sanitize pass. */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	/* Apply early workarounds before reading out the HW state. */
	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */

	/* Sanitize the TypeC port mode upfront, encoders depend on this */
	for_each_intel_encoder(dev, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		/* We need to sanitize only the MST primary port. */
		if (encoder->type != INTEL_OUTPUT_DP_MST &&
		    intel_phy_is_tc(dev_priv, phy))
			intel_tc_port_sanitize(enc_to_dig_port(encoder));
	}

	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->hw.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	intel_dpll_sanitize_state(dev_priv);

	/* Read out (and, where supported, sanitize) the watermark state. */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (DISPLAY_VER(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		u64 put_domains;

		/*
		 * NOTE(review): a non-zero return here (domains that
		 * would need to be put) is unexpected at this point,
		 * hence the WARN before dropping them again.
		 */
		put_domains = modeset_get_crtc_power_domains(crtc_state);
		if (drm_WARN_ON(dev, put_domains))
			modeset_put_crtc_power_domains(crtc, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}
12674
12675 void intel_display_resume(struct drm_device *dev)
12676 {
12677         struct drm_i915_private *dev_priv = to_i915(dev);
12678         struct drm_atomic_state *state = dev_priv->modeset_restore_state;
12679         struct drm_modeset_acquire_ctx ctx;
12680         int ret;
12681
12682         if (!HAS_DISPLAY(dev_priv))
12683                 return;
12684
12685         dev_priv->modeset_restore_state = NULL;
12686         if (state)
12687                 state->acquire_ctx = &ctx;
12688
12689         drm_modeset_acquire_init(&ctx, 0);
12690
12691         while (1) {
12692                 ret = drm_modeset_lock_all_ctx(dev, &ctx);
12693                 if (ret != -EDEADLK)
12694                         break;
12695
12696                 drm_modeset_backoff(&ctx);
12697         }
12698
12699         if (!ret)
12700                 ret = __intel_display_resume(dev, state, &ctx);
12701
12702         intel_enable_ipc(dev_priv);
12703         drm_modeset_drop_locks(&ctx);
12704         drm_modeset_acquire_fini(&ctx);
12705
12706         if (ret)
12707                 drm_err(&dev_priv->drm,
12708                         "Restoring old state failed with %i\n", ret);
12709         if (state)
12710                 drm_atomic_state_put(state);
12711 }
12712
12713 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
12714 {
12715         struct intel_connector *connector;
12716         struct drm_connector_list_iter conn_iter;
12717
12718         /* Kill all the work that may have been queued by hpd. */
12719         drm_connector_list_iter_begin(&i915->drm, &conn_iter);
12720         for_each_intel_connector_iter(connector, &conn_iter) {
12721                 if (connector->modeset_retry_work.func)
12722                         cancel_work_sync(&connector->modeset_retry_work);
12723                 if (connector->hdcp.shim) {
12724                         cancel_delayed_work_sync(&connector->hdcp.check_work);
12725                         cancel_work_sync(&connector->hdcp.prop_work);
12726                 }
12727         }
12728         drm_connector_list_iter_end(&conn_iter);
12729 }
12730
12731 /* part #1: call before irq uninstall */
12732 void intel_modeset_driver_remove(struct drm_i915_private *i915)
12733 {
12734         if (!HAS_DISPLAY(i915))
12735                 return;
12736
12737         flush_workqueue(i915->flip_wq);
12738         flush_workqueue(i915->modeset_wq);
12739
12740         flush_work(&i915->atomic_helper.free_work);
12741         drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
12742 }
12743
/* part #2: call after irq uninstall */
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	intel_mode_config_cleanup(i915);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	/* All work has been flushed above, safe to destroy the queues now. */
	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	intel_fbc_cleanup_cfb(i915);
}
12786
/* part #3: call after gem init */
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
{
	/* Final display teardown; keep this call order intact. */
	intel_dmc_ucode_fini(i915);
	intel_power_domains_driver_remove(i915);
	intel_vga_unregister(i915);
	intel_bios_driver_remove(i915);
}
12798
/*
 * Register all userspace-facing display interfaces (debugfs, opregion,
 * ACPI video, audio, fbdev, connector polling). No-op without display HW.
 */
void intel_display_driver_register(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	intel_display_debugfs_register(i915);

	/* Must be done after probing outputs */
	intel_opregion_register(i915);
	acpi_video_register();

	intel_audio_init(i915);

	/*
	 * Some ports require correctly set-up hpd registers for
	 * detection to work properly (leading to ghost connected
	 * connector status), e.g. VGA on gm45.  Hence we can only set
	 * up the initial fbdev config after hpd irqs are fully
	 * enabled. We do it last so that the async config cannot run
	 * before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(&i915->drm);

	/*
	 * We need to coordinate the hotplugs with the asynchronous
	 * fbdev configuration, for which we use the
	 * fbdev->async_cookie.
	 */
	drm_kms_helper_poll_init(&i915->drm);
}
12829
/*
 * Unregister the userspace-facing display interfaces set up by
 * intel_display_driver_register(). No-op without display HW.
 */
void intel_display_driver_unregister(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	intel_fbdev_unregister(i915);
	intel_audio_deinit(i915);

	/*
	 * After flushing the fbdev (incl. a late async config which
	 * will have delayed queuing of a hotplug event), then flush
	 * the hotplug events.
	 */
	drm_kms_helper_poll_fini(&i915->drm);
	/* Disable all outputs/CRTCs before the display goes away. */
	drm_atomic_helper_shutdown(&i915->drm);

	acpi_video_unregister();
	intel_opregion_unregister(i915);
}