a25cad621f81f4331fd87b48cd198d85c79e45cc
[platform/kernel/linux-rpi.git] / drivers / gpu / drm / i915 / display / intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <acpi/video.h>
28 #include <linux/i2c.h>
29 #include <linux/input.h>
30 #include <linux/intel-iommu.h>
31 #include <linux/kernel.h>
32 #include <linux/module.h>
33 #include <linux/dma-resv.h>
34 #include <linux/slab.h>
35
36 #include <drm/drm_atomic.h>
37 #include <drm/drm_atomic_helper.h>
38 #include <drm/drm_atomic_uapi.h>
39 #include <drm/drm_damage_helper.h>
40 #include <drm/drm_dp_helper.h>
41 #include <drm/drm_edid.h>
42 #include <drm/drm_fourcc.h>
43 #include <drm/drm_plane_helper.h>
44 #include <drm/drm_probe_helper.h>
45 #include <drm/drm_rect.h>
46
47 #include "display/intel_audio.h"
48 #include "display/intel_crt.h"
49 #include "display/intel_ddi.h"
50 #include "display/intel_display_debugfs.h"
51 #include "display/intel_dp.h"
52 #include "display/intel_dp_mst.h"
53 #include "display/intel_dpll.h"
54 #include "display/intel_dpll_mgr.h"
55 #include "display/intel_dsi.h"
56 #include "display/intel_dvo.h"
57 #include "display/intel_fb.h"
58 #include "display/intel_gmbus.h"
59 #include "display/intel_hdmi.h"
60 #include "display/intel_lvds.h"
61 #include "display/intel_sdvo.h"
62 #include "display/intel_snps_phy.h"
63 #include "display/intel_tv.h"
64 #include "display/intel_vdsc.h"
65 #include "display/intel_vrr.h"
66
67 #include "gem/i915_gem_lmem.h"
68 #include "gem/i915_gem_object.h"
69
70 #include "gt/intel_rps.h"
71 #include "gt/gen8_ppgtt.h"
72
73 #include "g4x_dp.h"
74 #include "g4x_hdmi.h"
75 #include "i915_drv.h"
76 #include "intel_acpi.h"
77 #include "intel_atomic.h"
78 #include "intel_atomic_plane.h"
79 #include "intel_bw.h"
80 #include "intel_cdclk.h"
81 #include "intel_color.h"
82 #include "intel_crtc.h"
83 #include "intel_de.h"
84 #include "intel_display_types.h"
85 #include "intel_dmc.h"
86 #include "intel_dp_link_training.h"
87 #include "intel_dpt.h"
88 #include "intel_fbc.h"
89 #include "intel_fdi.h"
90 #include "intel_fbdev.h"
91 #include "intel_fifo_underrun.h"
92 #include "intel_frontbuffer.h"
93 #include "intel_hdcp.h"
94 #include "intel_hotplug.h"
95 #include "intel_overlay.h"
96 #include "intel_panel.h"
97 #include "intel_pipe_crc.h"
98 #include "intel_pm.h"
99 #include "intel_pps.h"
100 #include "intel_psr.h"
101 #include "intel_quirks.h"
102 #include "intel_sideband.h"
103 #include "intel_sprite.h"
104 #include "intel_tc.h"
105 #include "intel_vga.h"
106 #include "i9xx_plane.h"
107 #include "skl_scaler.h"
108 #include "skl_universal_plane.h"
109
110 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
111                                 struct intel_crtc_state *pipe_config);
112 static void ilk_pch_clock_get(struct intel_crtc *crtc,
113                               struct intel_crtc_state *pipe_config);
114
115 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
116 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
117 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
118                                          const struct intel_link_m_n *m_n,
119                                          const struct intel_link_m_n *m2_n2);
120 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
121 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
122 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
123 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
124 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
125 static void intel_modeset_setup_hw_state(struct drm_device *dev,
126                                          struct drm_modeset_acquire_ctx *ctx);
127
128 /* returns HPLL frequency in kHz */
129 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
130 {
131         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
132
133         /* Obtain SKU information */
134         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
135                 CCK_FUSE_HPLL_FREQ_MASK;
136
137         return vco_freq[hpll_freq] * 1000;
138 }
139
/*
 * Read back a CCK-derived clock frequency (in the same unit as
 * @ref_freq) from the divider programmed in @reg. @name is only used
 * for the warning message.
 */
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	/*
	 * The status field should mirror the programmed divider once a
	 * clock change has completed; warn if they still disagree.
	 */
	drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
		 (divider << CCK_FREQUENCY_STATUS_SHIFT),
		 "%s change in progress\n", name);

	/* freq = 2 * ref / (divider + 1), rounded to nearest */
	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
155
156 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
157                            const char *name, u32 reg)
158 {
159         int hpll;
160
161         vlv_cck_get(dev_priv);
162
163         if (dev_priv->hpll_freq == 0)
164                 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
165
166         hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
167
168         vlv_cck_put(dev_priv);
169
170         return hpll;
171 }
172
173 static void intel_update_czclk(struct drm_i915_private *dev_priv)
174 {
175         if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
176                 return;
177
178         dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
179                                                       CCK_CZ_CLOCK_CONTROL);
180
181         drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
182                 dev_priv->czclk_freq);
183 }
184
185 /* WA Display #0827: Gen9:all */
186 static void
187 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
188 {
189         if (enable)
190                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
191                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
192         else
193                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
194                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
195 }
196
197 /* Wa_2006604312:icl,ehl */
198 static void
199 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
200                        bool enable)
201 {
202         if (enable)
203                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
204                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
205         else
206                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
207                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
208 }
209
210 static bool
211 is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
212 {
213         return crtc_state->master_transcoder != INVALID_TRANSCODER;
214 }
215
216 static bool
217 is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
218 {
219         return crtc_state->sync_mode_slaves_mask != 0;
220 }
221
222 bool
223 is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
224 {
225         return is_trans_port_sync_master(crtc_state) ||
226                 is_trans_port_sync_slave(crtc_state);
227 }
228
229 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
230                                     enum pipe pipe)
231 {
232         i915_reg_t reg = PIPEDSL(pipe);
233         u32 line1, line2;
234         u32 line_mask;
235
236         if (DISPLAY_VER(dev_priv) == 2)
237                 line_mask = DSL_LINEMASK_GEN2;
238         else
239                 line_mask = DSL_LINEMASK_GEN3;
240
241         line1 = intel_de_read(dev_priv, reg) & line_mask;
242         msleep(5);
243         line2 = intel_de_read(dev_priv, reg) & line_mask;
244
245         return line1 != line2;
246 }
247
/*
 * Block until the pipe's scanline counter matches the requested
 * activity state (@state: true = moving, false = stopped), logging an
 * error on timeout.
 */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		drm_err(&dev_priv->drm,
			"pipe %c scanline %s wait timed out\n",
			pipe_name(pipe), onoff(state));
}
259
/* Wait until the pipe's scanline counter has stopped advancing. */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}
264
/* Wait until the pipe's scanline counter has started advancing. */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
269
270 static void
271 intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
272 {
273         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
274         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
275
276         if (DISPLAY_VER(dev_priv) >= 4) {
277                 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
278                 i915_reg_t reg = PIPECONF(cpu_transcoder);
279
280                 /* Wait for the Pipe State to go off */
281                 if (intel_de_wait_for_clear(dev_priv, reg,
282                                             I965_PIPECONF_ACTIVE, 100))
283                         drm_WARN(&dev_priv->drm, 1,
284                                  "pipe_off wait timed out\n");
285         } else {
286                 intel_wait_for_pipe_scanline_stopped(crtc);
287         }
288 }
289
290 /* Only for pre-ILK configs */
291 void assert_pll(struct drm_i915_private *dev_priv,
292                 enum pipe pipe, bool state)
293 {
294         u32 val;
295         bool cur_state;
296
297         val = intel_de_read(dev_priv, DPLL(pipe));
298         cur_state = !!(val & DPLL_VCO_ENABLE);
299         I915_STATE_WARN(cur_state != state,
300              "PLL state assertion failure (expected %s, current %s)\n",
301                         onoff(state), onoff(cur_state));
302 }
303
304 /* XXX: the dsi pll is shared between MIPI DSI ports */
305 void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
306 {
307         u32 val;
308         bool cur_state;
309
310         vlv_cck_get(dev_priv);
311         val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
312         vlv_cck_put(dev_priv);
313
314         cur_state = val & DSI_PLL_VCO_EN;
315         I915_STATE_WARN(cur_state != state,
316              "DSI PLL state assertion failure (expected %s, current %s)\n",
317                         onoff(state), onoff(cur_state));
318 }
319
320 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
321                           enum pipe pipe, bool state)
322 {
323         bool cur_state;
324
325         if (HAS_DDI(dev_priv)) {
326                 /*
327                  * DDI does not have a specific FDI_TX register.
328                  *
329                  * FDI is never fed from EDP transcoder
330                  * so pipe->transcoder cast is fine here.
331                  */
332                 enum transcoder cpu_transcoder = (enum transcoder)pipe;
333                 u32 val = intel_de_read(dev_priv,
334                                         TRANS_DDI_FUNC_CTL(cpu_transcoder));
335                 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
336         } else {
337                 u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
338                 cur_state = !!(val & FDI_TX_ENABLE);
339         }
340         I915_STATE_WARN(cur_state != state,
341              "FDI TX state assertion failure (expected %s, current %s)\n",
342                         onoff(state), onoff(cur_state));
343 }
344 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
345 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
346
347 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
348                           enum pipe pipe, bool state)
349 {
350         u32 val;
351         bool cur_state;
352
353         val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
354         cur_state = !!(val & FDI_RX_ENABLE);
355         I915_STATE_WARN(cur_state != state,
356              "FDI RX state assertion failure (expected %s, current %s)\n",
357                         onoff(state), onoff(cur_state));
358 }
359 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
360 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
361
362 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
363                                       enum pipe pipe)
364 {
365         u32 val;
366
367         /* ILK FDI PLL is always enabled */
368         if (IS_IRONLAKE(dev_priv))
369                 return;
370
371         /* On Haswell, DDI ports are responsible for the FDI PLL setup */
372         if (HAS_DDI(dev_priv))
373                 return;
374
375         val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
376         I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
377 }
378
379 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
380                        enum pipe pipe, bool state)
381 {
382         u32 val;
383         bool cur_state;
384
385         val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
386         cur_state = !!(val & FDI_RX_PLL_ENABLE);
387         I915_STATE_WARN(cur_state != state,
388              "FDI RX PLL assertion failure (expected %s, current %s)\n",
389                         onoff(state), onoff(cur_state));
390 }
391
/*
 * Warn if the panel power sequencer registers relevant to @pipe are
 * still write-locked while the panel is powered on. Which PP register
 * block applies and which pipe the panel is attached to is platform
 * dependent, so work that out first.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	/* DDI platforms are not handled here. */
	if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		/* Map the selected panel port back to the pipe driving it. */
		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			g4x_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			g4x_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			g4x_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		/* Only LVDS is expected to be selected here. */
		drm_WARN_ON(&dev_priv->drm,
			    port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	/* Regs count as unlocked if the panel is off or unlock magic is set. */
	val = intel_de_read(dev_priv, pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
449
/*
 * Assert that @cpu_transcoder's enable state matches @state. If the
 * transcoder's power domain cannot be grabbed (powered down), the
 * pipe is treated as disabled rather than reading dead registers.
 */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		/* Power domain off implies the transcoder is off too. */
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder),
			onoff(state), onoff(cur_state));
}
477
478 static void assert_plane(struct intel_plane *plane, bool state)
479 {
480         enum pipe pipe;
481         bool cur_state;
482
483         cur_state = plane->get_hw_state(plane, &pipe);
484
485         I915_STATE_WARN(cur_state != state,
486                         "%s assertion failure (expected %s, current %s)\n",
487                         plane->base.name, onoff(state), onoff(cur_state));
488 }
489
490 #define assert_plane_enabled(p) assert_plane(p, true)
491 #define assert_plane_disabled(p) assert_plane(p, false)
492
/* Verify that every plane attached to @crtc reports itself disabled. */
static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}
501
502 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
503                                     enum pipe pipe)
504 {
505         u32 val;
506         bool enabled;
507
508         val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
509         enabled = !!(val & TRANS_ENABLE);
510         I915_STATE_WARN(enabled,
511              "transcoder assertion failed, should be off on pipe %c but is still active\n",
512              pipe_name(pipe));
513 }
514
515 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
516                                    enum pipe pipe, enum port port,
517                                    i915_reg_t dp_reg)
518 {
519         enum pipe port_pipe;
520         bool state;
521
522         state = g4x_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
523
524         I915_STATE_WARN(state && port_pipe == pipe,
525                         "PCH DP %c enabled on transcoder %c, should be disabled\n",
526                         port_name(port), pipe_name(pipe));
527
528         I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
529                         "IBX PCH DP %c still using transcoder B\n",
530                         port_name(port));
531 }
532
533 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
534                                      enum pipe pipe, enum port port,
535                                      i915_reg_t hdmi_reg)
536 {
537         enum pipe port_pipe;
538         bool state;
539
540         state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
541
542         I915_STATE_WARN(state && port_pipe == pipe,
543                         "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
544                         port_name(port), pipe_name(pipe));
545
546         I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
547                         "IBX PCH HDMI %c still using transcoder B\n",
548                         port_name(port));
549 }
550
/*
 * Verify that no PCH port (DP, VGA/CRT, LVDS, HDMI/SDVO) is still
 * routed to @pipe's PCH transcoder.
 */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
575
/*
 * Poll the PHY status bits for @dig_port until they match
 * @expected_mask, warning on timeout. Which register and mask apply
 * depends on the port.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dig_port->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		/* Port C's ready bits sit 4 bits above port B's. */
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		/* No other ports are expected to reach this path. */
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}
609
/*
 * Enable the PCH transcoder for @crtc_state's pipe (IBX/CPT PCHs),
 * after verifying that the shared DPLL and the FDI link feeding it are
 * already running. Copies the CPU pipe's bpc/interlace configuration
 * into the transcoder and waits for it to report enabled.
 */
static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		/*
		 * Workaround: Set the timing override bit
		 * before enabling the pch transcoder.
		 */
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);

		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	/* Mirror the CPU pipe's interlace mode into the transcoder. */
	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		/* SDVO outputs on IBX use the legacy interlaced mode. */
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
			pipe_name(pipe));
}
676
/*
 * Enable the single LPT PCH transcoder, matching @cpu_transcoder's
 * interlace setting and waiting for the enable to take effect. LPT's
 * FDI RX is hardwired to pipe A.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	/* Workaround: set timing override bit. */
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	/* Configure frame start delay to match the CPU */
	val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
	val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);

	/* Mirror the CPU transcoder's interlace mode. */
	val = TRANS_ENABLE;
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
				  TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
}
708
/*
 * Disable the PCH transcoder for @pipe (IBX/CPT), after checking that
 * FDI and all PCH ports that depend on it are already off, then wait
 * for the hardware to report the transcoder disabled.
 */
static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
				       enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
			pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		intel_de_write(dev_priv, reg, val);
	}
}
739
/*
 * Disable the single LPT PCH transcoder, wait for it to report off,
 * then undo the timing override workaround set at enable time.
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
				    TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
}
757
758 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
759 {
760         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
761
762         if (HAS_PCH_LPT(dev_priv))
763                 return PIPE_A;
764         else
765                 return crtc->pipe;
766 }
767
/*
 * Enable the pipe for @new_crtc_state, first sanity-checking that all
 * planes are off and the clocks feeding the pipe (DPLL / DSI PLL /
 * FDI) are running. If the pipe is already enabled this is a no-op
 * (warned about, except on 830 where both pipes stay enabled).
 */
void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	/* Wa_22012358565:adl-p */
	if (DISPLAY_VER(dev_priv) == 13)
		intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
			     0, PIPE_ARB_USE_PROG_SLOTS);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
828
/*
 * Disable the CPU pipe for @old_crtc_state.
 *
 * Clears PIPECONF_ENABLE (and double wide mode) and waits for the pipe
 * to actually stop. On i830 the pipes must stay enabled, so only the
 * double wide bit is cleared there and no pipe-off wait happens.
 */
void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	/* Nothing to do if the pipe is already off. */
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	/* Clear the FEC stall chicken bit again on display ver 12+ */
	if (DISPLAY_VER(dev_priv) >= 12)
		intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);

	intel_de_write(dev_priv, reg, val);
	/* Only wait for pipe off if we actually cleared the enable bit. */
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
870
871 bool
872 intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
873                                     u64 modifier)
874 {
875         return info->is_yuv &&
876                info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
877 }
878
879 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
880 {
881         unsigned int size = 0;
882         int i;
883
884         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
885                 size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;
886
887         return size;
888 }
889
890 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
891 {
892         unsigned int size = 0;
893         int i;
894
895         for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
896                 size += rem_info->plane[i].dst_stride * rem_info->plane[i].height;
897
898         return size;
899 }
900
901 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
902 {
903         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
904         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
905
906         return DISPLAY_VER(dev_priv) < 4 ||
907                 (plane->has_fbc &&
908                  plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL);
909 }
910
/*
 * Pin @fb's backing object into the DPT address space @vm.
 *
 * The object is made uncached, bound with 2 MiB alignment (rebinding
 * it first if currently misplaced), and flushed for display. Returns
 * a referenced, pinned vma on success or an ERR_PTR on failure.
 *
 * NOTE(review): @view/@uses_fence/@out_flags are unused here — out_flags
 * is never written, so no fence flag is reported; presumably kept for
 * signature symmetry with intel_pin_and_fence_fb_obj().
 */
static struct i915_vma *
intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
		     const struct i915_ggtt_view *view,
		     bool uses_fence,
		     unsigned long *out_flags,
		     struct i915_address_space *vm)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_vma *vma;
	u32 alignment;
	int ret;

	if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
		return ERR_PTR(-EINVAL);

	/* 512 pages of 4 KiB = 2 MiB alignment for the DPT binding */
	alignment = 4096 * 512;

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	vma = i915_vma_instance(obj, vm, view);
	if (IS_ERR(vma))
		goto err;

	/* Rebind if the existing binding doesn't satisfy the alignment. */
	if (i915_vma_misplaced(vma, 0, alignment, 0)) {
		ret = i915_vma_unbind(vma);
		if (ret) {
			vma = ERR_PTR(ret);
			goto err;
		}
	}

	ret = i915_vma_pin(vma, 0, alignment, PIN_GLOBAL);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	vma->display_alignment = max_t(u64, vma->display_alignment, alignment);

	i915_gem_object_flush_if_display(obj);

	/* Caller owns this reference; dropped via intel_unpin_fb_vma(). */
	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);

	return vma;
}
966
/*
 * Pin @fb's backing object into the global GTT for scanout and, when
 * requested and possible, install a fence register for it.
 *
 * Handles physical cursors (attached as phys memory), migration to
 * LMEM on platforms that have it, the VT-d scanout workaround
 * alignment bump, and the ww-lock backoff/retry dance.
 *
 * On success returns a referenced, pinned vma and may set
 * PLANE_HAS_FENCE in @out_flags; on failure returns an ERR_PTR.
 * Release with intel_unpin_fb_vma().
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   bool phys_cursor,
			   const struct i915_ggtt_view *view,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	intel_wakeref_t wakeref;
	struct i915_gem_ww_ctx ww;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;
	int ret;

	if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
		return ERR_PTR(-EINVAL);

	/* Physical cursors have their own alignment requirement. */
	if (phys_cursor)
		alignment = intel_cursor_alignment(dev_priv);
	else
		alignment = intel_surf_alignment(fb, 0);
	if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
		return ERR_PTR(-EINVAL);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	/*
	 * Valleyview is definitely limited to scanning out the first
	 * 512MiB. Lets presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	pinctl = 0;
	if (HAS_GMCH(dev_priv))
		pinctl |= PIN_MAPPABLE;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	ret = i915_gem_object_lock(obj, &ww);
	if (!ret && phys_cursor)
		ret = i915_gem_object_attach_phys(obj, alignment);
	else if (!ret && HAS_LMEM(dev_priv))
		ret = i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM);
	/* TODO: Do we need to sync when migration becomes async? */
	if (!ret)
		ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	if (!ret) {
		vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment,
							   view, pinctl);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto err_unpin;
		}
	}

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		/*
		 * Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression.  For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		if (ret != 0 && DISPLAY_VER(dev_priv) < 4) {
			i915_vma_unpin(vma);
			goto err_unpin;
		}
		/* Gen4+ tolerates a missing fence; forget the error. */
		ret = 0;

		if (vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	/* Caller owns this reference; dropped via intel_unpin_fb_vma(). */
	i915_vma_get(vma);

err_unpin:
	i915_gem_object_unpin_pages(obj);
err:
	/* ww contention: back off and retry the full locking sequence. */
	if (ret == -EDEADLK) {
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	if (ret)
		vma = ERR_PTR(ret);

	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return vma;
}
1094
/*
 * Release a framebuffer vma obtained from intel_pin_and_fence_fb_obj():
 * drop the fence first (if one was installed, as flagged by
 * PLANE_HAS_FENCE), then the pin, then the vma reference.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
	i915_vma_put(vma);
}
1102
1103 /*
1104  * Convert the x/y offsets into a linear offset.
1105  * Only valid with 0/180 degree rotation, which is fine since linear
1106  * offset is only used with linear buffers on pre-hsw and tiled buffers
1107  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
1108  */
1109 u32 intel_fb_xy_to_linear(int x, int y,
1110                           const struct intel_plane_state *state,
1111                           int color_plane)
1112 {
1113         const struct drm_framebuffer *fb = state->hw.fb;
1114         unsigned int cpp = fb->format->cpp[color_plane];
1115         unsigned int pitch = state->view.color_plane[color_plane].stride;
1116
1117         return y * pitch + x * cpp;
1118 }
1119
1120 /*
1121  * Add the x/y offsets derived from fb->offsets[] to the user
1122  * specified plane src x/y offsets. The resulting x/y offsets
1123  * specify the start of scanout from the beginning of the gtt mapping.
1124  */
1125 void intel_add_fb_offsets(int *x, int *y,
1126                           const struct intel_plane_state *state,
1127                           int color_plane)
1128
1129 {
1130         *x += state->view.color_plane[color_plane].x;
1131         *y += state->view.color_plane[color_plane].y;
1132 }
1133
1134 /*
1135  * From the Sky Lake PRM:
1136  * "The Color Control Surface (CCS) contains the compression status of
1137  *  the cache-line pairs. The compression state of the cache-line pair
1138  *  is specified by 2 bits in the CCS. Each CCS cache-line represents
1139  *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
1140  *  cache-line-pairs. CCS is always Y tiled."
1141  *
1142  * Since cache line pairs refers to horizontally adjacent cache lines,
1143  * each cache line in the CCS corresponds to an area of 32x16 cache
1144  * lines on the main surface. Since each pixel is 4 bytes, this gives
1145  * us a ratio of one byte in the CCS for each 8x16 pixels in the
1146  * main surface.
1147  */
static const struct drm_format_info skl_ccs_formats[] = {
	/* Plane 1 is the CCS AUX: 1 byte per 8x16 main surface pixels (see above) */
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};
1158
1159 /*
1160  * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
1161  * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
1162  * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
1163  * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in
1164  * the main surface.
1165  */
static const struct drm_format_info gen12_ccs_formats[] = {
	/* 32bpp RGB: plane 0 is the main surface, plane 1 its CCS AUX */
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	/* Packed YUV: plane 0 is the main surface, plane 1 its CCS AUX */
	{ .format = DRM_FORMAT_YUYV, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_YVYU, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_UYVY, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_VYUY, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_XYUV8888, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .is_yuv = true },
	/* Semiplanar YUV: planes 0/1 are Y and CbCr, planes 2/3 their CCS AUX */
	{ .format = DRM_FORMAT_NV12, .num_planes = 4,
	  .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P010, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P012, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P016, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
};
1207
1208 /*
1209  * Same as gen12_ccs_formats[] above, but with additional surface used
1210  * to pass Clear Color information in plane 2 with 64 bits of data.
1211  */
static const struct drm_format_info gen12_ccs_cc_formats[] = {
	/* Plane 2 carries the 64 bit clear color, hence char_per_block 0 (see above) */
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
};
1226
1227 static const struct drm_format_info *
1228 lookup_format_info(const struct drm_format_info formats[],
1229                    int num_formats, u32 format)
1230 {
1231         int i;
1232
1233         for (i = 0; i < num_formats; i++) {
1234                 if (formats[i].format == format)
1235                         return &formats[i];
1236         }
1237
1238         return NULL;
1239 }
1240
1241 static const struct drm_format_info *
1242 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
1243 {
1244         switch (cmd->modifier[0]) {
1245         case I915_FORMAT_MOD_Y_TILED_CCS:
1246         case I915_FORMAT_MOD_Yf_TILED_CCS:
1247                 return lookup_format_info(skl_ccs_formats,
1248                                           ARRAY_SIZE(skl_ccs_formats),
1249                                           cmd->pixel_format);
1250         case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
1251         case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
1252                 return lookup_format_info(gen12_ccs_formats,
1253                                           ARRAY_SIZE(gen12_ccs_formats),
1254                                           cmd->pixel_format);
1255         case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
1256                 return lookup_format_info(gen12_ccs_cc_formats,
1257                                           ARRAY_SIZE(gen12_ccs_cc_formats),
1258                                           cmd->pixel_format);
1259         default:
1260                 return NULL;
1261         }
1262 }
1263
1264 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
1265                               u32 pixel_format, u64 modifier)
1266 {
1267         struct intel_crtc *crtc;
1268         struct intel_plane *plane;
1269
1270         if (!HAS_DISPLAY(dev_priv))
1271                 return 0;
1272
1273         /*
1274          * We assume the primary plane for pipe A has
1275          * the highest stride limits of them all,
1276          * if in case pipe A is disabled, use the first pipe from pipe_mask.
1277          */
1278         crtc = intel_get_first_crtc(dev_priv);
1279         if (!crtc)
1280                 return 0;
1281
1282         plane = to_intel_plane(crtc->base.primary);
1283
1284         return plane->max_stride(plane, pixel_format, modifier,
1285                                  DRM_MODE_ROTATE_0);
1286 }
1287
/*
 * Wrap the pre-allocated (BIOS fb) stolen memory range described by
 * @plane_config in a GEM object and pin it into the GGTT at the offset
 * the BIOS is already scanning out from.
 *
 * Returns the pinned vma, or NULL when the range can't be taken over
 * (empty, too big relative to usable stolen memory, allocation failure,
 * or a tiled fb that didn't end up map-and-fenceable).
 */
static struct i915_vma *
initial_plane_vma(struct drm_i915_private *i915,
		  struct intel_initial_plane_config *plane_config)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 base, size;

	if (plane_config->size == 0)
		return NULL;

	/* Expand the range to GTT page alignment on both ends. */
	base = round_down(plane_config->base,
			  I915_GTT_MIN_ALIGNMENT);
	size = round_up(plane_config->base + plane_config->size,
			I915_GTT_MIN_ALIGNMENT);
	size -= base;

	/*
	 * If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features.
	 */
	if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
	    size * 2 > i915->stolen_usable_size)
		return NULL;

	obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
	if (IS_ERR(obj))
		return NULL;

	/*
	 * Mark it WT ahead of time to avoid changing the
	 * cache_level during fbdev initialization. The
	 * unbind there would get stuck waiting for rcu.
	 */
	i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ?
					    I915_CACHE_WT : I915_CACHE_NONE);

	switch (plane_config->tiling) {
	case I915_TILING_NONE:
		break;
	case I915_TILING_X:
	case I915_TILING_Y:
		/* Carry over the BIOS-programmed stride and tiling mode. */
		obj->tiling_and_stride =
			plane_config->fb->base.pitches[0] |
			plane_config->tiling;
		break;
	default:
		MISSING_CASE(plane_config->tiling);
		goto err_obj;
	}

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma))
		goto err_obj;

	/* Pin at the exact GGTT offset the BIOS already uses for scanout. */
	if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
		goto err_obj;

	/* A tiled fb we can't fence is useless for takeover. */
	if (i915_gem_object_is_tiled(obj) &&
	    !i915_vma_is_map_and_fenceable(vma))
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return NULL;
}
1357
/*
 * Build an intel framebuffer around the BIOS-programmed plane described
 * by @plane_config. Only linear/X/Y tiled modifiers can be taken over.
 *
 * On success stores the pinned vma in plane_config->vma and returns
 * true; on failure returns false with nothing retained.
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	struct i915_vma *vma;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
	case I915_FORMAT_MOD_Y_TILED:
		break;
	default:
		drm_dbg(&dev_priv->drm,
			"Unsupported modifier for initial FB: 0x%llx\n",
			fb->modifier);
		return false;
	}

	vma = initial_plane_vma(dev_priv, plane_config);
	if (!vma)
		return false;

	/* Re-describe the BIOS fb in drm_mode_fb_cmd2 terms. */
	mode_cmd.pixel_format = fb->format->format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier;
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(to_intel_framebuffer(fb),
				   vma->obj, &mode_cmd)) {
		drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
		goto err_vma;
	}

	plane_config->vma = vma;
	return true;

err_vma:
	i915_vma_put(vma);
	return false;
}
1404
1405 static void
1406 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
1407                         struct intel_plane_state *plane_state,
1408                         bool visible)
1409 {
1410         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1411
1412         plane_state->uapi.visible = visible;
1413
1414         if (visible)
1415                 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
1416         else
1417                 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
1418 }
1419
1420 static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
1421 {
1422         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1423         struct drm_plane *plane;
1424
1425         /*
1426          * Active_planes aliases if multiple "primary" or cursor planes
1427          * have been used on the same (or wrong) pipe. plane_mask uses
1428          * unique ids, hence we can use that to reconstruct active_planes.
1429          */
1430         crtc_state->enabled_planes = 0;
1431         crtc_state->active_planes = 0;
1432
1433         drm_for_each_plane_mask(plane, &dev_priv->drm,
1434                                 crtc_state->uapi.plane_mask) {
1435                 crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
1436                 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
1437         }
1438 }
1439
/*
 * Turn off @plane on @crtc outside of the atomic commit machinery
 * (used when sanitizing the hardware state taken over from the BIOS).
 *
 * Updates the cached crtc/plane software state to "not visible",
 * handles the self-refresh, IPS and gen2 underrun quirks around the
 * plane disable, and waits a vblank for the disable to take effect.
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
					 struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	/* Keep the software state consistent with the disabled plane. */
	intel_set_plane_visible(crtc_state, plane_state, false);
	fixup_plane_bitmasks(crtc_state);
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	/* IPS depends on the primary plane; drop it along with the plane. */
	if (plane->id == PLANE_PRIMARY)
		hsw_disable_ips(crtc_state);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_disable_plane(plane, crtc_state);
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}
1485
1486 static bool
1487 intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
1488                               const struct intel_initial_plane_config *plane_config,
1489                               struct drm_framebuffer **fb,
1490                               struct i915_vma **vma)
1491 {
1492         struct intel_crtc *crtc;
1493
1494         for_each_intel_crtc(&i915->drm, crtc) {
1495                 struct intel_crtc_state *crtc_state =
1496                         to_intel_crtc_state(crtc->base.state);
1497                 struct intel_plane *plane =
1498                         to_intel_plane(crtc->base.primary);
1499                 struct intel_plane_state *plane_state =
1500                         to_intel_plane_state(plane->base.state);
1501
1502                 if (!crtc_state->uapi.active)
1503                         continue;
1504
1505                 if (!plane_state->ggtt_vma)
1506                         continue;
1507
1508                 if (intel_plane_ggtt_offset(plane_state) == plane_config->base) {
1509                         *fb = plane_state->hw.fb;
1510                         *vma = plane_state->ggtt_vma;
1511                         return true;
1512                 }
1513         }
1514
1515         return false;
1516 }
1517
/*
 * Take over the framebuffer the BIOS/GOP left programmed on @crtc's primary
 * plane (described by @plane_config), so the boot image stays visible.
 * If the fb can neither be reconstructed nor shared with another CRTC,
 * the primary plane is disabled instead.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane *plane =
		to_intel_plane(crtc->base.primary);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);
	struct drm_framebuffer *fb;
	struct i915_vma *vma;

	/*
	 * TODO:
	 *   Disable planes if get_initial_plane_config() failed.
	 *   Make sure things work if the surface base is not page aligned.
	 */
	if (!plane_config->fb)
		return;

	/* First try to wrap the BIOS fb in our own GEM object/vma. */
	if (intel_alloc_initial_plane_obj(crtc, plane_config)) {
		fb = &plane_config->fb->base;
		vma = plane_config->vma;
		goto valid_fb;
	}

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	if (intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb, &vma))
		goto valid_fb;

	/*
	 * We've failed to reconstruct the BIOS FB.  Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up.  The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	intel_plane_disable_noatomic(crtc, plane);
	if (crtc_state->bigjoiner) {
		/* Also disable the primary plane of the linked slave pipe. */
		struct intel_crtc *slave =
			crtc_state->bigjoiner_linked_crtc;
		intel_plane_disable_noatomic(slave, to_intel_plane(slave->base.primary));
	}

	return;

valid_fb:
	/* Adopt the fb: fill in the plane state as if userspace had set it. */
	plane_state->uapi.rotation = plane_config->rotation;
	intel_fb_fill_view(to_intel_framebuffer(fb),
			   plane_state->uapi.rotation, &plane_state->view);

	/* Hold our own pin + reference on the vma for the plane state. */
	__i915_vma_pin(vma);
	plane_state->ggtt_vma = i915_vma_get(vma);
	if (intel_plane_uses_fence(plane_state) &&
	    i915_vma_pin_fence(vma) == 0 && vma->fence)
		plane_state->flags |= PLANE_HAS_FENCE;

	/* Full-fb source rectangle (src coords are 16.16 fixed point). */
	plane_state->uapi.src_x = 0;
	plane_state->uapi.src_y = 0;
	plane_state->uapi.src_w = fb->width << 16;
	plane_state->uapi.src_h = fb->height << 16;

	plane_state->uapi.crtc_x = 0;
	plane_state->uapi.crtc_y = 0;
	plane_state->uapi.crtc_w = fb->width;
	plane_state->uapi.crtc_h = fb->height;

	/* Keep the BIOS-chosen swizzle so the inherited fb keeps scanning out. */
	if (plane_config->tiling)
		dev_priv->preserve_bios_swizzle = true;

	plane_state->uapi.fb = fb;
	drm_framebuffer_get(fb);

	plane_state->uapi.crtc = &crtc->base;
	intel_plane_copy_uapi_to_hw_state(plane_state, plane_state, crtc);

	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits);
}
1604
1605 unsigned int
1606 intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
1607 {
1608         int x = 0, y = 0;
1609
1610         intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
1611                                           plane_state->view.color_plane[0].offset, 0);
1612
1613         return y;
1614 }
1615
1616 static int
1617 __intel_display_resume(struct drm_device *dev,
1618                        struct drm_atomic_state *state,
1619                        struct drm_modeset_acquire_ctx *ctx)
1620 {
1621         struct drm_crtc_state *crtc_state;
1622         struct drm_crtc *crtc;
1623         int i, ret;
1624
1625         intel_modeset_setup_hw_state(dev, ctx);
1626         intel_vga_redisable(to_i915(dev));
1627
1628         if (!state)
1629                 return 0;
1630
1631         /*
1632          * We've duplicated the state, pointers to the old state are invalid.
1633          *
1634          * Don't attempt to use the old state until we commit the duplicated state.
1635          */
1636         for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1637                 /*
1638                  * Force recalculation even if we restore
1639                  * current state. With fast modeset this may not result
1640                  * in a modeset when the state is compatible.
1641                  */
1642                 crtc_state->mode_changed = true;
1643         }
1644
1645         /* ignore any reset values/BIOS leftovers in the WM registers */
1646         if (!HAS_GMCH(to_i915(dev)))
1647                 to_intel_atomic_state(state)->skip_intermediate_wm = true;
1648
1649         ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
1650
1651         drm_WARN_ON(dev, ret == -EDEADLK);
1652         return ret;
1653 }
1654
1655 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
1656 {
1657         return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
1658                 intel_has_gpu_reset(&dev_priv->gt));
1659 }
1660
/*
 * Prepare the display for a GPU reset that clobbers display HW state:
 * flag the reset, take all modeset locks, save the current atomic state
 * for intel_display_finish_reset() to restore, and disable all CRTCs.
 * The locks remain held until intel_display_finish_reset() runs.
 */
void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* reset doesn't touch the display */
	if (!dev_priv->params.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
	smp_mb__after_atomic();
	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Modeset potentially stuck, unbreaking through wedging\n");
		intel_gt_set_wedged(&dev_priv->gt);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	/* Retry the lock acquisition until we stop hitting deadlocks. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
			ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
			ret);
		drm_atomic_state_put(state);
		return;
	}

	/* Stash the state for intel_display_finish_reset() to restore. */
	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
1723
/*
 * Counterpart of intel_display_prepare_reset(): restore the display state
 * saved before the GPU reset, then drop the modeset locks taken there and
 * clear the I915_RESET_MODESET flag.
 */
void intel_display_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
		return;

	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev_priv);
		intel_init_clock_gating(dev_priv);
		intel_hpd_init(dev_priv);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);

		intel_hpd_poll_disable(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}
1775
1776 static bool underrun_recovery_supported(const struct intel_crtc_state *crtc_state)
1777 {
1778         if (crtc_state->pch_pfit.enabled &&
1779             (crtc_state->pipe_src_w > drm_rect_width(&crtc_state->pch_pfit.dst) ||
1780              crtc_state->pipe_src_h > drm_rect_height(&crtc_state->pch_pfit.dst) ||
1781              crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420))
1782                 return false;
1783
1784         if (crtc_state->dsc.compression_enable)
1785                 return false;
1786
1787         if (crtc_state->has_psr2)
1788                 return false;
1789
1790         if (crtc_state->splitter.enable)
1791                 return false;
1792
1793         return true;
1794 }
1795
1796 static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
1797 {
1798         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1799         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1800         enum pipe pipe = crtc->pipe;
1801         u32 tmp;
1802
1803         tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
1804
1805         /*
1806          * Display WA #1153: icl
1807          * enable hardware to bypass the alpha math
1808          * and rounding for per-pixel values 00 and 0xff
1809          */
1810         tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
1811         /*
1812          * Display WA # 1605353570: icl
1813          * Set the pixel rounding bit to 1 for allowing
1814          * passthrough of Frame buffer pixels unmodified
1815          * across pipe
1816          */
1817         tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
1818
1819         if (IS_DG2(dev_priv)) {
1820                 /*
1821                  * Underrun recovery must always be disabled on DG2.  However
1822                  * the chicken bit meaning is inverted compared to other
1823                  * platforms.
1824                  */
1825                 tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
1826         } else if (DISPLAY_VER(dev_priv) >= 13) {
1827                 if (underrun_recovery_supported(crtc_state))
1828                         tmp &= ~UNDERRUN_RECOVERY_DISABLE_ADLP;
1829                 else
1830                         tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;
1831         }
1832
1833         intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
1834 }
1835
1836 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
1837 {
1838         struct drm_crtc *crtc;
1839         bool cleanup_done;
1840
1841         drm_for_each_crtc(crtc, &dev_priv->drm) {
1842                 struct drm_crtc_commit *commit;
1843                 spin_lock(&crtc->commit_lock);
1844                 commit = list_first_entry_or_null(&crtc->commit_list,
1845                                                   struct drm_crtc_commit, commit_entry);
1846                 cleanup_done = commit ?
1847                         try_wait_for_completion(&commit->cleanup_done) : true;
1848                 spin_unlock(&crtc->commit_lock);
1849
1850                 if (cleanup_done)
1851                         continue;
1852
1853                 drm_crtc_wait_one_vblank(crtc);
1854
1855                 return true;
1856         }
1857
1858         return false;
1859 }
1860
/*
 * Disable iCLKIP: gate the pixel clock first, then set the SSC
 * disable bit via the sideband interface (sb_lock serializes all
 * sideband access).
 */
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
1875
/*
 * Program iCLKIP clock to the desired frequency (the pipe's pixel clock).
 * Computes integer divisor, phase increment and aux divider, programs them
 * through the sideband interface, and finally ungates the pixel clock.
 */
static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* Must be disabled while we reprogram the divisors. */
	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		    ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
		    ~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	drm_dbg_kms(&dev_priv->drm,
		    "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
		    clock, auxdiv, divsel, phasedir, phaseinc);

	/* All divisor programming goes through the sideband interface. */
	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
1951
/*
 * Read back the currently programmed iCLKIP frequency in kHz.
 * Returns 0 if the pixel clock is gated or the SSC modulator is disabled.
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	/* Sideband access must hold sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	/* Inverse of the divisor calculation in lpt_program_iclkip(). */
	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
1988
/*
 * Copy the currently programmed CPU transcoder timings (H/V total,
 * blank, sync, and vsync shift) into the given PCH transcoder so
 * both sides agree.
 */
static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
					   enum pipe pch_transcoder)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
		       intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
		       intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
		       intel_de_read(dev_priv, HSYNC(cpu_transcoder)));

	intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
		       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
		       intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
		       intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		       intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
}
2012
2013 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
2014 {
2015         u32 temp;
2016
2017         temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
2018         if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
2019                 return;
2020
2021         drm_WARN_ON(&dev_priv->drm,
2022                     intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
2023                     FDI_RX_ENABLE);
2024         drm_WARN_ON(&dev_priv->drm,
2025                     intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
2026                     FDI_RX_ENABLE);
2027
2028         temp &= ~FDI_BC_BIFURCATION_SELECT;
2029         if (enable)
2030                 temp |= FDI_BC_BIFURCATION_SELECT;
2031
2032         drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
2033                     enable ? "en" : "dis");
2034         intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
2035         intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
2036 }
2037
2038 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
2039 {
2040         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2041         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2042
2043         switch (crtc->pipe) {
2044         case PIPE_A:
2045                 break;
2046         case PIPE_B:
2047                 if (crtc_state->fdi_lanes > 2)
2048                         cpt_set_fdi_bc_bifurcation(dev_priv, false);
2049                 else
2050                         cpt_set_fdi_bc_bifurcation(dev_priv, true);
2051
2052                 break;
2053         case PIPE_C:
2054                 cpt_set_fdi_bc_bifurcation(dev_priv, true);
2055
2056                 break;
2057         default:
2058                 BUG();
2059         }
2060 }
2061
2062 /*
2063  * Finds the encoder associated with the given CRTC. This can only be
2064  * used when we know that the CRTC isn't feeding multiple encoders!
2065  */
2066 struct intel_encoder *
2067 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
2068                            const struct intel_crtc_state *crtc_state)
2069 {
2070         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2071         const struct drm_connector_state *connector_state;
2072         const struct drm_connector *connector;
2073         struct intel_encoder *encoder = NULL;
2074         int num_encoders = 0;
2075         int i;
2076
2077         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
2078                 if (connector_state->crtc != &crtc->base)
2079                         continue;
2080
2081                 encoder = to_intel_encoder(connector_state->best_encoder);
2082                 num_encoders++;
2083         }
2084
2085         drm_WARN(encoder->base.dev, num_encoders != 1,
2086                  "%d encoders for pipe %c\n",
2087                  num_encoders, pipe_name(crtc->pipe));
2088
2089         return encoder;
2090 }
2091
2092 /*
2093  * Enable PCH resources required for PCH ports:
2094  *   - PCH PLLs
2095  *   - FDI training & RX/TX
2096  *   - update transcoder timings
2097  *   - DP transcoding bits
2098  *   - transcoder
2099  */
2100 static void ilk_pch_enable(const struct intel_atomic_state *state,
2101                            const struct intel_crtc_state *crtc_state)
2102 {
2103         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2104         struct drm_device *dev = crtc->base.dev;
2105         struct drm_i915_private *dev_priv = to_i915(dev);
2106         enum pipe pipe = crtc->pipe;
2107         u32 temp;
2108
2109         assert_pch_transcoder_disabled(dev_priv, pipe);
2110
2111         if (IS_IVYBRIDGE(dev_priv))
2112                 ivb_update_fdi_bc_bifurcation(crtc_state);
2113
2114         /* Write the TU size bits before fdi link training, so that error
2115          * detection works. */
2116         intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
2117                        intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
2118
2119         /* For PCH output, training FDI link */
2120         dev_priv->display.fdi_link_train(crtc, crtc_state);
2121
2122         /* We need to program the right clock selection before writing the pixel
2123          * mutliplier into the DPLL. */
2124         if (HAS_PCH_CPT(dev_priv)) {
2125                 u32 sel;
2126
2127                 temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
2128                 temp |= TRANS_DPLL_ENABLE(pipe);
2129                 sel = TRANS_DPLLB_SEL(pipe);
2130                 if (crtc_state->shared_dpll ==
2131                     intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
2132                         temp |= sel;
2133                 else
2134                         temp &= ~sel;
2135                 intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
2136         }
2137
2138         /* XXX: pch pll's can be enabled any time before we enable the PCH
2139          * transcoder, and we actually should do this to not upset any PCH
2140          * transcoder that already use the clock when we share it.
2141          *
2142          * Note that enable_shared_dpll tries to do the right thing, but
2143          * get_shared_dpll unconditionally resets the pll - we need that to have
2144          * the right LVDS enable sequence. */
2145         intel_enable_shared_dpll(crtc_state);
2146
2147         /* set transcoder timing, panel must allow it */
2148         assert_panel_unlocked(dev_priv, pipe);
2149         ilk_pch_transcoder_set_timings(crtc_state, pipe);
2150
2151         intel_fdi_normal_train(crtc);
2152
2153         /* For PCH DP, enable TRANS_DP_CTL */
2154         if (HAS_PCH_CPT(dev_priv) &&
2155             intel_crtc_has_dp_encoder(crtc_state)) {
2156                 const struct drm_display_mode *adjusted_mode =
2157                         &crtc_state->hw.adjusted_mode;
2158                 u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
2159                 i915_reg_t reg = TRANS_DP_CTL(pipe);
2160                 enum port port;
2161
2162                 temp = intel_de_read(dev_priv, reg);
2163                 temp &= ~(TRANS_DP_PORT_SEL_MASK |
2164                           TRANS_DP_SYNC_MASK |
2165                           TRANS_DP_BPC_MASK);
2166                 temp |= TRANS_DP_OUTPUT_ENABLE;
2167                 temp |= bpc << 9; /* same format but at 11:9 */
2168
2169                 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
2170                         temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
2171                 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
2172                         temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
2173
2174                 port = intel_get_crtc_new_encoder(state, crtc_state)->port;
2175                 drm_WARN_ON(dev, port < PORT_B || port > PORT_D);
2176                 temp |= TRANS_DP_PORT_SEL(port);
2177
2178                 intel_de_write(dev_priv, reg, temp);
2179         }
2180
2181         ilk_enable_pch_transcoder(crtc_state);
2182 }
2183
/*
 * Enable the LPT PCH transcoder: program iCLKIP to the pipe's pixel
 * clock, copy the CPU transcoder timings over, then enable the
 * transcoder.  LPT is handled as a single transcoder here (PIPE_A).
 */
void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, PIPE_A);

	lpt_program_iclkip(crtc_state);

	/* Set transcoder timing. */
	ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
2199
2200 static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
2201                                enum pipe pipe)
2202 {
2203         i915_reg_t dslreg = PIPEDSL(pipe);
2204         u32 temp;
2205
2206         temp = intel_de_read(dev_priv, dslreg);
2207         udelay(500);
2208         if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
2209                 if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
2210                         drm_err(&dev_priv->drm,
2211                                 "mode set failed: pipe %c stuck\n",
2212                                 pipe_name(pipe));
2213         }
2214 }
2215
2216 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
2217 {
2218         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2219         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2220         const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
2221         enum pipe pipe = crtc->pipe;
2222         int width = drm_rect_width(dst);
2223         int height = drm_rect_height(dst);
2224         int x = dst->x1;
2225         int y = dst->y1;
2226
2227         if (!crtc_state->pch_pfit.enabled)
2228                 return;
2229
2230         /* Force use of hard-coded filter coefficients
2231          * as some pre-programmed values are broken,
2232          * e.g. x201.
2233          */
2234         if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
2235                 intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
2236                                PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
2237         else
2238                 intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
2239                                PF_FILTER_MED_3x3);
2240         intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
2241         intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
2242 }
2243
/*
 * Enable IPS for this CRTC if the state asks for it.  On BDW this goes
 * through the pcode mailbox, on HSW through the IPS_CTL register.
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */
	drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

	if (IS_BROADWELL(dev_priv)) {
		drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
							 IPS_ENABLE | IPS_PCODE_CONTROL));
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
			drm_err(&dev_priv->drm,
				"Timed out waiting for IPS enable\n");
	}
}
2280
/*
 * Disable IPS for this CRTC if it was enabled.  Mirrors hsw_enable_ips():
 * pcode mailbox on BDW, IPS_CTL register on HSW.  Ends with a vblank wait
 * so the caller may safely disable planes afterwards.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	if (IS_BROADWELL(dev_priv)) {
		drm_WARN_ON(dev,
			    sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		/*
		 * Wait for PCODE to finish disabling IPS. The BSpec specified
		 * 42ms timeout value leads to occasional timeouts so use 100ms
		 * instead.
		 */
		if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
			drm_err(&dev_priv->drm,
				"Timed out waiting for IPS disable\n");
	} else {
		intel_de_write(dev_priv, IPS_CTL, 0);
		intel_de_posting_read(dev_priv, IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}
2309
2310 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
2311 {
2312         if (crtc->overlay)
2313                 (void) intel_overlay_switch_off(crtc->overlay);
2314
2315         /* Let userspace switch the overlay on again. In most cases userspace
2316          * has to recompute where to put it anyway.
2317          */
2318 }
2319
2320 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
2321                                        const struct intel_crtc_state *new_crtc_state)
2322 {
2323         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
2324         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2325
2326         if (!old_crtc_state->ips_enabled)
2327                 return false;
2328
2329         if (intel_crtc_needs_modeset(new_crtc_state))
2330                 return true;
2331
2332         /*
2333          * Workaround : Do not read or write the pipe palette/gamma data while
2334          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
2335          *
2336          * Disable IPS before we program the LUT.
2337          */
2338         if (IS_HASWELL(dev_priv) &&
2339             (new_crtc_state->uapi.color_mgmt_changed ||
2340              new_crtc_state->update_pipe) &&
2341             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
2342                 return true;
2343
2344         return !new_crtc_state->ips_enabled;
2345 }
2346
2347 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
2348                                        const struct intel_crtc_state *new_crtc_state)
2349 {
2350         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
2351         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2352
2353         if (!new_crtc_state->ips_enabled)
2354                 return false;
2355
2356         if (intel_crtc_needs_modeset(new_crtc_state))
2357                 return true;
2358
2359         /*
2360          * Workaround : Do not read or write the pipe palette/gamma data while
2361          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
2362          *
2363          * Re-enable IPS after the LUT has been programmed.
2364          */
2365         if (IS_HASWELL(dev_priv) &&
2366             (new_crtc_state->uapi.color_mgmt_changed ||
2367              new_crtc_state->update_pipe) &&
2368             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
2369                 return true;
2370
2371         /*
2372          * We can't read out IPS on broadwell, assume the worst and
2373          * forcibly enable IPS on the first fastset.
2374          */
2375         if (new_crtc_state->update_pipe && old_crtc_state->inherited)
2376                 return true;
2377
2378         return !old_crtc_state->ips_enabled;
2379 }
2380
2381 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
2382 {
2383         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2384
2385         if (!crtc_state->nv12_planes)
2386                 return false;
2387
2388         /* WA Display #0827: Gen9:all */
2389         if (DISPLAY_VER(dev_priv) == 9)
2390                 return true;
2391
2392         return false;
2393 }
2394
2395 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
2396 {
2397         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2398
2399         /* Wa_2006604312:icl,ehl */
2400         if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
2401                 return true;
2402
2403         return false;
2404 }
2405
2406 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
2407                             const struct intel_crtc_state *new_crtc_state)
2408 {
2409         return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
2410                 new_crtc_state->active_planes;
2411 }
2412
2413 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
2414                              const struct intel_crtc_state *new_crtc_state)
2415 {
2416         return old_crtc_state->active_planes &&
2417                 (!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
2418 }
2419
/*
 * Post-plane-update fixups for @crtc: flush frontbuffer tracking, program
 * post-update watermarks, re-enable IPS and FBC, and undo the display
 * workarounds that intel_pre_plane_update() put in place, now that the
 * plane update itself has completed.
 */
static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	/* Legacy (pre-atomic) watermark programming, post-update phase. */
	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(crtc);

	if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
		hsw_enable_ips(new_crtc_state);

	intel_fbc_post_update(state, crtc);

	/* WA Display #0827: turn back off once no NV12 planes remain. */
	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	/* Wa_2006604312:icl,ehl: likewise for the scaler clock gating WA. */
	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);
}
2448
2449 static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
2450                                         struct intel_crtc *crtc)
2451 {
2452         const struct intel_crtc_state *crtc_state =
2453                 intel_atomic_get_new_crtc_state(state, crtc);
2454         u8 update_planes = crtc_state->update_planes;
2455         const struct intel_plane_state *plane_state;
2456         struct intel_plane *plane;
2457         int i;
2458
2459         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
2460                 if (plane->enable_flip_done &&
2461                     plane->pipe == crtc->pipe &&
2462                     update_planes & BIT(plane->id))
2463                         plane->enable_flip_done(plane);
2464         }
2465 }
2466
2467 static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
2468                                          struct intel_crtc *crtc)
2469 {
2470         const struct intel_crtc_state *crtc_state =
2471                 intel_atomic_get_new_crtc_state(state, crtc);
2472         u8 update_planes = crtc_state->update_planes;
2473         const struct intel_plane_state *plane_state;
2474         struct intel_plane *plane;
2475         int i;
2476
2477         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
2478                 if (plane->disable_flip_done &&
2479                     plane->pipe == crtc->pipe &&
2480                     update_planes & BIT(plane->id))
2481                         plane->disable_flip_done(plane);
2482         }
2483 }
2484
2485 static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
2486                                              struct intel_crtc *crtc)
2487 {
2488         struct drm_i915_private *i915 = to_i915(state->base.dev);
2489         const struct intel_crtc_state *old_crtc_state =
2490                 intel_atomic_get_old_crtc_state(state, crtc);
2491         const struct intel_crtc_state *new_crtc_state =
2492                 intel_atomic_get_new_crtc_state(state, crtc);
2493         u8 update_planes = new_crtc_state->update_planes;
2494         const struct intel_plane_state *old_plane_state;
2495         struct intel_plane *plane;
2496         bool need_vbl_wait = false;
2497         int i;
2498
2499         for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
2500                 if (plane->need_async_flip_disable_wa &&
2501                     plane->pipe == crtc->pipe &&
2502                     update_planes & BIT(plane->id)) {
2503                         /*
2504                          * Apart from the async flip bit we want to
2505                          * preserve the old state for the plane.
2506                          */
2507                         plane->async_flip(plane, old_crtc_state,
2508                                           old_plane_state, false);
2509                         need_vbl_wait = true;
2510                 }
2511         }
2512
2513         if (need_vbl_wait)
2514                 intel_wait_for_vblank(i915, crtc->pipe);
2515 }
2516
/*
 * Preparation work for @crtc that must happen before the planes are
 * updated: tear down IPS/FBC if needed, apply display workarounds, disable
 * self-refresh and LP watermarks where the hardware requires a vblank of
 * settling time, program intermediate watermarks, and quiesce the async
 * flip machinery. The ordering of the steps below is significant.
 */
static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	/* IPS must go down before the LUT/plane programming it would race with. */
	if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
		hsw_disable_ips(old_crtc_state);

	/* FBC pre-update may ask for a vblank of settling time. */
	if (intel_fbc_pre_update(state, crtc))
		intel_wait_for_vblank(dev_priv, pipe);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl,ehl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling.  LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (old_crtc_state->hw.active &&
	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!intel_crtc_needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks.  For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (dev_priv->display.initial_watermarks)
			dev_priv->display.initial_watermarks(state, crtc);
		else if (new_crtc_state->update_wm_pre)
			intel_update_watermarks(crtc);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * WA for platforms where async address update enable bit
	 * is double buffered and only latched at start of vblank.
	 */
	if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
		intel_crtc_async_flip_disable_wa(state, crtc);
}
2610
2611 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
2612                                       struct intel_crtc *crtc)
2613 {
2614         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2615         const struct intel_crtc_state *new_crtc_state =
2616                 intel_atomic_get_new_crtc_state(state, crtc);
2617         unsigned int update_mask = new_crtc_state->update_planes;
2618         const struct intel_plane_state *old_plane_state;
2619         struct intel_plane *plane;
2620         unsigned fb_bits = 0;
2621         int i;
2622
2623         intel_crtc_dpms_overlay_disable(crtc);
2624
2625         for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
2626                 if (crtc->pipe != plane->pipe ||
2627                     !(update_mask & BIT(plane->id)))
2628                         continue;
2629
2630                 intel_disable_plane(plane, new_crtc_state);
2631
2632                 if (old_plane_state->uapi.visible)
2633                         fb_bits |= plane->frontbuffer_bit;
2634         }
2635
2636         intel_frontbuffer_flip(dev_priv, fb_bits);
2637 }
2638
2639 /*
2640  * intel_connector_primary_encoder - get the primary encoder for a connector
2641  * @connector: connector for which to return the encoder
2642  *
2643  * Returns the primary encoder for a connector. There is a 1:1 mapping from
2644  * all connectors to their encoder, except for DP-MST connectors which have
2645  * both a virtual and a primary encoder. These DP-MST primary encoders can be
2646  * pointed to by as many DP-MST connectors as there are pipes.
2647  */
2648 static struct intel_encoder *
2649 intel_connector_primary_encoder(struct intel_connector *connector)
2650 {
2651         struct intel_encoder *encoder;
2652
2653         if (connector->mst_port)
2654                 return &dp_to_dig_port(connector->mst_port)->base;
2655
2656         encoder = intel_attached_encoder(connector);
2657         drm_WARN_ON(connector->base.dev, !encoder);
2658
2659         return encoder;
2660 }
2661
2662 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
2663 {
2664         struct drm_connector_state *new_conn_state;
2665         struct drm_connector *connector;
2666         int i;
2667
2668         for_each_new_connector_in_state(&state->base, connector, new_conn_state,
2669                                         i) {
2670                 struct intel_connector *intel_connector;
2671                 struct intel_encoder *encoder;
2672                 struct intel_crtc *crtc;
2673
2674                 if (!intel_connector_needs_modeset(state, connector))
2675                         continue;
2676
2677                 intel_connector = to_intel_connector(connector);
2678                 encoder = intel_connector_primary_encoder(intel_connector);
2679                 if (!encoder->update_prepare)
2680                         continue;
2681
2682                 crtc = new_conn_state->crtc ?
2683                         to_intel_crtc(new_conn_state->crtc) : NULL;
2684                 encoder->update_prepare(state, encoder, crtc);
2685         }
2686 }
2687
2688 static void intel_encoders_update_complete(struct intel_atomic_state *state)
2689 {
2690         struct drm_connector_state *new_conn_state;
2691         struct drm_connector *connector;
2692         int i;
2693
2694         for_each_new_connector_in_state(&state->base, connector, new_conn_state,
2695                                         i) {
2696                 struct intel_connector *intel_connector;
2697                 struct intel_encoder *encoder;
2698                 struct intel_crtc *crtc;
2699
2700                 if (!intel_connector_needs_modeset(state, connector))
2701                         continue;
2702
2703                 intel_connector = to_intel_connector(connector);
2704                 encoder = intel_connector_primary_encoder(intel_connector);
2705                 if (!encoder->update_complete)
2706                         continue;
2707
2708                 crtc = new_conn_state->crtc ?
2709                         to_intel_crtc(new_conn_state->crtc) : NULL;
2710                 encoder->update_complete(state, encoder, crtc);
2711         }
2712 }
2713
2714 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
2715                                           struct intel_crtc *crtc)
2716 {
2717         const struct intel_crtc_state *crtc_state =
2718                 intel_atomic_get_new_crtc_state(state, crtc);
2719         const struct drm_connector_state *conn_state;
2720         struct drm_connector *conn;
2721         int i;
2722
2723         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
2724                 struct intel_encoder *encoder =
2725                         to_intel_encoder(conn_state->best_encoder);
2726
2727                 if (conn_state->crtc != &crtc->base)
2728                         continue;
2729
2730                 if (encoder->pre_pll_enable)
2731                         encoder->pre_pll_enable(state, encoder,
2732                                                 crtc_state, conn_state);
2733         }
2734 }
2735
2736 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
2737                                       struct intel_crtc *crtc)
2738 {
2739         const struct intel_crtc_state *crtc_state =
2740                 intel_atomic_get_new_crtc_state(state, crtc);
2741         const struct drm_connector_state *conn_state;
2742         struct drm_connector *conn;
2743         int i;
2744
2745         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
2746                 struct intel_encoder *encoder =
2747                         to_intel_encoder(conn_state->best_encoder);
2748
2749                 if (conn_state->crtc != &crtc->base)
2750                         continue;
2751
2752                 if (encoder->pre_enable)
2753                         encoder->pre_enable(state, encoder,
2754                                             crtc_state, conn_state);
2755         }
2756 }
2757
2758 static void intel_encoders_enable(struct intel_atomic_state *state,
2759                                   struct intel_crtc *crtc)
2760 {
2761         const struct intel_crtc_state *crtc_state =
2762                 intel_atomic_get_new_crtc_state(state, crtc);
2763         const struct drm_connector_state *conn_state;
2764         struct drm_connector *conn;
2765         int i;
2766
2767         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
2768                 struct intel_encoder *encoder =
2769                         to_intel_encoder(conn_state->best_encoder);
2770
2771                 if (conn_state->crtc != &crtc->base)
2772                         continue;
2773
2774                 if (encoder->enable)
2775                         encoder->enable(state, encoder,
2776                                         crtc_state, conn_state);
2777                 intel_opregion_notify_encoder(encoder, true);
2778         }
2779 }
2780
2781 static void intel_encoders_pre_disable(struct intel_atomic_state *state,
2782                                        struct intel_crtc *crtc)
2783 {
2784         const struct intel_crtc_state *old_crtc_state =
2785                 intel_atomic_get_old_crtc_state(state, crtc);
2786         const struct drm_connector_state *old_conn_state;
2787         struct drm_connector *conn;
2788         int i;
2789
2790         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
2791                 struct intel_encoder *encoder =
2792                         to_intel_encoder(old_conn_state->best_encoder);
2793
2794                 if (old_conn_state->crtc != &crtc->base)
2795                         continue;
2796
2797                 if (encoder->pre_disable)
2798                         encoder->pre_disable(state, encoder, old_crtc_state,
2799                                              old_conn_state);
2800         }
2801 }
2802
2803 static void intel_encoders_disable(struct intel_atomic_state *state,
2804                                    struct intel_crtc *crtc)
2805 {
2806         const struct intel_crtc_state *old_crtc_state =
2807                 intel_atomic_get_old_crtc_state(state, crtc);
2808         const struct drm_connector_state *old_conn_state;
2809         struct drm_connector *conn;
2810         int i;
2811
2812         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
2813                 struct intel_encoder *encoder =
2814                         to_intel_encoder(old_conn_state->best_encoder);
2815
2816                 if (old_conn_state->crtc != &crtc->base)
2817                         continue;
2818
2819                 intel_opregion_notify_encoder(encoder, false);
2820                 if (encoder->disable)
2821                         encoder->disable(state, encoder,
2822                                          old_crtc_state, old_conn_state);
2823         }
2824 }
2825
2826 static void intel_encoders_post_disable(struct intel_atomic_state *state,
2827                                         struct intel_crtc *crtc)
2828 {
2829         const struct intel_crtc_state *old_crtc_state =
2830                 intel_atomic_get_old_crtc_state(state, crtc);
2831         const struct drm_connector_state *old_conn_state;
2832         struct drm_connector *conn;
2833         int i;
2834
2835         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
2836                 struct intel_encoder *encoder =
2837                         to_intel_encoder(old_conn_state->best_encoder);
2838
2839                 if (old_conn_state->crtc != &crtc->base)
2840                         continue;
2841
2842                 if (encoder->post_disable)
2843                         encoder->post_disable(state, encoder,
2844                                               old_crtc_state, old_conn_state);
2845         }
2846 }
2847
2848 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
2849                                             struct intel_crtc *crtc)
2850 {
2851         const struct intel_crtc_state *old_crtc_state =
2852                 intel_atomic_get_old_crtc_state(state, crtc);
2853         const struct drm_connector_state *old_conn_state;
2854         struct drm_connector *conn;
2855         int i;
2856
2857         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
2858                 struct intel_encoder *encoder =
2859                         to_intel_encoder(old_conn_state->best_encoder);
2860
2861                 if (old_conn_state->crtc != &crtc->base)
2862                         continue;
2863
2864                 if (encoder->post_pll_disable)
2865                         encoder->post_pll_disable(state, encoder,
2866                                                   old_crtc_state, old_conn_state);
2867         }
2868 }
2869
2870 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
2871                                        struct intel_crtc *crtc)
2872 {
2873         const struct intel_crtc_state *crtc_state =
2874                 intel_atomic_get_new_crtc_state(state, crtc);
2875         const struct drm_connector_state *conn_state;
2876         struct drm_connector *conn;
2877         int i;
2878
2879         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
2880                 struct intel_encoder *encoder =
2881                         to_intel_encoder(conn_state->best_encoder);
2882
2883                 if (conn_state->crtc != &crtc->base)
2884                         continue;
2885
2886                 if (encoder->update_pipe)
2887                         encoder->update_pipe(state, encoder,
2888                                              crtc_state, conn_state);
2889         }
2890 }
2891
2892 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
2893 {
2894         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2895         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
2896
2897         plane->disable_plane(plane, crtc_state);
2898 }
2899
/*
 * Full crtc enable sequence for ILK-class (PCH split) hardware: suppress
 * spurious underruns, program transcoder timings/M-N values and pipeconf,
 * run the encoder pre-enable hooks, bring up FDI/PFIT/LUTs, enable the
 * pipe and (if present) the PCH side, then run the encoder enable hooks.
 * The order of these steps follows the hardware enable sequence and must
 * not be changed casually.
 */
static void ilk_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Enabling an already-enabled crtc is a driver bug. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	/* FDI link M/N values are only needed when driving a PCH port. */
	if (new_crtc_state->has_pch_encoder)
		intel_cpu_transcoder_set_m_n(new_crtc_state,
					     &new_crtc_state->fdi_m_n, NULL);

	ilk_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_encoders_pre_enable(state, crtc);

	if (new_crtc_state->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ilk_fdi_pll_enable(new_crtc_state);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		ilk_pch_enable(state, new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev_priv, pipe);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (new_crtc_state->has_pch_encoder) {
		intel_wait_for_vblank(dev_priv, pipe);
		intel_wait_for_vblank(dev_priv, pipe);
	}
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
2988
2989 /* IPS only exists on ULT machines and is tied to pipe A. */
2990 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
2991 {
2992         return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
2993 }
2994
2995 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
2996                                             enum pipe pipe, bool apply)
2997 {
2998         u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
2999         u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
3000
3001         if (apply)
3002                 val |= mask;
3003         else
3004                 val &= ~mask;
3005
3006         intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
3007 }
3008
3009 static void icl_pipe_mbus_enable(struct intel_crtc *crtc, bool joined_mbus)
3010 {
3011         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3012         enum pipe pipe = crtc->pipe;
3013         u32 val;
3014
3015         /* Wa_22010947358:adl-p */
3016         if (IS_ALDERLAKE_P(dev_priv))
3017                 val = joined_mbus ? MBUS_DBOX_A_CREDIT(6) : MBUS_DBOX_A_CREDIT(4);
3018         else
3019                 val = MBUS_DBOX_A_CREDIT(2);
3020
3021         if (DISPLAY_VER(dev_priv) >= 12) {
3022                 val |= MBUS_DBOX_BW_CREDIT(2);
3023                 val |= MBUS_DBOX_B_CREDIT(12);
3024         } else {
3025                 val |= MBUS_DBOX_BW_CREDIT(1);
3026                 val |= MBUS_DBOX_B_CREDIT(8);
3027         }
3028
3029         intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
3030 }
3031
3032 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
3033 {
3034         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3035         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3036
3037         intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
3038                        HSW_LINETIME(crtc_state->linetime) |
3039                        HSW_IPS_LINETIME(crtc_state->ips_linetime));
3040 }
3041
3042 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
3043 {
3044         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3045         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3046         i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
3047         u32 val;
3048
3049         val = intel_de_read(dev_priv, reg);
3050         val &= ~HSW_FRAME_START_DELAY_MASK;
3051         val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
3052         intel_de_write(dev_priv, reg, val);
3053 }
3054
/*
 * Bigjoiner pre-enable: run the early enable steps for a bigjoiner pipe pair.
 * @crtc_state may belong to either the master or the slave pipe; for a slave,
 * the corresponding master crtc/state is looked up first.
 */
static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
                                         const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *master = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(master->base.dev);
        struct intel_crtc_state *master_crtc_state;
        struct drm_connector_state *conn_state;
        struct drm_connector *conn;
        struct intel_encoder *encoder = NULL;
        int i;

        /* For a slave pipe the real master is the linked crtc. */
        if (crtc_state->bigjoiner_slave)
                master = crtc_state->bigjoiner_linked_crtc;

        master_crtc_state = intel_atomic_get_new_crtc_state(state, master);

        /* Find the encoder driving the master pipe in this commit. */
        for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
                if (conn_state->crtc != &master->base)
                        continue;

                encoder = to_intel_encoder(conn_state->best_encoder);
                break;
        }

        if (!crtc_state->bigjoiner_slave) {
                /* need to enable VDSC, which we skipped in pre-enable */
                intel_dsc_enable(encoder, crtc_state);
        } else {
                /*
                 * Enable sequence steps 1-7 on bigjoiner master
                 */
                intel_encoders_pre_pll_enable(state, master);
                if (master_crtc_state->shared_dpll)
                        intel_enable_shared_dpll(master_crtc_state);
                intel_encoders_pre_enable(state, master);

                /* and DSC on slave */
                intel_dsc_enable(NULL, crtc_state);
        }

        if (DISPLAY_VER(dev_priv) >= 13)
                intel_uncompressed_joiner_enable(crtc_state);
}
3098
/*
 * Full crtc enable sequence for HSW and later platforms.
 *
 * The ordering of the steps below follows the hardware enable sequence
 * and must not be reshuffled casually; several workarounds (WA #1180,
 * the HSW dual-vblank-wait at the end) depend on the relative order of
 * pipe/plane/encoder enabling.
 */
static void hsw_crtc_enable(struct intel_atomic_state *state,
                            struct intel_crtc *crtc)
{
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
        enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
        bool psl_clkgate_wa;

        if (drm_WARN_ON(&dev_priv->drm, crtc->active))
                return;

        /* Bigjoiner pipes run the pre-pll/pre-enable steps via the
         * dedicated helper instead of the plain encoder hooks. */
        if (!new_crtc_state->bigjoiner) {
                intel_encoders_pre_pll_enable(state, crtc);

                if (new_crtc_state->shared_dpll)
                        intel_enable_shared_dpll(new_crtc_state);

                intel_encoders_pre_enable(state, crtc);
        } else {
                icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
        }

        intel_set_pipe_src_size(new_crtc_state);
        if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
                bdw_set_pipemisc(new_crtc_state);

        /* Transcoder timings are owned by the master pipe; DSI programs
         * its transcoder elsewhere. */
        if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder)) {
                intel_set_transcoder_timings(new_crtc_state);

                if (cpu_transcoder != TRANSCODER_EDP)
                        intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
                                       new_crtc_state->pixel_multiplier - 1);

                if (new_crtc_state->has_pch_encoder)
                        intel_cpu_transcoder_set_m_n(new_crtc_state,
                                                     &new_crtc_state->fdi_m_n, NULL);

                hsw_set_frame_start_delay(new_crtc_state);
        }

        if (!transcoder_is_dsi(cpu_transcoder))
                hsw_set_pipeconf(new_crtc_state);

        crtc->active = true;

        /* Display WA #1180: WaDisableScalarClockGating: glk */
        psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
                new_crtc_state->pch_pfit.enabled;
        if (psl_clkgate_wa)
                glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

        if (DISPLAY_VER(dev_priv) >= 9)
                skl_pfit_enable(new_crtc_state);
        else
                ilk_pfit_enable(new_crtc_state);

        /*
         * On ILK+ LUT must be loaded before the pipe is running but with
         * clocks enabled
         */
        intel_color_load_luts(new_crtc_state);
        intel_color_commit(new_crtc_state);
        /* update DSPCNTR to configure gamma/csc for pipe bottom color */
        if (DISPLAY_VER(dev_priv) < 9)
                intel_disable_primary_plane(new_crtc_state);

        hsw_set_linetime_wm(new_crtc_state);

        if (DISPLAY_VER(dev_priv) >= 11)
                icl_set_pipe_chicken(new_crtc_state);

        if (dev_priv->display.initial_watermarks)
                dev_priv->display.initial_watermarks(state, crtc);

        if (DISPLAY_VER(dev_priv) >= 11) {
                const struct intel_dbuf_state *dbuf_state =
                                intel_atomic_get_new_dbuf_state(state);

                icl_pipe_mbus_enable(crtc, dbuf_state->joined_mbus);
        }

        if (new_crtc_state->bigjoiner_slave)
                intel_crtc_vblank_on(new_crtc_state);

        intel_encoders_enable(state, crtc);

        /* WA #1180 undo must happen only after a full vblank has passed. */
        if (psl_clkgate_wa) {
                intel_wait_for_vblank(dev_priv, pipe);
                glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
        }

        /* If we change the relative order between pipe/planes enabling, we need
         * to change the workaround. */
        hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
        if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
                intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
                intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
        }
}
3200
3201 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
3202 {
3203         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
3204         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3205         enum pipe pipe = crtc->pipe;
3206
3207         /* To avoid upsetting the power well on haswell only disable the pfit if
3208          * it's in use. The hw state code will make sure we get this right. */
3209         if (!old_crtc_state->pch_pfit.enabled)
3210                 return;
3211
3212         intel_de_write(dev_priv, PF_CTL(pipe), 0);
3213         intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
3214         intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
3215 }
3216
/*
 * Full crtc disable sequence for ILK-class (PCH) platforms: encoders,
 * pipe, panel fitter, FDI and finally the PCH transcoder/PLL. The order
 * mirrors the hardware disable sequence and must be preserved.
 */
static void ilk_crtc_disable(struct intel_atomic_state *state,
                             struct intel_crtc *crtc)
{
        const struct intel_crtc_state *old_crtc_state =
                intel_atomic_get_old_crtc_state(state, crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /*
         * Sometimes spurious CPU pipe underruns happen when the
         * pipe is already disabled, but FDI RX/TX is still enabled.
         * Happens at least with VGA+HDMI cloning. Suppress them.
         */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

        intel_encoders_disable(state, crtc);

        intel_crtc_vblank_off(old_crtc_state);

        intel_disable_pipe(old_crtc_state);

        ilk_pfit_disable(old_crtc_state);

        if (old_crtc_state->has_pch_encoder)
                ilk_fdi_disable(crtc);

        intel_encoders_post_disable(state, crtc);

        if (old_crtc_state->has_pch_encoder) {
                ilk_disable_pch_transcoder(dev_priv, pipe);

                /* CPT needs extra cleanup of the DP and DPLL routing. */
                if (HAS_PCH_CPT(dev_priv)) {
                        i915_reg_t reg;
                        u32 temp;

                        /* disable TRANS_DP_CTL */
                        reg = TRANS_DP_CTL(pipe);
                        temp = intel_de_read(dev_priv, reg);
                        temp &= ~(TRANS_DP_OUTPUT_ENABLE |
                                  TRANS_DP_PORT_SEL_MASK);
                        temp |= TRANS_DP_PORT_SEL_NONE;
                        intel_de_write(dev_priv, reg, temp);

                        /* disable DPLL_SEL */
                        temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
                        temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
                        intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
                }

                ilk_fdi_pll_disable(crtc);
        }

        /* Everything is off; re-arm the FIFO underrun reporting. */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
3273
/*
 * HSW+ crtc disable: only the encoder disable/post-disable steps are
 * driven from here; the remaining pipe/transcoder teardown is presumably
 * handled inside those encoder hooks (see the FIXME below) — confirm in
 * the DDI encoder code before relying on this.
 */
static void hsw_crtc_disable(struct intel_atomic_state *state,
                             struct intel_crtc *crtc)
{
        /*
         * FIXME collapse everything to one hook.
         * Need care with mst->ddi interactions.
         */
        intel_encoders_disable(state, crtc);
        intel_encoders_post_disable(state, crtc);
}
3284
3285 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
3286 {
3287         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3288         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3289
3290         if (!crtc_state->gmch_pfit.control)
3291                 return;
3292
3293         /*
3294          * The panel fitter should only be adjusted whilst the pipe is disabled,
3295          * according to register description and PRM.
3296          */
3297         drm_WARN_ON(&dev_priv->drm,
3298                     intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
3299         assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
3300
3301         intel_de_write(dev_priv, PFIT_PGM_RATIOS,
3302                        crtc_state->gmch_pfit.pgm_ratios);
3303         intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);
3304
3305         /* Border color in case we don't scale up to the full screen. Black by
3306          * default, change to something else for debugging. */
3307         intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
3308 }
3309
3310 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
3311 {
3312         if (phy == PHY_NONE)
3313                 return false;
3314         else if (IS_DG2(dev_priv))
3315                 /*
3316                  * DG2 outputs labelled as "combo PHY" in the bspec use
3317                  * SNPS PHYs with completely different programming,
3318                  * hence we always return false here.
3319                  */
3320                 return false;
3321         else if (IS_ALDERLAKE_S(dev_priv))
3322                 return phy <= PHY_E;
3323         else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
3324                 return phy <= PHY_D;
3325         else if (IS_JSL_EHL(dev_priv))
3326                 return phy <= PHY_C;
3327         else if (DISPLAY_VER(dev_priv) >= 11)
3328                 return phy <= PHY_B;
3329         else
3330                 return false;
3331 }
3332
3333 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
3334 {
3335         if (IS_DG2(dev_priv))
3336                 /* DG2's "TC1" output uses a SNPS PHY */
3337                 return false;
3338         else if (IS_ALDERLAKE_P(dev_priv))
3339                 return phy >= PHY_F && phy <= PHY_I;
3340         else if (IS_TIGERLAKE(dev_priv))
3341                 return phy >= PHY_D && phy <= PHY_I;
3342         else if (IS_ICELAKE(dev_priv))
3343                 return phy >= PHY_C && phy <= PHY_F;
3344         else
3345                 return false;
3346 }
3347
3348 bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
3349 {
3350         if (phy == PHY_NONE)
3351                 return false;
3352         else if (IS_DG2(dev_priv))
3353                 /*
3354                  * All four "combo" ports and the TC1 port (PHY E) use
3355                  * Synopsis PHYs.
3356                  */
3357                 return phy <= PHY_E;
3358
3359         return false;
3360 }
3361
3362 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
3363 {
3364         if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
3365                 return PHY_D + port - PORT_D_XELPD;
3366         else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1)
3367                 return PHY_F + port - PORT_TC1;
3368         else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
3369                 return PHY_B + port - PORT_TC1;
3370         else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
3371                 return PHY_C + port - PORT_TC1;
3372         else if (IS_JSL_EHL(i915) && port == PORT_D)
3373                 return PHY_A;
3374
3375         return PHY_A + port - PORT_A;
3376 }
3377
3378 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
3379 {
3380         if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
3381                 return TC_PORT_NONE;
3382
3383         if (DISPLAY_VER(dev_priv) >= 12)
3384                 return TC_PORT_1 + port - PORT_TC1;
3385         else
3386                 return TC_PORT_1 + port - PORT_C;
3387 }
3388
/*
 * Map a DDI port to the power domain covering its lanes.
 * Unknown ports are flagged and fall back to POWER_DOMAIN_PORT_OTHER.
 */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
        switch (port) {
        case PORT_A:
                return POWER_DOMAIN_PORT_DDI_A_LANES;
        case PORT_B:
                return POWER_DOMAIN_PORT_DDI_B_LANES;
        case PORT_C:
                return POWER_DOMAIN_PORT_DDI_C_LANES;
        case PORT_D:
                return POWER_DOMAIN_PORT_DDI_D_LANES;
        case PORT_E:
                return POWER_DOMAIN_PORT_DDI_E_LANES;
        case PORT_F:
                return POWER_DOMAIN_PORT_DDI_F_LANES;
        case PORT_G:
                return POWER_DOMAIN_PORT_DDI_G_LANES;
        case PORT_H:
                return POWER_DOMAIN_PORT_DDI_H_LANES;
        case PORT_I:
                return POWER_DOMAIN_PORT_DDI_I_LANES;
        default:
                MISSING_CASE(port);
                return POWER_DOMAIN_PORT_OTHER;
        }
}
3415
/*
 * Return the AUX power domain for @dig_port. Type-C ports operating in
 * TBT-alt mode use the dedicated *_TBT domains; everything else goes
 * through the legacy aux_ch mapping.
 */
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
        enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

        if (intel_phy_is_tc(dev_priv, phy) &&
            dig_port->tc_mode == TC_PORT_TBT_ALT) {
                switch (dig_port->aux_ch) {
                case AUX_CH_C:
                        return POWER_DOMAIN_AUX_C_TBT;
                case AUX_CH_D:
                        return POWER_DOMAIN_AUX_D_TBT;
                case AUX_CH_E:
                        return POWER_DOMAIN_AUX_E_TBT;
                case AUX_CH_F:
                        return POWER_DOMAIN_AUX_F_TBT;
                case AUX_CH_G:
                        return POWER_DOMAIN_AUX_G_TBT;
                case AUX_CH_H:
                        return POWER_DOMAIN_AUX_H_TBT;
                case AUX_CH_I:
                        return POWER_DOMAIN_AUX_I_TBT;
                default:
                        MISSING_CASE(dig_port->aux_ch);
                        /* Arbitrary fallback after flagging the bad channel. */
                        return POWER_DOMAIN_AUX_C_TBT;
                }
        }

        return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
}
3447
/*
 * Convert an aux_ch to its (non-TBT) power domain. For ports that may be
 * in TBT-alt mode, use intel_aux_power_domain() instead.
 */
enum intel_display_power_domain
intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
{
        switch (aux_ch) {
        case AUX_CH_A:
                return POWER_DOMAIN_AUX_A;
        case AUX_CH_B:
                return POWER_DOMAIN_AUX_B;
        case AUX_CH_C:
                return POWER_DOMAIN_AUX_C;
        case AUX_CH_D:
                return POWER_DOMAIN_AUX_D;
        case AUX_CH_E:
                return POWER_DOMAIN_AUX_E;
        case AUX_CH_F:
                return POWER_DOMAIN_AUX_F;
        case AUX_CH_G:
                return POWER_DOMAIN_AUX_G;
        case AUX_CH_H:
                return POWER_DOMAIN_AUX_H;
        case AUX_CH_I:
                return POWER_DOMAIN_AUX_I;
        default:
                MISSING_CASE(aux_ch);
                /* Arbitrary fallback after flagging the bad channel. */
                return POWER_DOMAIN_AUX_A;
        }
}
3479
3480 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
3481 {
3482         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3483         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3484         struct drm_encoder *encoder;
3485         enum pipe pipe = crtc->pipe;
3486         u64 mask;
3487         enum transcoder transcoder = crtc_state->cpu_transcoder;
3488
3489         if (!crtc_state->hw.active)
3490                 return 0;
3491
3492         mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
3493         mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
3494         if (crtc_state->pch_pfit.enabled ||
3495             crtc_state->pch_pfit.force_thru)
3496                 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
3497
3498         drm_for_each_encoder_mask(encoder, &dev_priv->drm,
3499                                   crtc_state->uapi.encoder_mask) {
3500                 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3501
3502                 mask |= BIT_ULL(intel_encoder->power_domain);
3503         }
3504
3505         if (HAS_DDI(dev_priv) && crtc_state->has_audio)
3506                 mask |= BIT_ULL(POWER_DOMAIN_AUDIO_MMIO);
3507
3508         if (crtc_state->shared_dpll)
3509                 mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
3510
3511         if (crtc_state->dsc.compression_enable)
3512                 mask |= BIT_ULL(intel_dsc_power_domain(crtc_state));
3513
3514         return mask;
3515 }
3516
3517 static u64
3518 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
3519 {
3520         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3521         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3522         enum intel_display_power_domain domain;
3523         u64 domains, new_domains, old_domains;
3524
3525         domains = get_crtc_power_domains(crtc_state);
3526
3527         new_domains = domains & ~crtc->enabled_power_domains.mask;
3528         old_domains = crtc->enabled_power_domains.mask & ~domains;
3529
3530         for_each_power_domain(domain, new_domains)
3531                 intel_display_power_get_in_set(dev_priv,
3532                                                &crtc->enabled_power_domains,
3533                                                domain);
3534
3535         return old_domains;
3536 }
3537
3538 static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
3539                                            u64 domains)
3540 {
3541         intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
3542                                             &crtc->enabled_power_domains,
3543                                             domains);
3544 }
3545
/*
 * Full crtc enable sequence for VLV/CHV. The step order (timings before
 * pipeconf, PLL between the pre-pll and pre-enable encoder hooks, LUTs
 * before pipe enable) follows the hardware sequence and must be kept.
 */
static void valleyview_crtc_enable(struct intel_atomic_state *state,
                                   struct intel_crtc *crtc)
{
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        if (drm_WARN_ON(&dev_priv->drm, crtc->active))
                return;

        if (intel_crtc_has_dp_encoder(new_crtc_state))
                intel_dp_set_m_n(new_crtc_state, M1_N1);

        intel_set_transcoder_timings(new_crtc_state);
        intel_set_pipe_src_size(new_crtc_state);

        /* CHV pipe B: use legacy blending and a black canvas. */
        if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
                intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
                intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
        }

        i9xx_set_pipeconf(new_crtc_state);

        crtc->active = true;

        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

        intel_encoders_pre_pll_enable(state, crtc);

        if (IS_CHERRYVIEW(dev_priv))
                chv_enable_pll(new_crtc_state);
        else
                vlv_enable_pll(new_crtc_state);

        intel_encoders_pre_enable(state, crtc);

        i9xx_pfit_enable(new_crtc_state);

        intel_color_load_luts(new_crtc_state);
        intel_color_commit(new_crtc_state);
        /* update DSPCNTR to configure gamma for pipe bottom color */
        intel_disable_primary_plane(new_crtc_state);

        dev_priv->display.initial_watermarks(state, crtc);
        intel_enable_pipe(new_crtc_state);

        intel_crtc_vblank_on(new_crtc_state);

        intel_encoders_enable(state, crtc);
}
3597
/*
 * Full crtc enable sequence for gen2-4 (non-VLV/CHV) platforms.
 * The step order follows the hardware enable sequence; gen2 additionally
 * needs a vblank wait at the end to avoid spurious underruns.
 */
static void i9xx_crtc_enable(struct intel_atomic_state *state,
                             struct intel_crtc *crtc)
{
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        if (drm_WARN_ON(&dev_priv->drm, crtc->active))
                return;

        if (intel_crtc_has_dp_encoder(new_crtc_state))
                intel_dp_set_m_n(new_crtc_state, M1_N1);

        intel_set_transcoder_timings(new_crtc_state);
        intel_set_pipe_src_size(new_crtc_state);

        i9xx_set_pipeconf(new_crtc_state);

        crtc->active = true;

        /* Gen2 has no FIFO underrun reporting. */
        if (DISPLAY_VER(dev_priv) != 2)
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

        intel_encoders_pre_enable(state, crtc);

        i9xx_enable_pll(new_crtc_state);

        i9xx_pfit_enable(new_crtc_state);

        intel_color_load_luts(new_crtc_state);
        intel_color_commit(new_crtc_state);
        /* update DSPCNTR to configure gamma for pipe bottom color */
        intel_disable_primary_plane(new_crtc_state);

        if (dev_priv->display.initial_watermarks)
                dev_priv->display.initial_watermarks(state, crtc);
        else
                intel_update_watermarks(crtc);
        intel_enable_pipe(new_crtc_state);

        intel_crtc_vblank_on(new_crtc_state);

        intel_encoders_enable(state, crtc);

        /* prevents spurious underruns */
        if (DISPLAY_VER(dev_priv) == 2)
                intel_wait_for_vblank(dev_priv, pipe);
}
3647
3648 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
3649 {
3650         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
3651         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3652
3653         if (!old_crtc_state->gmch_pfit.control)
3654                 return;
3655
3656         assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
3657
3658         drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
3659                     intel_de_read(dev_priv, PFIT_CONTROL));
3660         intel_de_write(dev_priv, PFIT_CONTROL, 0);
3661 }
3662
/*
 * Full crtc disable sequence for gen2-4 and VLV/CHV.
 * Encoders, pipe, pfit and PLL are torn down in hardware order; DSI keeps
 * its PLL since that is managed by the DSI code.
 */
static void i9xx_crtc_disable(struct intel_atomic_state *state,
                              struct intel_crtc *crtc)
{
        struct intel_crtc_state *old_crtc_state =
                intel_atomic_get_old_crtc_state(state, crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /*
         * On gen2 planes are double buffered but the pipe isn't, so we must
         * wait for planes to fully turn off before disabling the pipe.
         */
        if (DISPLAY_VER(dev_priv) == 2)
                intel_wait_for_vblank(dev_priv, pipe);

        intel_encoders_disable(state, crtc);

        intel_crtc_vblank_off(old_crtc_state);

        intel_disable_pipe(old_crtc_state);

        i9xx_pfit_disable(old_crtc_state);

        intel_encoders_post_disable(state, crtc);

        /* DSI owns its PLL; leave it alone for DSI outputs. */
        if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
                if (IS_CHERRYVIEW(dev_priv))
                        chv_disable_pll(dev_priv, pipe);
                else if (IS_VALLEYVIEW(dev_priv))
                        vlv_disable_pll(dev_priv, pipe);
                else
                        i9xx_disable_pll(old_crtc_state);
        }

        intel_encoders_post_pll_disable(state, crtc);

        /* Gen2 has no FIFO underrun reporting. */
        if (DISPLAY_VER(dev_priv) != 2)
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

        if (!dev_priv->display.initial_watermarks)
                intel_update_watermarks(crtc);

        /* clock the pipe down to 640x480@60 to potentially save power */
        if (IS_I830(dev_priv))
                i830_enable_pipe(dev_priv, pipe);
}
3709
/*
 * Force a crtc off outside of a normal atomic commit (used during hw
 * state sanitization at load/resume). Disables the planes and the crtc
 * via the platform hook, then scrubs every piece of software state that
 * tracked the crtc as active: uapi/hw crtc state, encoder links, fbc,
 * watermarks, DPLL, power domains, and the cdclk/dbuf/bw global state.
 */
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
                                        struct drm_modeset_acquire_ctx *ctx)
{
        struct intel_encoder *encoder;
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_bw_state *bw_state =
                to_intel_bw_state(dev_priv->bw_obj.state);
        struct intel_cdclk_state *cdclk_state =
                to_intel_cdclk_state(dev_priv->cdclk.obj.state);
        struct intel_dbuf_state *dbuf_state =
                to_intel_dbuf_state(dev_priv->dbuf.obj.state);
        struct intel_crtc_state *crtc_state =
                to_intel_crtc_state(crtc->base.state);
        struct intel_plane *plane;
        struct drm_atomic_state *state;
        struct intel_crtc_state *temp_crtc_state;
        enum pipe pipe = crtc->pipe;
        int ret;

        if (!crtc_state->hw.active)
                return;

        /* Turn off every plane that is still visible on this crtc. */
        for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
                const struct intel_plane_state *plane_state =
                        to_intel_plane_state(plane->base.state);

                if (plane_state->uapi.visible)
                        intel_plane_disable_noatomic(crtc, plane);
        }

        /* Build a throwaway atomic state just to drive crtc_disable(). */
        state = drm_atomic_state_alloc(&dev_priv->drm);
        if (!state) {
                drm_dbg_kms(&dev_priv->drm,
                            "failed to disable [CRTC:%d:%s], out of memory",
                            crtc->base.base.id, crtc->base.name);
                return;
        }

        state->acquire_ctx = ctx;

        /* Everything's already locked, -EDEADLK can't happen. */
        temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
        ret = drm_atomic_add_affected_connectors(state, &crtc->base);

        drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);

        dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);

        drm_atomic_state_put(state);

        drm_dbg_kms(&dev_priv->drm,
                    "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
                    crtc->base.base.id, crtc->base.name);

        crtc->active = false;
        crtc->base.enabled = false;

        /* Clear the uapi and hw crtc state to match the now-off hardware. */
        drm_WARN_ON(&dev_priv->drm,
                    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
        crtc_state->uapi.active = false;
        crtc_state->uapi.connector_mask = 0;
        crtc_state->uapi.encoder_mask = 0;
        intel_crtc_free_hw_state(crtc_state);
        memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));

        for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
                encoder->base.crtc = NULL;

        intel_fbc_disable(crtc);
        intel_update_watermarks(crtc);
        intel_disable_shared_dpll(crtc_state);

        intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);

        /* Remove this pipe from all the global state objects. */
        dev_priv->active_pipes &= ~BIT(pipe);
        cdclk_state->min_cdclk[pipe] = 0;
        cdclk_state->min_voltage_level[pipe] = 0;
        cdclk_state->active_pipes &= ~BIT(pipe);

        dbuf_state->active_pipes &= ~BIT(pipe);

        bw_state->data_rate[pipe] = 0;
        bw_state->num_active_planes[pipe] = 0;
}
3794
/*
 * Turn all CRTCs off, but do not adjust state.
 * This has to be paired with a call to intel_modeset_setup_hw_state.
 */
3799 int intel_display_suspend(struct drm_device *dev)
3800 {
3801         struct drm_i915_private *dev_priv = to_i915(dev);
3802         struct drm_atomic_state *state;
3803         int ret;
3804
3805         if (!HAS_DISPLAY(dev_priv))
3806                 return 0;
3807
3808         state = drm_atomic_helper_suspend(dev);
3809         ret = PTR_ERR_OR_ZERO(state);
3810         if (ret)
3811                 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
3812                         ret);
3813         else
3814                 dev_priv->modeset_restore_state = state;
3815         return ret;
3816 }
3817
/* Tear down the DRM core encoder state, then free our wrapper struct. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
        struct intel_encoder *intel_enc = to_intel_encoder(encoder);

        drm_encoder_cleanup(encoder);
        kfree(intel_enc);
}
3825
/*
 * Cross check the actual hw state of a connector with our own modeset
 * state tracking (and its internal consistency): an enabled connector
 * must be attached to an active crtc whose encoder matches the atomic
 * state, and vice versa. Violations are reported via I915_STATE_WARN.
 */
static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
                                         struct drm_connector_state *conn_state)
{
        struct intel_connector *connector = to_intel_connector(conn_state->connector);
        struct drm_i915_private *i915 = to_i915(connector->base.dev);

        drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
                    connector->base.base.id, connector->base.name);

        if (connector->get_hw_state(connector)) {
                struct intel_encoder *encoder = intel_attached_encoder(connector);

                I915_STATE_WARN(!crtc_state,
                         "connector enabled without attached crtc\n");

                if (!crtc_state)
                        return;

                I915_STATE_WARN(!crtc_state->hw.active,
                                "connector is active, but attached crtc isn't\n");

                /* MST encoders are bound to a crtc dynamically; skip them. */
                if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
                        return;

                I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
                        "atomic encoder doesn't match attached encoder\n");

                I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
                        "attached encoder crtc differs from connector crtc\n");
        } else {
                I915_STATE_WARN(crtc_state && crtc_state->hw.active,
                                "attached crtc is active, but connector isn't\n");
                I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
                        "best encoder set without crtc!\n");
        }
}
3864
3865 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
3866 {
3867         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3868         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3869
3870         /* IPS only exists on ULT machines and is tied to pipe A. */
3871         if (!hsw_crtc_supports_ips(crtc))
3872                 return false;
3873
3874         if (!dev_priv->params.enable_ips)
3875                 return false;
3876
3877         if (crtc_state->pipe_bpp > 24)
3878                 return false;
3879
3880         /*
3881          * We compare against max which means we must take
3882          * the increased cdclk requirement into account when
3883          * calculating the new cdclk.
3884          *
3885          * Should measure whether using a lower cdclk w/o IPS
3886          */
3887         if (IS_BROADWELL(dev_priv) &&
3888             crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
3889                 return false;
3890
3891         return true;
3892 }
3893
/*
 * Decide whether IPS gets enabled for this crtc state. ips_enabled is
 * left false unless every constraint is satisfied. Returns 0 on success,
 * or a negative error code if the cdclk state cannot be acquired.
 */
static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->uapi.crtc->dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);

	crtc_state->ips_enabled = false;

	if (!hsw_crtc_state_ips_capable(crtc_state))
		return 0;

	/*
	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
	 * enabled and disabled dynamically based on package C states,
	 * user space can't make reliable use of the CRCs, so let's just
	 * completely disable it.
	 */
	if (crtc_state->crc_enabled)
		return 0;

	/* IPS should be fine as long as at least one plane is enabled. */
	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
		return 0;

	if (IS_BROADWELL(dev_priv)) {
		const struct intel_cdclk_state *cdclk_state;

		/* Only grab the cdclk state when we actually need it (BDW). */
		cdclk_state = intel_atomic_get_cdclk_state(state);
		if (IS_ERR(cdclk_state))
			return PTR_ERR(cdclk_state);

		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
		if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
			return 0;
	}

	crtc_state->ips_enabled = true;

	return 0;
}
3935
3936 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
3937 {
3938         const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3939
3940         /* GDG double wide on either pipe, otherwise pipe A only */
3941         return DISPLAY_VER(dev_priv) < 4 &&
3942                 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
3943 }
3944
3945 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
3946 {
3947         u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
3948         struct drm_rect src;
3949
3950         /*
3951          * We only use IF-ID interlacing. If we ever use
3952          * PF-ID we'll need to adjust the pixel_rate here.
3953          */
3954
3955         if (!crtc_state->pch_pfit.enabled)
3956                 return pixel_rate;
3957
3958         drm_rect_init(&src, 0, 0,
3959                       crtc_state->pipe_src_w << 16,
3960                       crtc_state->pipe_src_h << 16);
3961
3962         return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst,
3963                                    pixel_rate);
3964 }
3965
3966 static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
3967                                          const struct drm_display_mode *timings)
3968 {
3969         mode->hdisplay = timings->crtc_hdisplay;
3970         mode->htotal = timings->crtc_htotal;
3971         mode->hsync_start = timings->crtc_hsync_start;
3972         mode->hsync_end = timings->crtc_hsync_end;
3973
3974         mode->vdisplay = timings->crtc_vdisplay;
3975         mode->vtotal = timings->crtc_vtotal;
3976         mode->vsync_start = timings->crtc_vsync_start;
3977         mode->vsync_end = timings->crtc_vsync_end;
3978
3979         mode->flags = timings->flags;
3980         mode->type = DRM_MODE_TYPE_DRIVER;
3981
3982         mode->clock = timings->crtc_clock;
3983
3984         drm_mode_set_name(mode);
3985 }
3986
3987 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
3988 {
3989         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3990
3991         if (HAS_GMCH(dev_priv))
3992                 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
3993                 crtc_state->pixel_rate =
3994                         crtc_state->hw.pipe_mode.crtc_clock;
3995         else
3996                 crtc_state->pixel_rate =
3997                         ilk_pipe_pixel_rate(crtc_state);
3998 }
3999
/*
 * Derive the software-only mode state (hw.mode, hw.pipe_mode, pixel_rate)
 * from the already read out hw.adjusted_mode, undoing the bigjoiner and
 * eDP MSO splitter adjustments so that the derived modes describe what
 * each individual pipe actually sees.
 */
static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
{
	struct drm_display_mode *mode = &crtc_state->hw.mode;
	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

	drm_mode_copy(pipe_mode, adjusted_mode);

	if (crtc_state->bigjoiner) {
		/*
		 * transcoder is programmed to the full mode,
		 * but pipe timings are half of the transcoder mode
		 */
		pipe_mode->crtc_hdisplay /= 2;
		pipe_mode->crtc_hblank_start /= 2;
		pipe_mode->crtc_hblank_end /= 2;
		pipe_mode->crtc_hsync_start /= 2;
		pipe_mode->crtc_hsync_end /= 2;
		pipe_mode->crtc_htotal /= 2;
		pipe_mode->crtc_clock /= 2;
	}

	if (crtc_state->splitter.enable) {
		int n = crtc_state->splitter.link_count;
		int overlap = crtc_state->splitter.pixel_overlap;

		/*
		 * eDP MSO uses segment timings from EDID for transcoder
		 * timings, but full mode for everything else.
		 *
		 * h_full = (h_segment - pixel_overlap) * link_count
		 */
		pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
		pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
		pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
		pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
		pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
		pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
		pipe_mode->crtc_clock *= n;

		/*
		 * With MSO the adjusted_mode's user timings are taken from
		 * the expanded pipe_mode (full mode), not its own crtc_*
		 * segment timings.
		 */
		intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
		intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);
	} else {
		/* Aliased calls: sync each mode's user timings to its own crtc_* timings. */
		intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
		intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode);
	}

	intel_crtc_compute_pixel_rate(crtc_state);

	/* hw.mode reflects the full (doubled for bigjoiner) pipe source size. */
	drm_mode_copy(mode, adjusted_mode);
	mode->hdisplay = crtc_state->pipe_src_w << crtc_state->bigjoiner;
	mode->vdisplay = crtc_state->pipe_src_h;
}
4053
/*
 * Read out the encoder's hw configuration into crtc_state, then derive
 * the remaining software-only mode state from it.
 */
static void intel_encoder_get_config(struct intel_encoder *encoder,
				     struct intel_crtc_state *crtc_state)
{
	encoder->get_config(encoder, crtc_state);

	intel_crtc_readout_derived_state(crtc_state);
}
4061
/*
 * Compute and validate the crtc-level configuration: derive hw.pipe_mode
 * from hw.adjusted_mode (applying bigjoiner/MSO splitter adjustments),
 * decide on double wide mode, and reject configurations the hardware
 * can't handle. Returns 0 on success or a negative error code.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_display_mode *pipe_mode = &pipe_config->hw.pipe_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	drm_mode_copy(pipe_mode, &pipe_config->hw.adjusted_mode);

	/* Adjust pipe_mode for bigjoiner, with half the horizontal mode */
	if (pipe_config->bigjoiner) {
		pipe_mode->crtc_clock /= 2;
		pipe_mode->crtc_hdisplay /= 2;
		pipe_mode->crtc_hblank_start /= 2;
		pipe_mode->crtc_hblank_end /= 2;
		pipe_mode->crtc_hsync_start /= 2;
		pipe_mode->crtc_hsync_end /= 2;
		pipe_mode->crtc_htotal /= 2;
		pipe_config->pipe_src_w /= 2;
	}

	/*
	 * eDP MSO: the transcoder runs on segment timings, but the pipe
	 * sees the full expanded mode: h_full = (h_segment - overlap) * n.
	 */
	if (pipe_config->splitter.enable) {
		int n = pipe_config->splitter.link_count;
		int overlap = pipe_config->splitter.pixel_overlap;

		pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
		pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
		pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
		pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
		pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
		pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
		pipe_mode->crtc_clock *= n;
	}

	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);

	if (DISPLAY_VER(dev_priv) < 4) {
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    pipe_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (pipe_mode->crtc_clock > clock_limit) {
		drm_dbg_kms(&dev_priv->drm,
			    "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			    pipe_mode->crtc_clock, clock_limit,
			    yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
	    pipe_mode->crtc_hsync_start == pipe_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	/* PCH encoders additionally need a valid FDI link config. */
	if (pipe_config->has_pch_encoder)
		return ilk_fdi_compute_config(crtc, pipe_config);

	return 0;
}
4155
4156 static void
4157 intel_reduce_m_n_ratio(u32 *num, u32 *den)
4158 {
4159         while (*num > DATA_LINK_M_N_MASK ||
4160                *den > DATA_LINK_M_N_MASK) {
4161                 *num >>= 1;
4162                 *den >>= 1;
4163         }
4164 }
4165
4166 static void compute_m_n(unsigned int m, unsigned int n,
4167                         u32 *ret_m, u32 *ret_n,
4168                         bool constant_n)
4169 {
4170         /*
4171          * Several DP dongles in particular seem to be fussy about
4172          * too large link M/N values. Give N value as 0x8000 that
4173          * should be acceptable by specific devices. 0x8000 is the
4174          * specified fixed N value for asynchronous clock mode,
4175          * which the devices expect also in synchronous clock mode.
4176          */
4177         if (constant_n)
4178                 *ret_n = DP_LINK_CONSTANT_N_VALUE;
4179         else
4180                 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
4181
4182         *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
4183         intel_reduce_m_n_ratio(ret_m, ret_n);
4184 }
4185
/*
 * Compute the data (GMCH) and link M/N values for driving a link at the
 * given pixel clock. The data M/N compares the required data bandwidth
 * (bits_per_pixel * pixel_clock, bumped by the FEC overhead when FEC is
 * enabled) against the total link bandwidth (link_clock * nlanes * 8);
 * the link M/N compares pixel clock against link clock directly.
 */
void
intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
		       int pixel_clock, int link_clock,
		       struct intel_link_m_n *m_n,
		       bool constant_n, bool fec_enable)
{
	u32 data_clock = bits_per_pixel * pixel_clock;

	if (fec_enable)
		data_clock = intel_dp_mode_to_fec_clock(data_clock);

	m_n->tu = 64;
	compute_m_n(data_clock,
		    link_clock * nlanes * 8,
		    &m_n->gmch_m, &m_n->gmch_n,
		    constant_n);

	compute_m_n(pixel_clock, link_clock,
		    &m_n->link_m, &m_n->link_n,
		    constant_n);
}
4207
/*
 * Sanitize the VBT's LVDS SSC setting against what the BIOS actually
 * programmed into the hardware, preferring the BIOS state on mismatch.
 */
static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
{
	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker.  Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		bool bios_lvds_use_ssc = intel_de_read(dev_priv,
						       PCH_DREF_CONTROL) &
			DREF_SSC1_ENABLE;

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			drm_dbg_kms(&dev_priv->drm,
				    "SSC %s by BIOS, overriding VBT which says %s\n",
				    enableddisabled(bios_lvds_use_ssc),
				    enableddisabled(dev_priv->vbt.lvds_use_ssc));
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}
}
4230
/* Program the PCH transcoder data/link M1/N1 registers for this crtc's pipe. */
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* TU size shares the DATA_M1 register with the data M value. */
	intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
		       TU_SIZE(m_n->tu) | m_n->gmch_m);
	intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
4244
/* Report whether this transcoder has the M2/N2 register set (used for DRRS). */
static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
				 enum transcoder transcoder)
{
	/* On HSW only the EDP transcoder carries M2/N2. */
	if (IS_HASWELL(dev_priv))
		return transcoder == TRANSCODER_EDP;

	/*
	 * Strictly speaking some registers are available before
	 * gen7, but we only support DRRS on gen7+
	 */
	return DISPLAY_VER(dev_priv) == 7 || IS_CHERRYVIEW(dev_priv);
}
4257
/*
 * Program the CPU transcoder data/link M/N registers. On gen5+ the
 * registers are per-transcoder (with optional M2/N2 for DRRS); on older
 * G4X-style hardware they are per-pipe and there is no M2/N2 set.
 */
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (DISPLAY_VER(dev_priv) >= 5) {
		intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
			       TU_SIZE(m_n->tu) | m_n->gmch_m);
		intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
			       m_n->gmch_n);
		intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
			       m_n->link_m);
		intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
			       m_n->link_n);
		/*
		 *  M2_N2 registers are set only if DRRS is supported
		 * (to make sure the registers are not unnecessarily accessed).
		 */
		if (m2_n2 && crtc_state->has_drrs &&
		    transcoder_has_m2_n2(dev_priv, transcoder)) {
			intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
				       TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
				       m2_n2->gmch_n);
			intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
				       m2_n2->link_m);
			intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
				       m2_n2->link_n);
		}
	} else {
		/* Pre-gen5: per-pipe G4X-style registers. */
		intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
			       TU_SIZE(m_n->tu) | m_n->gmch_m);
		intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
		intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
4299
/*
 * Program the DP M/N values selected by @m_n (M1_N1 or M2_N2) into the
 * appropriate transcoder registers.
 *
 * NOTE: the PCH encoder path below always programs crtc_state->dp_m_n,
 * even when M2_N2 was requested.
 */
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
{
	const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	if (m_n == M1_N1) {
		dp_m_n = &crtc_state->dp_m_n;
		dp_m2_n2 = &crtc_state->dp_m2_n2;
	} else if (m_n == M2_N2) {

		/*
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
		 * needs to be programmed into M1_N1.
		 */
		dp_m_n = &crtc_state->dp_m2_n2;
	} else {
		drm_err(&i915->drm, "Unsupported divider value\n");
		return;
	}

	if (crtc_state->has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
}
4325
/*
 * Program the transcoder H/V timing registers from the adjusted mode.
 * All registers are written with N-1 values (hw convention). Interlaced
 * modes get their vtotal/vblank_end reduced by one halfline and a
 * VSYNCSHIFT programmed.
 */
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		/* Keep the shift within one scanline. */
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/* VSYNCSHIFT doesn't exist on gen2/3. */
	if (DISPLAY_VER(dev_priv) > 3)
		intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
			       vsyncshift);

	intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
	intel_de_write(dev_priv, HBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
	intel_de_write(dev_priv, HSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));

	intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
	intel_de_write(dev_priv, VBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
	intel_de_write(dev_priv, VSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		intel_de_write(dev_priv, VTOTAL(pipe),
			       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));

}
4383
/* Program PIPESRC with the pipe source size (stored as N-1 values). */
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	intel_de_write(dev_priv, PIPESRC(pipe),
		       ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
}
4396
4397 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
4398 {
4399         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4400         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
4401
4402         if (DISPLAY_VER(dev_priv) == 2)
4403                 return false;
4404
4405         if (DISPLAY_VER(dev_priv) >= 9 ||
4406             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
4407                 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
4408         else
4409                 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
4410 }
4411
/*
 * Read back the transcoder H/V timing registers into the adjusted mode.
 * Registers hold N-1 values, hence the "+ 1" on every field. DSI
 * transcoders have no blank registers. For interlaced modes, undo the
 * halfline adjustment applied at programming time.
 */
static void intel_get_transcoder_timings(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	u32 tmp;

	/* Low 16 bits: active; high 16 bits: total. */
	tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;

	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_hblank_start =
							(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_hblank_end =
						((tmp >> 16) & 0xffff) + 1;
	}
	tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;

	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_vblank_start =
							(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end =
						((tmp >> 16) & 0xffff) + 1;
	}
	tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	if (intel_pipe_is_interlaced(pipe_config)) {
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		/* Re-add the halfline the programming side subtracted. */
		pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
	}
}
4456
/*
 * Read back the pipe source size from PIPESRC. The register stores
 * N-1 values with width in the high 16 bits, height in the low 16.
 */
static void intel_get_pipe_src_size(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
}
4468
/*
 * Assemble and program the PIPECONF register for pre-ddi (gmch-style)
 * platforms: double wide, bpc/dither, interlace mode, color range,
 * gamma mode and frame start delay.
 */
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 pipeconf;

	pipeconf = 0;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;

	if (crtc_state->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (crtc_state->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/* gen2/3 and SDVO outputs need the field indication variant. */
		if (DISPLAY_VER(dev_priv) < 4 ||
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else {
		pipeconf |= PIPECONF_PROGRESSIVE;
	}

	/* Limited (16-235) color range selection, VLV/CHV only here. */
	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	     crtc_state->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	/* Frame start delay is programmed as N-1. */
	pipeconf |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);

	intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
	intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
}
4529
4530 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
4531 {
4532         if (IS_I830(dev_priv))
4533                 return false;
4534
4535         return DISPLAY_VER(dev_priv) >= 4 ||
4536                 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
4537 }
4538
/*
 * Read out the GMCH panel fitter state into @crtc_state.
 *
 * Only records the pfit state when the fitter is enabled and attached
 * to this crtc's pipe; otherwise gmch_pfit is left untouched.
 */
static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	if (!i9xx_has_pfit(dev_priv))
		return;

	tmp = intel_de_read(dev_priv, PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe. */
	if (DISPLAY_VER(dev_priv) < 4) {
		/* pre-gen4 the pfit can only serve pipe B */
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		/* gen4+ encodes the attached pipe in PFIT_CONTROL */
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	crtc_state->gmch_pfit.control = tmp;
	crtc_state->gmch_pfit.pgm_ratios =
		intel_de_read(dev_priv, PFIT_PGM_RATIOS);
}
4565
/*
 * Read back the VLV DPLL dividers via DPIO sideband and compute
 * the resulting port clock into @pipe_config.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
                               struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	struct dpll clock;
	u32 mdiv;
	int refclk = 100000; /* VLV DPLL reference clock in kHz */

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	vlv_dpio_put(dev_priv);

	/* Unpack the individual divider fields from the MDIV register */
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
4592
/*
 * Read back the CHV DPLL dividers via DPIO sideband and compute
 * the resulting port clock into @pipe_config.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
                               struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000; /* CHV DPLL reference clock in kHz */

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	vlv_dpio_put(dev_priv);

	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	/*
	 * M2 is a fixed-point value: integer part from PLL_DW0 bits 7:0
	 * shifted up 22 bits, fractional part (22 bits) from PLL_DW2 when
	 * the fractional divider is enabled.
	 */
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
4626
4627 static enum intel_output_format
4628 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
4629 {
4630         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4631         u32 tmp;
4632
4633         tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
4634
4635         if (tmp & PIPEMISC_YUV420_ENABLE) {
4636                 /* We support 4:2:0 in full blend mode only */
4637                 drm_WARN_ON(&dev_priv->drm,
4638                             (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
4639
4640                 return INTEL_OUTPUT_FORMAT_YCBCR420;
4641         } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
4642                 return INTEL_OUTPUT_FORMAT_YCBCR444;
4643         } else {
4644                 return INTEL_OUTPUT_FORMAT_RGB;
4645         }
4646 }
4647
4648 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
4649 {
4650         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4651         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
4652         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4653         enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
4654         u32 tmp;
4655
4656         tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
4657
4658         if (tmp & DISPPLANE_GAMMA_ENABLE)
4659                 crtc_state->gamma_enable = true;
4660
4661         if (!HAS_GMCH(dev_priv) &&
4662             tmp & DISPPLANE_PIPE_CSC_ENABLE)
4663                 crtc_state->csc_enable = true;
4664 }
4665
/*
 * Read out the full hardware state of a GMCH-era pipe into @pipe_config.
 *
 * Returns true if the pipe is enabled and the state was read out,
 * false if the pipe (or its power domain) is off. Takes a power domain
 * reference for the duration of the readout.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	/* On these platforms the transcoder is always tied 1:1 to the pipe */
	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* Only g4x and later have bpc controls in PIPECONF */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
		PIPECONF_GAMMA_MODE_SHIFT;

	if (IS_CHERRYVIEW(dev_priv))
		pipe_config->cgm_mode = intel_de_read(dev_priv,
						      CGM_PIPE_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	if (DISPLAY_VER(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_transcoder_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(pipe_config);

	/* Read back the pixel multiplier; location varies by platform */
	if (DISPLAY_VER(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
							DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
							       FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
							       FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
4787
/*
 * ilk_init_pch_refclk - configure the PCH display reference clock
 *
 * Examines the attached encoders (LVDS/eDP panels may want SSC per the
 * VBT) and any DPLLs already sourcing from SSC, computes the desired
 * PCH_DREF_CONTROL value, and then carefully sequences the individual
 * enable/disable steps to reach it. The final BUG_ON() asserts the
 * sequencing actually converged on the computed state.
 */
static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (encoder->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	/* On IBX an external CK505 clock source may drive the reference */
	if (HAS_PCH_IBX(dev_priv)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
		u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	drm_dbg_kms(&dev_priv->drm,
		    "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		    has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else if (using_ssc_source) {
		/* Keep SSC alive for any PLL still relying on it */
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output  */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				drm_dbg_kms(&dev_priv->drm,
					    "Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);
	} else {
		drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);

		if (!using_ssc_source) {
			drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
			intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
			udelay(200);
		}
	}

	/* The sequencing above must have produced exactly the computed state */
	BUG_ON(val != final);
}
4956
/*
 * Pulse the FDI mPHY reset via SOUTH_CHICKEN2: assert the reset bit,
 * wait for the status bit to latch, then de-assert and wait for it to
 * clear. Timeouts are logged but not fatal.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);

	if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");

	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);

	if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
}
4977
/* WaMPhyProgramming:hsw */
/*
 * Program the FDI mPHY tuning registers through the sideband interface.
 * The raw offsets and values come from the WaMPhyProgramming:hsw
 * workaround; they are intentionally left as magic numbers to match
 * the published sequence. Registers come in 0x20xx/0x21xx pairs --
 * presumably one per FDI lane/channel (TODO: confirm against BSpec).
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
5052
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	u32 reg, tmp;

	/* Sanitize the parameter combinations the hardware can't do */
	if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
		     "FDI requires downspread\n"))
		with_spread = true;
	if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
		     with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->sb_lock);

	/* Un-disable the SSC block but keep PATHALT set while configuring */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		/* Release PATHALT to get the spread clock onto the path */
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* The buffer-enable config bit lives in SBI_GEN0 on LP, SBI_DBUFF0 otherwise */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
5098
/* Sequence to disable CLKOUT_DP */
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	u32 reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* The buffer-enable config bit lives in SBI_GEN0 on LP, SBI_DBUFF0 otherwise */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		/* Gate the clock path (PATHALT) before disabling the SSC block */
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
5124
/* Map a bend amount (-50..50, in steps of 5) to an index into sscdivintphase[] */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * SBI_SSCDIVINTPHASE values (lower 16 bits) for each supported clock
 * bend amount; indexed via BEND_IDX(). Used by lpt_bend_clkout_dp().
 */
static const u16 sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};
5150
/*
 * Bend CLKOUT_DP
 * steps -50 to 50 inclusive, in steps of 5
 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
 * change in clock period = -(steps / 10) * 5.787 ps
 */
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
{
	u32 tmp;
	int idx = BEND_IDX(steps);

	/* Only multiples of 5 within the table range are valid */
	if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
		return;

	if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
		return;

	mutex_lock(&dev_priv->sb_lock);

	/*
	 * Half-step bends (not a multiple of 10) get a non-zero dither
	 * phase. NOTE(review): the 0xAAAAAAAB pattern presumably selects
	 * the SSC dither sequence -- confirm against BSpec.
	 */
	if (steps % 10 != 0)
		tmp = 0xAAAAAAAB;
	else
		tmp = 0x00000000;
	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);

	/* Program the table value into the low 16 bits of SSCDIVINTPHASE */
	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
	tmp &= 0xffff0000;
	tmp |= sscdivintphase[idx];
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

#undef BEND_IDX
5185
5186 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
5187 {
5188         u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
5189         u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
5190
5191         if ((ctl & SPLL_PLL_ENABLE) == 0)
5192                 return false;
5193
5194         if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
5195             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
5196                 return true;
5197
5198         if (IS_BROADWELL(dev_priv) &&
5199             (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
5200                 return true;
5201
5202         return false;
5203 }
5204
5205 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
5206                                enum intel_dpll_id id)
5207 {
5208         u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
5209         u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
5210
5211         if ((ctl & WRPLL_PLL_ENABLE) == 0)
5212                 return false;
5213
5214         if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
5215                 return true;
5216
5217         if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
5218             (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
5219             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
5220                 return true;
5221
5222         return false;
5223 }
5224
/*
 * lpt_init_pch_refclk - set up the LPT PCH reference clock (CLKOUT_DP)
 *
 * Records which PLLs the BIOS left running on the PCH SSC reference
 * (in which case nothing is touched), then either enables CLKOUT_DP
 * with spread for FDI use or disables it entirely.
 */
static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool has_fdi = false;

	/* Only the analog (VGA) output goes through FDI on this platform */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			has_fdi = true;
			break;
		default:
			break;
		}
	}

	/*
	 * The BIOS may have decided to use the PCH SSC
	 * reference so we must not disable it until the
	 * relevant PLLs have stopped relying on it. We'll
	 * just leave the PCH SSC reference enabled in case
	 * any active PLL is using it. It will get disabled
	 * after runtime suspend if we don't have FDI.
	 *
	 * TODO: Move the whole reference clock handling
	 * to the modeset sequence proper so that we can
	 * actually enable/disable/reconfigure these things
	 * safely. To do that we need to introduce a real
	 * clock hierarchy. That would also allow us to do
	 * clock bending finally.
	 */
	dev_priv->pch_ssc_use = 0;

	if (spll_uses_pch_ssc(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
		dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
	}

	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
		drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
	}

	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
		drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
	}

	/* Leave the reference alone while any PLL depends on it */
	if (dev_priv->pch_ssc_use)
		return;

	if (has_fdi) {
		lpt_bend_clkout_dp(dev_priv, 0);
		lpt_enable_clkout_dp(dev_priv, true, true);
	} else {
		lpt_disable_clkout_dp(dev_priv);
	}
}
5282
/*
 * Initialize reference clocks when the driver loads
 */
void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	/* IBX/CPT use the DREF_CONTROL scheme, LPT the iCLK/CLKOUT_DP one */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		ilk_init_pch_refclk(dev_priv);
		return;
	}

	if (HAS_PCH_LPT(dev_priv))
		lpt_init_pch_refclk(dev_priv);
}
5293
/*
 * Program PIPECONF for an ILK-style (PCH display) pipe from the
 * committed crtc state: bpc, dithering, interlace mode, color range,
 * gamma mode and frame start delay.
 */
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 val;

	val = 0;

	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (crtc_state->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	/*
	 * This would end up with an odd purple hue over
	 * the entire display. Make sure we don't do it.
	 */
	drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
		    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);

	/* SDVO handles limited range on the encoder side instead */
	if (crtc_state->limited_color_range &&
	    !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
		val |= PIPECONF_COLOR_RANGE_SELECT;

	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;

	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);

	intel_de_write(dev_priv, PIPECONF(pipe), val);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));
}
5350
/*
 * Program PIPECONF for HSW+ style transcoders. Unlike the ILK variant,
 * bpc lives elsewhere here; only dithering (HSW only), interlace mode
 * and the HSW YUV colorspace bit are set in this register.
 */
static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 val = 0;

	if (IS_HASWELL(dev_priv) && crtc_state->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	if (IS_HASWELL(dev_priv) &&
	    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;

	intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
	intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
}
5373
/*
 * Program the PIPEMISC register on BDW+: pipe bpc, dithering, YUV output
 * format, HDR mode precision and pixel rounding. On ADL-P additionally
 * configure the underrun bubble counter depending on scaler use.
 */
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;

	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 val = 0;
	int i;

	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPEMISC_6_BPC;
		break;
	case 24:
		val |= PIPEMISC_8_BPC;
		break;
	case 30:
		val |= PIPEMISC_10_BPC;
		break;
	case 36:
		/* Port output 12BPC defined for ADLP+ */
		if (DISPLAY_VER(dev_priv) > 12)
			val |= PIPEMISC_12_BPC_ADLP;
		break;
	default:
		MISSING_CASE(crtc_state->pipe_bpp);
		break;
	}

	if (crtc_state->dither)
		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;

	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		val |= PIPEMISC_YUV420_ENABLE |
			PIPEMISC_YUV420_MODE_FULL_BLEND;

	/*
	 * Enable HDR mode precision only when all active planes
	 * (cursor aside) are HDR-capable.
	 */
	if (DISPLAY_VER(dev_priv) >= 11 &&
	    (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
					   BIT(PLANE_CURSOR))) == 0)
		val |= PIPEMISC_HDR_MODE_PRECISION;

	if (DISPLAY_VER(dev_priv) >= 12)
		val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;

	if (IS_ALDERLAKE_P(dev_priv)) {
		bool scaler_in_use = false;

		/* Any scaler in use on this pipe is enough */
		for (i = 0; i < crtc->num_scalers; i++) {
			if (!scaler_state->scalers[i].in_use)
				continue;

			scaler_in_use = true;
			break;
		}

		intel_de_rmw(dev_priv, PIPE_MISC2(crtc->pipe),
			     PIPE_MISC2_UNDERRUN_BUBBLE_COUNTER_MASK,
			     scaler_in_use ? PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN :
			     PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS);
	}

	intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
}
5442
/*
 * Read back the pipe bpp from the PIPEMISC register.
 *
 * Returns the bpp value (18/24/30/36), or 0 if the register
 * contains an unrecognized bpc encoding.
 */
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));

	switch (tmp & PIPEMISC_BPC_MASK) {
	case PIPEMISC_6_BPC:
		return 18;
	case PIPEMISC_8_BPC:
		return 24;
	case PIPEMISC_10_BPC:
		return 30;
	/*
	 * PORT OUTPUT 12 BPC defined for ADLP+.
	 *
	 * TODO:
	 * For previous platforms with DSI interface, bits 5:7
	 * are used for storing pipe_bpp irrespective of dithering.
	 * Since the value of 12 BPC is not defined for these bits
	 * on older platforms, need to find a workaround for 12 BPC
	 * MIPI DSI HW readout.
	 */
	case PIPEMISC_12_BPC_ADLP:
		if (DISPLAY_VER(dev_priv) > 12)
			return 36;
		fallthrough;
	default:
		MISSING_CASE(tmp);
		return 0;
	}
}
5476
/*
 * Compute the number of FDI lanes required for the given pixel clock,
 * link bandwidth and pipe bpp, padding the required bandwidth by 5%
 * (max center spread is 2.5%; double it for safety's sake) so spread
 * spectrum cannot oversubscribe the link.
 */
int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	unsigned int required_bps = target_clock * bpp * 21 / 20;
	unsigned int lane_bw = link_bw * 8;

	/* round up: a partially used lane is still a lane */
	return (required_bps + lane_bw - 1) / lane_bw;
}
5487
/*
 * Read back the link M/N configuration from the PCH transcoder
 * registers for the given crtc's pipe.
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
	/* DATA_M1 carries both the data M value and the TU size field */
	m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
	/* TU size field is 0-based in hw, 1-based in software state */
	m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
5503
/*
 * Read back the link M/N (and, when present, M2/N2) configuration from
 * the CPU transcoder registers. Gen5+ uses per-transcoder registers,
 * older (G4X-style) hardware uses per-pipe registers. @m2_n2 may be
 * NULL when the caller does not need the second set.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (DISPLAY_VER(dev_priv) >= 5) {
		m_n->link_m = intel_de_read(dev_priv,
					    PIPE_LINK_M1(transcoder));
		m_n->link_n = intel_de_read(dev_priv,
					    PIPE_LINK_N1(transcoder));
		/* DATA_M1 carries both the data M value and the TU size field */
		m_n->gmch_m = intel_de_read(dev_priv,
					    PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = intel_de_read(dev_priv,
					    PIPE_DATA_N1(transcoder));
		/* TU size field is 0-based in hw, 1-based in software state */
		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;

		/* Second M/N set only on transcoders that implement it */
		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
			m2_n2->link_m = intel_de_read(dev_priv,
						      PIPE_LINK_M2(transcoder));
			m2_n2->link_n = intel_de_read(dev_priv,
							     PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = intel_de_read(dev_priv,
							     PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n = intel_de_read(dev_priv,
							     PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
		m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
5548
5549 void intel_dp_get_m_n(struct intel_crtc *crtc,
5550                       struct intel_crtc_state *pipe_config)
5551 {
5552         if (pipe_config->has_pch_encoder)
5553                 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
5554         else
5555                 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
5556                                              &pipe_config->dp_m_n,
5557                                              &pipe_config->dp_m2_n2);
5558 }
5559
/*
 * Read back the FDI link M/N values from the CPU transcoder;
 * FDI has no second M2/N2 set, hence the NULL.
 */
static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
5566
5567 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
5568                                   u32 pos, u32 size)
5569 {
5570         drm_rect_init(&crtc_state->pch_pfit.dst,
5571                       pos >> 16, pos & 0xffff,
5572                       size >> 16, size & 0xffff);
5573 }
5574
/*
 * Read back the SKL+ pipe scaler (panel fitter) state: find the first
 * scaler attached to this pipe, record its window as the pfit dest
 * rectangle, and update the scaler bookkeeping accordingly.
 */
static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
	int id = -1;
	int i;

	/* find scaler attached to this pipe */
	for (i = 0; i < crtc->num_scalers; i++) {
		u32 ctl, pos, size;

		/* only scalers enabled in pipe (not plane) mode count */
		ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
		if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
			continue;

		id = i;
		crtc_state->pch_pfit.enabled = true;

		pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
		size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));

		ilk_get_pfit_pos_size(crtc_state, pos, size);

		scaler_state->scalers[i].in_use = true;
		break;
	}

	/* claim/release the crtc's slot in the scaler users bitmask */
	scaler_state->scaler_id = id;
	if (id >= 0)
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	else
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
}
5609
/*
 * Read back the ILK-style panel fitter state for this pipe. If the
 * fitter is enabled, record its window as the pfit dest rectangle.
 */
static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 ctl, pos, size;

	ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
	if ((ctl & PF_ENABLE) == 0)
		return;

	crtc_state->pch_pfit.enabled = true;

	pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
	size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));

	ilk_get_pfit_pos_size(crtc_state, pos, size);

	/*
	 * We currently do not free assignments of panel fitters on
	 * ivb/hsw (since we don't use the higher upscaling modes which
	 * differentiates them) so just WARN about this case for now.
	 */
	drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 7 &&
		    (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
}
5635
5636 static bool ilk_get_pipe_config(struct intel_crtc *crtc,
5637                                 struct intel_crtc_state *pipe_config)
5638 {
5639         struct drm_device *dev = crtc->base.dev;
5640         struct drm_i915_private *dev_priv = to_i915(dev);
5641         enum intel_display_power_domain power_domain;
5642         intel_wakeref_t wakeref;
5643         u32 tmp;
5644         bool ret;
5645
5646         power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
5647         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
5648         if (!wakeref)
5649                 return false;
5650
5651         pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
5652         pipe_config->shared_dpll = NULL;
5653
5654         ret = false;
5655         tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
5656         if (!(tmp & PIPECONF_ENABLE))
5657                 goto out;
5658
5659         switch (tmp & PIPECONF_BPC_MASK) {
5660         case PIPECONF_6BPC:
5661                 pipe_config->pipe_bpp = 18;
5662                 break;
5663         case PIPECONF_8BPC:
5664                 pipe_config->pipe_bpp = 24;
5665                 break;
5666         case PIPECONF_10BPC:
5667                 pipe_config->pipe_bpp = 30;
5668                 break;
5669         case PIPECONF_12BPC:
5670                 pipe_config->pipe_bpp = 36;
5671                 break;
5672         default:
5673                 break;
5674         }
5675
5676         if (tmp & PIPECONF_COLOR_RANGE_SELECT)
5677                 pipe_config->limited_color_range = true;
5678
5679         switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
5680         case PIPECONF_OUTPUT_COLORSPACE_YUV601:
5681         case PIPECONF_OUTPUT_COLORSPACE_YUV709:
5682                 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
5683                 break;
5684         default:
5685                 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
5686                 break;
5687         }
5688
5689         pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
5690                 PIPECONF_GAMMA_MODE_SHIFT;
5691
5692         pipe_config->csc_mode = intel_de_read(dev_priv,
5693                                               PIPE_CSC_MODE(crtc->pipe));
5694
5695         i9xx_get_pipe_color_config(pipe_config);
5696         intel_color_get_config(pipe_config);
5697
5698         if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
5699                 struct intel_shared_dpll *pll;
5700                 enum intel_dpll_id pll_id;
5701                 bool pll_active;
5702
5703                 pipe_config->has_pch_encoder = true;
5704
5705                 tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe));
5706                 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
5707                                           FDI_DP_PORT_WIDTH_SHIFT) + 1;
5708
5709                 ilk_get_fdi_m_n_config(crtc, pipe_config);
5710
5711                 if (HAS_PCH_IBX(dev_priv)) {
5712                         /*
5713                          * The pipe->pch transcoder and pch transcoder->pll
5714                          * mapping is fixed.
5715                          */
5716                         pll_id = (enum intel_dpll_id) crtc->pipe;
5717                 } else {
5718                         tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
5719                         if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
5720                                 pll_id = DPLL_ID_PCH_PLL_B;
5721                         else
5722                                 pll_id= DPLL_ID_PCH_PLL_A;
5723                 }
5724
5725                 pipe_config->shared_dpll =
5726                         intel_get_shared_dpll_by_id(dev_priv, pll_id);
5727                 pll = pipe_config->shared_dpll;
5728
5729                 pll_active = intel_dpll_get_hw_state(dev_priv, pll,
5730                                                      &pipe_config->dpll_hw_state);
5731                 drm_WARN_ON(dev, !pll_active);
5732
5733                 tmp = pipe_config->dpll_hw_state.dpll;
5734                 pipe_config->pixel_multiplier =
5735                         ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
5736                          >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
5737
5738                 ilk_pch_clock_get(crtc, pipe_config);
5739         } else {
5740                 pipe_config->pixel_multiplier = 1;
5741         }
5742
5743         intel_get_transcoder_timings(crtc, pipe_config);
5744         intel_get_pipe_src_size(crtc, pipe_config);
5745
5746         ilk_get_pfit_config(pipe_config);
5747
5748         ret = true;
5749
5750 out:
5751         intel_display_power_put(dev_priv, power_domain, wakeref);
5752
5753         return ret;
5754 }
5755
/*
 * Determine which CPU transcoder drives this crtc on HSW+, accounting
 * for the eDP and (gen11+) DSI transcoders whose pipe mapping is
 * programmable. Grabs the transcoder power domain into
 * @power_domain_set on success.
 *
 * Returns true if the resolved transcoder is enabled.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
	unsigned long enabled_panel_transcoders = 0;
	enum transcoder panel_transcoder;
	u32 tmp;

	if (DISPLAY_VER(dev_priv) >= 11)
		panel_transcoder_mask |=
			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * and DSI transcoders handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	for_each_cpu_transcoder_masked(dev_priv, panel_transcoder,
				       panel_transcoder_mask) {
		bool force_thru = false;
		enum pipe trans_pipe;

		tmp = intel_de_read(dev_priv,
				    TRANS_DDI_FUNC_CTL(panel_transcoder));
		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			continue;

		/*
		 * Log all enabled ones, only use the first one.
		 *
		 * FIXME: This won't work for two separate DSI displays.
		 */
		enabled_panel_transcoders |= BIT(panel_transcoder);
		if (enabled_panel_transcoders != BIT(panel_transcoder))
			continue;

		/* decode which pipe this panel transcoder feeds */
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			drm_WARN(dev, 1,
				 "unknown pipe linked to transcoder %s\n",
				 transcoder_name(panel_transcoder));
			fallthrough;
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
			force_thru = true;
			fallthrough;
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_pipe = PIPE_C;
			break;
		case TRANS_DDI_EDP_INPUT_D_ONOFF:
			trans_pipe = PIPE_D;
			break;
		}

		if (trans_pipe == crtc->pipe) {
			pipe_config->cpu_transcoder = panel_transcoder;
			pipe_config->pch_pfit.force_thru = force_thru;
		}
	}

	/*
	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
	 */
	drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
		    enabled_panel_transcoders != BIT(TRANSCODER_EDP));

	if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
						       POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
		return false;

	tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
5843
/*
 * Determine whether a BXT/GLK DSI transcoder drives this crtc. Walks
 * the two DSI ports (A -> TRANSCODER_DSI_A, C -> TRANSCODER_DSI_C),
 * grabbing each transcoder's power domain into @power_domain_set
 * before touching its registers.
 *
 * Returns true if a DSI transcoder was found for this pipe.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder;
	enum port port;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
							       POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
			continue;

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* the DSI port must be wired to this crtc's pipe */
		tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
5889
/*
 * Work out which DDI port the transcoder is driving, and read back the
 * PCH FDI state when the pipe turns out to be routed through the PCH
 * (HSW/BDW DDI E only).
 */
static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	enum port port;
	u32 tmp;

	if (transcoder_is_dsi(cpu_transcoder)) {
		/* DSI transcoder->port mapping is fixed */
		port = (cpu_transcoder == TRANSCODER_DSI_A) ?
						PORT_A : PORT_B;
	} else {
		tmp = intel_de_read(dev_priv,
				    TRANS_DDI_FUNC_CTL(cpu_transcoder));
		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			return;
		if (DISPLAY_VER(dev_priv) >= 12)
			port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
		else
			port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
	}

	/*
	 * Haswell has only one FDI/PCH transcoder A, which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if (DISPLAY_VER(dev_priv) < 9 &&
	    (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		/* FDI lane count field is 0-based in hw */
		tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ilk_get_fdi_m_n_config(crtc, pipe_config);
	}
}
5928
/*
 * Read back the full hardware state of a HSW+ pipe into @pipe_config:
 * transcoder assignment (incl. eDP/DSI special cases), DSC/joiner
 * state, port and timing state, output format, gamma/CSC, linetime,
 * panel fitter, IPS and pixel multiplier.
 *
 * Returns true if the pipe is active. All power domains taken during
 * readout are released before returning.
 */
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_display_power_domain_set power_domain_set = { };
	bool active;
	u32 tmp;

	if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
						       POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);

	/* BXT/GLK DSI transcoders are separate from the regular ones */
	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
		drm_WARN_ON(&dev_priv->drm, active);
		active = true;
	}

	intel_dsc_get_config(pipe_config);
	if (DISPLAY_VER(dev_priv) >= 13 && !pipe_config->dsc.compression_enable)
		intel_uncompressed_joiner_get_config(pipe_config);

	if (!active) {
		/* bigjoiner slave doesn't enable transcoder */
		if (!pipe_config->bigjoiner_slave)
			goto out;

		active = true;
		pipe_config->pixel_multiplier = 1;

		/* we cannot read out most state, so don't bother.. */
		pipe_config->quirks |= PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE;
	} else if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
	    DISPLAY_VER(dev_priv) >= 11) {
		hsw_get_ddi_port_state(crtc, pipe_config);
		intel_get_transcoder_timings(crtc, pipe_config);
	}

	if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
		intel_vrr_get_config(crtc, pipe_config);

	intel_get_pipe_src_size(crtc, pipe_config);

	/* HSW keeps the YUV bit in PIPECONF, BDW+ moved it to PIPEMISC */
	if (IS_HASWELL(dev_priv)) {
		u32 tmp = intel_de_read(dev_priv,
					PIPECONF(pipe_config->cpu_transcoder));

		if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		else
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	} else {
		pipe_config->output_format =
			bdw_get_pipemisc_output_format(crtc);
	}

	pipe_config->gamma_mode = intel_de_read(dev_priv,
						GAMMA_MODE(crtc->pipe));

	pipe_config->csc_mode = intel_de_read(dev_priv,
					      PIPE_CSC_MODE(crtc->pipe));

	if (DISPLAY_VER(dev_priv) >= 9) {
		tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));

		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
			pipe_config->gamma_enable = true;

		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
			pipe_config->csc_enable = true;
	} else {
		i9xx_get_pipe_color_config(pipe_config);
	}

	intel_color_get_config(pipe_config);

	tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
	pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		pipe_config->ips_linetime =
			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);

	if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
						      POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
		if (DISPLAY_VER(dev_priv) >= 9)
			skl_get_pfit_config(pipe_config);
		else
			ilk_get_pfit_config(pipe_config);
	}

	if (hsw_crtc_supports_ips(crtc)) {
		if (IS_HASWELL(dev_priv))
			pipe_config->ips_enabled = intel_de_read(dev_priv,
								 IPS_CTL) & IPS_ENABLE;
		else {
			/*
			 * We cannot readout IPS state on broadwell, set to
			 * true so we can set it to a defined state on first
			 * commit.
			 */
			pipe_config->ips_enabled = true;
		}
	}

	if (pipe_config->bigjoiner_slave) {
		/* Cannot be read out as a slave, set to 0. */
		pipe_config->pixel_multiplier = 0;
	} else if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		/* PIPE_MULT field is 0-based in hw */
		pipe_config->pixel_multiplier =
			intel_de_read(dev_priv,
				      PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

out:
	intel_display_power_put_all_in_set(dev_priv, &power_domain_set);

	return active;
}
6054
6055 static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
6056 {
6057         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6058         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
6059
6060         if (!i915->display.get_pipe_config(crtc, crtc_state))
6061                 return false;
6062
6063         crtc_state->hw.active = true;
6064
6065         intel_crtc_readout_derived_state(crtc_state);
6066
6067         return true;
6068 }
6069
/*
 * VESA 640x480x72Hz mode to set on the pipe.
 * Used by intel_get_load_detect_pipe() as a known-good timing for
 * analog load detection.
 */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
6075
6076 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
6077                                         struct drm_crtc *crtc)
6078 {
6079         struct drm_plane *plane;
6080         struct drm_plane_state *plane_state;
6081         int ret, i;
6082
6083         ret = drm_atomic_add_affected_planes(state, crtc);
6084         if (ret)
6085                 return ret;
6086
6087         for_each_new_plane_in_state(state, plane, plane_state, i) {
6088                 if (plane_state->crtc != crtc)
6089                         continue;
6090
6091                 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
6092                 if (ret)
6093                         return ret;
6094
6095                 drm_atomic_set_fb_for_plane(plane_state, NULL);
6096         }
6097
6098         return 0;
6099 }
6100
/*
 * intel_get_load_detect_pipe - light up a pipe for analog load detection
 * @connector: connector to drive during load detection
 * @old: storage for the state needed to undo this later
 *	(consumed by intel_release_load_detect_pipe())
 * @ctx: modeset lock acquire context
 *
 * Picks a crtc for @connector (preferring the one already assigned,
 * otherwise the first unused crtc the encoder can drive), commits a
 * fixed VESA 640x480@72 mode on it with all planes disabled, and saves
 * the previous state in @old->restore_state.
 *
 * NOTE(review): mixed bool/int return convention — returns true (1) on
 * success, false (0) on failure, or -EDEADLK when the caller must back
 * off and restart the locking sequence; callers must check for -EDEADLK
 * explicitly.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder =
		intel_attached_encoder(to_intel_connector(connector));
	struct intel_crtc *possible_crtc;
	struct intel_crtc *crtc = NULL;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_config *config = &dev->mode_config;
	/* @state applies the load-detect mode; @restore_state undoes it. */
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		    connector->base.id, connector->name,
		    encoder->base.base.id, encoder->base.name);

	old->restore_state = NULL;

	drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = to_intel_crtc(connector->state->crtc);

		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_intel_crtc(dev, possible_crtc) {
		if (!(encoder->base.possible_crtcs &
		      drm_crtc_mask(&possible_crtc->base)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->base.mutex, ctx);
		if (ret)
			goto fail;

		/* Keep looking if this crtc is already in use. */
		if (possible_crtc->base.state->enable) {
			drm_modeset_unlock(&possible_crtc->base.mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "no pipe available for load-detect\n");
		ret = -ENODEV;
		goto fail;
	}

found:
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	/* Build the load-detect state: connector on crtc, fixed mode. */
	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, &crtc->base);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->uapi.active = true;

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
					   &load_detect_mode);
	if (ret)
		goto fail;

	ret = intel_modeset_disable_planes(state, &crtc->base);
	if (ret)
		goto fail;

	/* Snapshot the current state so it can be restored afterwards. */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, &crtc->base));
	if (!ret)
		ret = drm_atomic_add_affected_planes(restore_state, &crtc->base);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to create a copy of old state to restore: %i\n",
			    ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to set mode on load-detect pipe\n");
		goto fail;
	}

	/* Ownership of restore_state passes to the caller via @old. */
	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
	return true;

fail:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	/* Propagate -EDEADLK so the caller can back off and retry. */
	if (ret == -EDEADLK)
		return ret;

	return false;
}
6255
6256 void intel_release_load_detect_pipe(struct drm_connector *connector,
6257                                     struct intel_load_detect_pipe *old,
6258                                     struct drm_modeset_acquire_ctx *ctx)
6259 {
6260         struct intel_encoder *intel_encoder =
6261                 intel_attached_encoder(to_intel_connector(connector));
6262         struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
6263         struct drm_encoder *encoder = &intel_encoder->base;
6264         struct drm_atomic_state *state = old->restore_state;
6265         int ret;
6266
6267         drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6268                     connector->base.id, connector->name,
6269                     encoder->base.id, encoder->name);
6270
6271         if (!state)
6272                 return;
6273
6274         ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
6275         if (ret)
6276                 drm_dbg_kms(&i915->drm,
6277                             "Couldn't release load detect pipe: %i\n", ret);
6278         drm_atomic_state_put(state);
6279 }
6280
6281 static int i9xx_pll_refclk(struct drm_device *dev,
6282                            const struct intel_crtc_state *pipe_config)
6283 {
6284         struct drm_i915_private *dev_priv = to_i915(dev);
6285         u32 dpll = pipe_config->dpll_hw_state.dpll;
6286
6287         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
6288                 return dev_priv->vbt.lvds_ssc_freq;
6289         else if (HAS_PCH_SPLIT(dev_priv))
6290                 return 120000;
6291         else if (DISPLAY_VER(dev_priv) != 2)
6292                 return 96000;
6293         else
6294                 return 48000;
6295 }
6296
/*
 * Returns the clock of the currently programmed mode of the given pipe.
 *
 * Decodes the DPLL/FP register values previously read into
 * pipe_config->dpll_hw_state back into m1/m2/n/p1/p2 divisors and
 * computes port_clock from them.
 */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* Pick FP0 or FP1 divisors depending on the selected rate. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		/* Pineview stores N as a one-hot bitfield, hence ffs(). */
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (DISPLAY_VER(dev_priv) != 2) {
		/* P1 is a one-hot bitfield on gen3+, hence ffs(). */
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			/* Leave port_clock untouched for unknown modes. */
			drm_dbg_kms(&dev_priv->drm,
				    "Unknown DPLL mode %08x in programmed "
				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		enum pipe lvds_pipe;

		/* Gen2: LVDS on i85x uses its own P1/P2 encoding. */
		if (IS_I85X(dev_priv) &&
		    intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
		    lvds_pipe == crtc->pipe) {
			u32 lvds = intel_de_read(dev_priv, LVDS);

			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
6389
6390 int intel_dotclock_calculate(int link_freq,
6391                              const struct intel_link_m_n *m_n)
6392 {
6393         /*
6394          * The calculation for the data clock is:
6395          * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
6396          * But we want to avoid losing precison if possible, so:
6397          * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
6398          *
6399          * and the link clock is simpler:
6400          * link_clock = (m * link_clock) / n
6401          */
6402
6403         if (!m_n->link_n)
6404                 return 0;
6405
6406         return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
6407 }
6408
6409 static void ilk_pch_clock_get(struct intel_crtc *crtc,
6410                               struct intel_crtc_state *pipe_config)
6411 {
6412         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6413
6414         /* read out port_clock from the DPLL */
6415         i9xx_crtc_clock_get(crtc, pipe_config);
6416
6417         /*
6418          * In case there is an active pipe without active ports,
6419          * we may need some idea for the dotclock anyway.
6420          * Calculate one based on the FDI configuration.
6421          */
6422         pipe_config->hw.adjusted_mode.crtc_clock =
6423                 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
6424                                          &pipe_config->fdi_m_n);
6425 }
6426
6427 /* Returns the currently programmed mode of the given encoder. */
6428 struct drm_display_mode *
6429 intel_encoder_current_mode(struct intel_encoder *encoder)
6430 {
6431         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
6432         struct intel_crtc_state *crtc_state;
6433         struct drm_display_mode *mode;
6434         struct intel_crtc *crtc;
6435         enum pipe pipe;
6436
6437         if (!encoder->get_hw_state(encoder, &pipe))
6438                 return NULL;
6439
6440         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
6441
6442         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
6443         if (!mode)
6444                 return NULL;
6445
6446         crtc_state = intel_crtc_state_alloc(crtc);
6447         if (!crtc_state) {
6448                 kfree(mode);
6449                 return NULL;
6450         }
6451
6452         if (!intel_crtc_get_pipe_config(crtc_state)) {
6453                 kfree(crtc_state);
6454                 kfree(mode);
6455                 return NULL;
6456         }
6457
6458         intel_encoder_get_config(encoder, crtc_state);
6459
6460         intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
6461
6462         kfree(crtc_state);
6463
6464         return mode;
6465 }
6466
6467 /**
6468  * intel_wm_need_update - Check whether watermarks need updating
6469  * @cur: current plane state
6470  * @new: new plane state
6471  *
6472  * Check current plane state versus the new one to determine whether
6473  * watermarks need to be recalculated.
6474  *
6475  * Returns true or false.
6476  */
6477 static bool intel_wm_need_update(const struct intel_plane_state *cur,
6478                                  struct intel_plane_state *new)
6479 {
6480         /* Update watermarks on tiling or size changes. */
6481         if (new->uapi.visible != cur->uapi.visible)
6482                 return true;
6483
6484         if (!cur->hw.fb || !new->hw.fb)
6485                 return false;
6486
6487         if (cur->hw.fb->modifier != new->hw.fb->modifier ||
6488             cur->hw.rotation != new->hw.rotation ||
6489             drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
6490             drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
6491             drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
6492             drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
6493                 return true;
6494
6495         return false;
6496 }
6497
6498 static bool needs_scaling(const struct intel_plane_state *state)
6499 {
6500         int src_w = drm_rect_width(&state->uapi.src) >> 16;
6501         int src_h = drm_rect_height(&state->uapi.src) >> 16;
6502         int dst_w = drm_rect_width(&state->uapi.dst);
6503         int dst_h = drm_rect_height(&state->uapi.dst);
6504
6505         return (src_w != dst_w || src_h != dst_h);
6506 }
6507
/*
 * intel_plane_atomic_calc_changes - derive crtc-level state from a plane change
 * @old_crtc_state: crtc state before this commit
 * @crtc_state: new crtc state, updated in place
 * @old_plane_state: plane state before this commit
 * @plane_state: new plane state
 *
 * Based on the plane's old vs. new visibility, sets the crtc-level
 * flags driving watermark updates (update_wm_pre/post), cxsr and LP
 * watermark disabling, and frontbuffer tracking bits.
 *
 * Returns 0 on success or a negative error code from scaler setup.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *crtc_state,
				    const struct intel_plane_state *old_plane_state,
				    struct intel_plane_state *plane_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	bool mode_changed = intel_crtc_needs_modeset(crtc_state);
	bool was_crtc_enabled = old_crtc_state->hw.active;
	bool is_crtc_enabled = crtc_state->hw.active;
	bool turn_off, turn_on, visible, was_visible;
	int ret;

	/* Non-cursor planes on gen9+ may need a pipe scaler assigned. */
	if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(crtc_state, plane_state);
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->uapi.visible;
	visible = plane_state->uapi.visible;

	if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		intel_plane_set_invisible(crtc_state, plane_state);
		visible = false;
	}

	/* Plane stays invisible: nothing to recompute. */
	if (!was_visible && !visible)
		return 0;

	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	drm_dbg_atomic(&dev_priv->drm,
		       "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
		       crtc->base.base.id, crtc->base.name,
		       plane->base.base.id, plane->base.name,
		       was_visible, visible,
		       turn_off, turn_on, mode_changed);

	if (turn_on) {
		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (turn_off) {
		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (intel_wm_need_update(old_plane_state, plane_state)) {
		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			crtc_state->update_wm_pre = true;
			crtc_state->update_wm_post = true;
		}
	}

	if (visible || was_visible)
		crtc_state->fb_bits |= plane->frontbuffer_bit;

	/*
	 * ILK/SNB DVSACNTR/Sprite Enable
	 * IVB SPR_CTL/Sprite Enable
	 * "When in Self Refresh Big FIFO mode, a write to enable the
	 *  plane will be internally buffered and delayed while Big FIFO
	 *  mode is exiting."
	 *
	 * Which means that enabling the sprite can take an extra frame
	 * when we start in big FIFO mode (LP1+). Thus we need to drop
	 * down to LP0 and wait for vblank in order to make sure the
	 * sprite gets enabled on the next vblank after the register write.
	 * Doing otherwise would risk enabling the sprite one frame after
	 * we've already signalled flip completion. We can resume LP1+
	 * once the sprite has been enabled.
	 *
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 * IVB SPR_SCALE/Scaling Enable
	 * "Low Power watermarks must be disabled for at least one
	 *  frame before enabling sprite scaling, and kept disabled
	 *  until sprite scaling is disabled."
	 *
	 * ILK/SNB DVSASCALE/Scaling Enable
	 * "When in Self Refresh Big FIFO mode, scaling enable will be
	 *  masked off while Big FIFO mode is exiting."
	 *
	 * Despite the w/a only being listed for IVB we assume that
	 * the ILK/SNB note has similar ramifications, hence we apply
	 * the w/a on all three platforms.
	 *
	 * With experimental results seems this is needed also for primary
	 * plane, not only sprite plane.
	 */
	if (plane->id != PLANE_CURSOR &&
	    (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) ||
	     IS_IVYBRIDGE(dev_priv)) &&
	    (turn_on || (!needs_scaling(old_plane_state) &&
			 needs_scaling(plane_state))))
		crtc_state->disable_lp_wm = true;

	return 0;
}
6629
6630 static bool encoders_cloneable(const struct intel_encoder *a,
6631                                const struct intel_encoder *b)
6632 {
6633         /* masks could be asymmetric, so check both ways */
6634         return a == b || (a->cloneable & (1 << b->type) &&
6635                           b->cloneable & (1 << a->type));
6636 }
6637
6638 static bool check_single_encoder_cloning(struct intel_atomic_state *state,
6639                                          struct intel_crtc *crtc,
6640                                          struct intel_encoder *encoder)
6641 {
6642         struct intel_encoder *source_encoder;
6643         struct drm_connector *connector;
6644         struct drm_connector_state *connector_state;
6645         int i;
6646
6647         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
6648                 if (connector_state->crtc != &crtc->base)
6649                         continue;
6650
6651                 source_encoder =
6652                         to_intel_encoder(connector_state->best_encoder);
6653                 if (!encoders_cloneable(encoder, source_encoder))
6654                         return false;
6655         }
6656
6657         return true;
6658 }
6659
6660 static int icl_add_linked_planes(struct intel_atomic_state *state)
6661 {
6662         struct intel_plane *plane, *linked;
6663         struct intel_plane_state *plane_state, *linked_plane_state;
6664         int i;
6665
6666         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
6667                 linked = plane_state->planar_linked_plane;
6668
6669                 if (!linked)
6670                         continue;
6671
6672                 linked_plane_state = intel_atomic_get_plane_state(state, linked);
6673                 if (IS_ERR(linked_plane_state))
6674                         return PTR_ERR(linked_plane_state);
6675
6676                 drm_WARN_ON(state->base.dev,
6677                             linked_plane_state->planar_linked_plane != plane);
6678                 drm_WARN_ON(state->base.dev,
6679                             linked_plane_state->planar_slave == plane_state->planar_slave);
6680         }
6681
6682         return 0;
6683 }
6684
/*
 * icl_check_nv12_planes - (re)assign Y planes for planar YUV planes
 * @crtc_state: new crtc state
 *
 * On gen11+ a planar (NV12-style) plane needs a second hw plane to
 * scan out the Y component.  Tear down all stale plane links first,
 * then pick an unused Y-capable plane for each plane in
 * crtc_state->nv12_planes and copy the relevant parameters over to
 * the slave.
 *
 * Returns 0 on success, -EINVAL if not enough free Y planes exist,
 * or a negative error code from acquiring a plane state.
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;
	int i;

	if (DISPLAY_VER(dev_priv) < 11)
		return 0;

	/*
	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
			continue;

		plane_state->planar_linked_plane = NULL;
		if (plane_state->planar_slave && !plane_state->uapi.visible) {
			crtc_state->enabled_planes &= ~BIT(plane->id);
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);
		}

		plane_state->planar_slave = false;
	}

	/* Done if no plane needs a Y companion. */
	if (!crtc_state->nv12_planes)
		return 0;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;

		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))
			continue;

		/* Find a free Y-capable plane on this crtc. */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(dev_priv, linked->id))
				continue;

			if (crtc_state->active_planes & BIT(linked->id))
				continue;

			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);

			break;
		}

		if (!linked_state) {
			drm_dbg_kms(&dev_priv->drm,
				    "Need %d free Y planes for planar YUV\n",
				    hweight8(crtc_state->nv12_planes));

			return -EINVAL;
		}

		plane_state->planar_linked_plane = linked;

		linked_state->planar_slave = true;
		linked_state->planar_linked_plane = plane;
		crtc_state->enabled_planes |= BIT(linked->id);
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
			    linked->base.name, plane->base.name);

		/* Copy parameters to slave plane */
		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
		linked_state->color_ctl = plane_state->color_ctl;
		linked_state->view = plane_state->view;

		intel_plane_copy_hw_state(linked_state, plane_state);
		linked_state->uapi.src = plane_state->uapi.src;
		linked_state->uapi.dst = plane_state->uapi.dst;

		if (icl_is_hdr_plane(dev_priv, plane->id)) {
			/* Route the chroma upsampler to the chosen Y plane. */
			if (linked->id == PLANE_SPRITE5)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
			else if (linked->id == PLANE_SPRITE4)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
			else if (linked->id == PLANE_SPRITE3)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL;
			else if (linked->id == PLANE_SPRITE2)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL;
			else
				MISSING_CASE(linked->id);
		}
	}

	return 0;
}
6782
6783 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
6784 {
6785         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
6786         struct intel_atomic_state *state =
6787                 to_intel_atomic_state(new_crtc_state->uapi.state);
6788         const struct intel_crtc_state *old_crtc_state =
6789                 intel_atomic_get_old_crtc_state(state, crtc);
6790
6791         return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
6792 }
6793
6794 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
6795 {
6796         const struct drm_display_mode *pipe_mode =
6797                 &crtc_state->hw.pipe_mode;
6798         int linetime_wm;
6799
6800         if (!crtc_state->hw.enable)
6801                 return 0;
6802
6803         linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
6804                                         pipe_mode->crtc_clock);
6805
6806         return min(linetime_wm, 0x1ff);
6807 }
6808
6809 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
6810                                const struct intel_cdclk_state *cdclk_state)
6811 {
6812         const struct drm_display_mode *pipe_mode =
6813                 &crtc_state->hw.pipe_mode;
6814         int linetime_wm;
6815
6816         if (!crtc_state->hw.enable)
6817                 return 0;
6818
6819         linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
6820                                         cdclk_state->logical.cdclk);
6821
6822         return min(linetime_wm, 0x1ff);
6823 }
6824
6825 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
6826 {
6827         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6828         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6829         const struct drm_display_mode *pipe_mode =
6830                 &crtc_state->hw.pipe_mode;
6831         int linetime_wm;
6832
6833         if (!crtc_state->hw.enable)
6834                 return 0;
6835
6836         linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
6837                                    crtc_state->pixel_rate);
6838
6839         /* Display WA #1135: BXT:ALL GLK:ALL */
6840         if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
6841             dev_priv->ipc_enabled)
6842                 linetime_wm /= 2;
6843
6844         return min(linetime_wm, 0x1ff);
6845 }
6846
6847 static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
6848                                    struct intel_crtc *crtc)
6849 {
6850         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6851         struct intel_crtc_state *crtc_state =
6852                 intel_atomic_get_new_crtc_state(state, crtc);
6853         const struct intel_cdclk_state *cdclk_state;
6854
6855         if (DISPLAY_VER(dev_priv) >= 9)
6856                 crtc_state->linetime = skl_linetime_wm(crtc_state);
6857         else
6858                 crtc_state->linetime = hsw_linetime_wm(crtc_state);
6859
6860         if (!hsw_crtc_supports_ips(crtc))
6861                 return 0;
6862
6863         cdclk_state = intel_atomic_get_cdclk_state(state);
6864         if (IS_ERR(cdclk_state))
6865                 return PTR_ERR(cdclk_state);
6866
6867         crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
6868                                                        cdclk_state);
6869
6870         return 0;
6871 }
6872
/*
 * Per-CRTC atomic check: validate the new CRTC state and derive
 * dependent state (clocks, color management, watermarks, scalers, IPS,
 * linetime, PSR2 selective fetch). Returns 0 on success or a negative
 * error code to fail the atomic commit.
 */
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool mode_changed = intel_crtc_needs_modeset(crtc_state);
	int ret;

	/* pre-gen5 (except G4X): defer WM update when turning the pipe off */
	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
	    mode_changed && !crtc_state->hw.active)
		crtc_state->update_wm_post = true;

	/*
	 * Compute new clocks on a full modeset; bigjoiner slaves are
	 * skipped (they follow the master), and a pre-existing
	 * shared_dpll here would indicate a state-tracking bug.
	 */
	if (mode_changed && crtc_state->hw.enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !crtc_state->bigjoiner_slave &&
	    !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(crtc_state);
		if (ret)
			return ret;
	}

	/*
	 * May need to update pipe gamma enable bits
	 * when C8 planes are getting enabled/disabled.
	 */
	if (c8_planes_changed(crtc_state))
		crtc_state->uapi.color_mgmt_changed = true;

	if (mode_changed || crtc_state->update_pipe ||
	    crtc_state->uapi.color_mgmt_changed) {
		ret = intel_color_check(crtc_state);
		if (ret)
			return ret;
	}

	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(state, crtc);
		if (ret) {
			drm_dbg_kms(&dev_priv->drm,
				    "Target pipe watermarks are invalid\n");
			return ret;
		}

	}

	if (dev_priv->display.compute_intermediate_wm) {
		/* intermediate WMs only make sense when target WMs exist */
		if (drm_WARN_ON(&dev_priv->drm,
				!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state.  We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(state, crtc);
		if (ret) {
			drm_dbg_kms(&dev_priv->drm,
				    "No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	}

	if (DISPLAY_VER(dev_priv) >= 9) {
		if (mode_changed || crtc_state->update_pipe) {
			ret = skl_update_scaler_crtc(crtc_state);
			if (ret)
				return ret;
		}

		ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
		if (ret)
			return ret;
	}

	if (HAS_IPS(dev_priv)) {
		ret = hsw_compute_ips_config(crtc_state);
		if (ret)
			return ret;
	}

	/* linetime watermarks: HSW/BDW and all gen9+ */
	if (DISPLAY_VER(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		ret = hsw_compute_linetime_wm(state, crtc);
		if (ret)
			return ret;

	}

	/* PSR2 selective fetch is only (re)computed on non-modesets */
	if (!mode_changed) {
		ret = intel_psr2_sel_fetch_update(state, crtc);
		if (ret)
			return ret;
	}

	return 0;
}
6971
/*
 * Resynchronize every connector's atomic state with the routing the
 * encoders currently have (e.g. after hardware state readout), and
 * rebalance the connector references held for bound connectors.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state = connector->base.state;
		struct intel_encoder *encoder =
			to_intel_encoder(connector->base.encoder);

		/* drop the reference held while the connector was bound */
		if (conn_state->crtc)
			drm_connector_put(&connector->base);

		if (encoder) {
			struct intel_crtc *crtc =
				to_intel_crtc(encoder->base.crtc);
			const struct intel_crtc_state *crtc_state =
				to_intel_crtc_state(crtc->base.state);

			conn_state->best_encoder = &encoder->base;
			conn_state->crtc = &crtc->base;
			/* pipe_bpp of 0 (not yet computed) falls back to 24 */
			conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3;

			/* a bound connector holds a reference */
			drm_connector_get(&connector->base);
		} else {
			conn_state->best_encoder = NULL;
			conn_state->crtc = NULL;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
7004
7005 static int
7006 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
7007                       struct intel_crtc_state *pipe_config)
7008 {
7009         struct drm_connector *connector = conn_state->connector;
7010         struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
7011         const struct drm_display_info *info = &connector->display_info;
7012         int bpp;
7013
7014         switch (conn_state->max_bpc) {
7015         case 6 ... 7:
7016                 bpp = 6 * 3;
7017                 break;
7018         case 8 ... 9:
7019                 bpp = 8 * 3;
7020                 break;
7021         case 10 ... 11:
7022                 bpp = 10 * 3;
7023                 break;
7024         case 12 ... 16:
7025                 bpp = 12 * 3;
7026                 break;
7027         default:
7028                 MISSING_CASE(conn_state->max_bpc);
7029                 return -EINVAL;
7030         }
7031
7032         if (bpp < pipe_config->pipe_bpp) {
7033                 drm_dbg_kms(&i915->drm,
7034                             "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
7035                             "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
7036                             connector->base.id, connector->name,
7037                             bpp, 3 * info->bpc,
7038                             3 * conn_state->max_requested_bpc,
7039                             pipe_config->pipe_bpp);
7040
7041                 pipe_config->pipe_bpp = bpp;
7042         }
7043
7044         return 0;
7045 }
7046
7047 static int
7048 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
7049                           struct intel_crtc_state *pipe_config)
7050 {
7051         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7052         struct drm_atomic_state *state = pipe_config->uapi.state;
7053         struct drm_connector *connector;
7054         struct drm_connector_state *connector_state;
7055         int bpp, i;
7056
7057         if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
7058             IS_CHERRYVIEW(dev_priv)))
7059                 bpp = 10*3;
7060         else if (DISPLAY_VER(dev_priv) >= 5)
7061                 bpp = 12*3;
7062         else
7063                 bpp = 8*3;
7064
7065         pipe_config->pipe_bpp = bpp;
7066
7067         /* Clamp display bpp to connector max bpp */
7068         for_each_new_connector_in_state(state, connector, connector_state, i) {
7069                 int ret;
7070
7071                 if (connector_state->crtc != &crtc->base)
7072                         continue;
7073
7074                 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
7075                 if (ret)
7076                         return ret;
7077         }
7078
7079         return 0;
7080 }
7081
/* Dump the computed crtc_* timing fields of @mode to the KMS debug log. */
static void intel_dump_crtc_timings(struct drm_i915_private *i915,
				    const struct drm_display_mode *mode)
{
	drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
		    "type: 0x%x flags: 0x%x\n",
		    mode->crtc_clock,
		    mode->crtc_hdisplay, mode->crtc_hsync_start,
		    mode->crtc_hsync_end, mode->crtc_htotal,
		    mode->crtc_vdisplay, mode->crtc_vsync_start,
		    mode->crtc_vsync_end, mode->crtc_vtotal,
		    mode->type, mode->flags);
}
7094
/*
 * Dump one set of link M/N values (identified by @id, e.g. "fdi" or
 * "dp m_n") along with the lane count to the KMS debug log.
 */
static void
intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
		      const char *id, unsigned int lane_count,
		      const struct intel_link_m_n *m_n)
{
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);

	drm_dbg_kms(&i915->drm,
		    "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		    id, lane_count,
		    m_n->gmch_m, m_n->gmch_n,
		    m_n->link_m, m_n->link_n, m_n->tu);
}
7108
7109 static void
7110 intel_dump_infoframe(struct drm_i915_private *dev_priv,
7111                      const union hdmi_infoframe *frame)
7112 {
7113         if (!drm_debug_enabled(DRM_UT_KMS))
7114                 return;
7115
7116         hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
7117 }
7118
7119 static void
7120 intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
7121                       const struct drm_dp_vsc_sdp *vsc)
7122 {
7123         if (!drm_debug_enabled(DRM_UT_KMS))
7124                 return;
7125
7126         drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
7127 }
7128
/*
 * Map each INTEL_OUTPUT_* enum value to its name for debug output;
 * indexed by output type, consumed by snprintf_output_types().
 */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
7147
/*
 * Format the set bits of @output_types as a comma-separated list of
 * output type names into @buf (at most @len bytes, always terminated).
 * Warns once if a bit has no corresponding name in output_type_str[].
 */
static void snprintf_output_types(char *buf, size_t len,
				  unsigned int output_types)
{
	char *str = buf;
	int i;

	str[0] = '\0';

	for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
		int r;

		if ((output_types & BIT(i)) == 0)
			continue;

		r = snprintf(str, len, "%s%s",
			     str != buf ? "," : "", output_type_str[i]);
		/* stop appending once the output would be truncated */
		if (r >= len)
			break;
		str += r;
		len -= r;

		/* clear handled bits so leftovers can be detected below */
		output_types &= ~BIT(i);
	}

	WARN_ON_ONCE(output_types != 0);
}
7174
/* Names for enum intel_output_format values, for debug output. */
static const char * const output_format_str[] = {
	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
};
7180
7181 static const char *output_formats(enum intel_output_format format)
7182 {
7183         if (format >= ARRAY_SIZE(output_format_str))
7184                 return "invalid";
7185         return output_format_str[format];
7186 }
7187
/*
 * Dump one plane's state (framebuffer, rotation, scaler, src/dst
 * rectangles) to the KMS debug log.
 */
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;

	/* a plane with no fb has nothing more to report */
	if (!fb) {
		drm_dbg_kms(&i915->drm,
			    "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
			    plane->base.base.id, plane->base.name,
			    yesno(plane_state->uapi.visible));
		return;
	}

	drm_dbg_kms(&i915->drm,
		    "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
		    plane->base.base.id, plane->base.name,
		    fb->base.id, fb->width, fb->height, &fb->format->format,
		    fb->modifier, yesno(plane_state->uapi.visible));
	drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
		    plane_state->hw.rotation, plane_state->scaler_id);
	/* src/dst rectangles only mean something for a visible plane */
	if (plane_state->uapi.visible)
		drm_dbg_kms(&i915->drm,
			    "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
			    DRM_RECT_FP_ARG(&plane_state->uapi.src),
			    DRM_RECT_ARG(&plane_state->uapi.dst));
}
7215
/*
 * Dump the full contents of a crtc state to the KMS debug log.
 * @context identifies the caller (e.g. state computed vs. read out);
 * @state, if non-NULL, is additionally used to dump the state of all
 * planes on this crtc's pipe.
 */
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
				   struct intel_atomic_state *state,
				   const char *context)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	char buf[64];
	int i;

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
		    crtc->base.base.id, crtc->base.name,
		    yesno(pipe_config->hw.enable), context);

	/* a disabled crtc has no meaningful config; only dump its planes */
	if (!pipe_config->hw.enable)
		goto dump_planes;

	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	drm_dbg_kms(&dev_priv->drm,
		    "active: %s, output_types: %s (0x%x), output format: %s\n",
		    yesno(pipe_config->hw.active),
		    buf, pipe_config->output_types,
		    output_formats(pipe_config->output_format));

	drm_dbg_kms(&dev_priv->drm,
		    "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		    transcoder_name(pipe_config->cpu_transcoder),
		    pipe_config->pipe_bpp, pipe_config->dither);

	drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
		    transcoder_name(pipe_config->mst_master_transcoder));

	drm_dbg_kms(&dev_priv->drm,
		    "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
		    transcoder_name(pipe_config->master_transcoder),
		    pipe_config->sync_mode_slaves_mask);

	drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s\n",
		    pipe_config->bigjoiner_slave ? "slave" :
		    pipe_config->bigjoiner ? "master" : "no");

	drm_dbg_kms(&dev_priv->drm, "splitter: %s, link count %d, overlap %d\n",
		    enableddisabled(pipe_config->splitter.enable),
		    pipe_config->splitter.link_count,
		    pipe_config->splitter.pixel_overlap);

	/* link M/N values, for PCH FDI and DP outputs */
	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
		    pipe_config->has_audio, pipe_config->has_infoframe,
		    pipe_config->infoframes.enable);

	/* dump each enabled infoframe/SDP */
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
		drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
			    pipe_config->infoframes.gcp);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
	/*
	 * NOTE(review): GAMUT_METADATA also dumps infoframes.drm —
	 * presumably HDR metadata is carried in a gamut metadata packet
	 * on some paths; confirm this is intentional and not a paste-o.
	 */
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(DP_SDP_VSC))
		intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);

	drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
		    yesno(pipe_config->vrr.enable),
		    pipe_config->vrr.vmin, pipe_config->vrr.vmax,
		    pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
		    pipe_config->vrr.flipline,
		    intel_vrr_vmin_vblank_start(pipe_config),
		    intel_vrr_vmax_vblank_start(pipe_config));

	/* requested, adjusted and pipe modes plus derived timings */
	drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
	drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
	drm_dbg_kms(&dev_priv->drm, "pipe mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.pipe_mode);
	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.pipe_mode);
	drm_dbg_kms(&dev_priv->drm,
		    "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		    pipe_config->port_clock,
		    pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		    pipe_config->pixel_rate);

	drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
		    pipe_config->linetime, pipe_config->ips_linetime);

	if (DISPLAY_VER(dev_priv) >= 9)
		drm_dbg_kms(&dev_priv->drm,
			    "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			    crtc->num_scalers,
			    pipe_config->scaler_state.scaler_users,
			    pipe_config->scaler_state.scaler_id);

	/* panel fitter: GMCH platforms vs PCH-style */
	if (HAS_GMCH(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			    pipe_config->gmch_pfit.control,
			    pipe_config->gmch_pfit.pgm_ratios,
			    pipe_config->gmch_pfit.lvds_border_bits);
	else
		drm_dbg_kms(&dev_priv->drm,
			    "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
			    DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
			    enableddisabled(pipe_config->pch_pfit.enabled),
			    yesno(pipe_config->pch_pfit.force_thru));

	drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
		    pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	/* color management registers differ on CHV (cgm vs csc mode) */
	if (IS_CHERRYVIEW(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			    pipe_config->cgm_mode, pipe_config->gamma_mode,
			    pipe_config->gamma_enable, pipe_config->csc_enable);
	else
		drm_dbg_kms(&dev_priv->drm,
			    "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			    pipe_config->csc_mode, pipe_config->gamma_mode,
			    pipe_config->gamma_enable, pipe_config->csc_enable);

	drm_dbg_kms(&dev_priv->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
		    pipe_config->hw.degamma_lut ?
		    drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
		    pipe_config->hw.gamma_lut ?
		    drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);

dump_planes:
	if (!state)
		return;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe)
			intel_dump_plane_state(plane_state);
	}
}
7381
7382 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
7383 {
7384         struct drm_device *dev = state->base.dev;
7385         struct drm_connector *connector;
7386         struct drm_connector_list_iter conn_iter;
7387         unsigned int used_ports = 0;
7388         unsigned int used_mst_ports = 0;
7389         bool ret = true;
7390
7391         /*
7392          * We're going to peek into connector->state,
7393          * hence connection_mutex must be held.
7394          */
7395         drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
7396
7397         /*
7398          * Walk the connector list instead of the encoder
7399          * list to detect the problem on ddi platforms
7400          * where there's just one encoder per digital port.
7401          */
7402         drm_connector_list_iter_begin(dev, &conn_iter);
7403         drm_for_each_connector_iter(connector, &conn_iter) {
7404                 struct drm_connector_state *connector_state;
7405                 struct intel_encoder *encoder;
7406
7407                 connector_state =
7408                         drm_atomic_get_new_connector_state(&state->base,
7409                                                            connector);
7410                 if (!connector_state)
7411                         connector_state = connector->state;
7412
7413                 if (!connector_state->best_encoder)
7414                         continue;
7415
7416                 encoder = to_intel_encoder(connector_state->best_encoder);
7417
7418                 drm_WARN_ON(dev, !connector_state->crtc);
7419
7420                 switch (encoder->type) {
7421                 case INTEL_OUTPUT_DDI:
7422                         if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
7423                                 break;
7424                         fallthrough;
7425                 case INTEL_OUTPUT_DP:
7426                 case INTEL_OUTPUT_HDMI:
7427                 case INTEL_OUTPUT_EDP:
7428                         /* the same port mustn't appear more than once */
7429                         if (used_ports & BIT(encoder->port))
7430                                 ret = false;
7431
7432                         used_ports |= BIT(encoder->port);
7433                         break;
7434                 case INTEL_OUTPUT_DP_MST:
7435                         used_mst_ports |=
7436                                 1 << encoder->port;
7437                         break;
7438                 default:
7439                         break;
7440                 }
7441         }
7442         drm_connector_list_iter_end(&conn_iter);
7443
7444         /* can't mix MST and SST/HDMI on the same port */
7445         if (used_ports & used_mst_ports)
7446                 return false;
7447
7448         return ret;
7449 }
7450
7451 static void
7452 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
7453                                            struct intel_crtc_state *crtc_state)
7454 {
7455         const struct intel_crtc_state *from_crtc_state = crtc_state;
7456
7457         if (crtc_state->bigjoiner_slave) {
7458                 from_crtc_state = intel_atomic_get_new_crtc_state(state,
7459                                                                   crtc_state->bigjoiner_linked_crtc);
7460
7461                 /* No need to copy state if the master state is unchanged */
7462                 if (!from_crtc_state)
7463                         return;
7464         }
7465
7466         intel_crtc_copy_color_blobs(crtc_state, from_crtc_state);
7467 }
7468
/*
 * Copy the userspace-visible (uapi) crtc state into the hw state that
 * the rest of the driver programs from, for a full modeset.
 */
static void
intel_crtc_copy_uapi_to_hw_state(struct intel_atomic_state *state,
				 struct intel_crtc_state *crtc_state)
{
	crtc_state->hw.enable = crtc_state->uapi.enable;
	crtc_state->hw.active = crtc_state->uapi.active;
	crtc_state->hw.mode = crtc_state->uapi.mode;
	crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
	crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;

	/* color blobs are handled separately (bigjoiner slaves use the master's) */
	intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc_state);
}
7481
/*
 * Propagate the hw crtc state back into the userspace-visible uapi
 * state (e.g. after hardware readout). Bigjoiner slaves are skipped:
 * their uapi state stays disabled, only the master is user-visible.
 */
static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
{
	if (crtc_state->bigjoiner_slave)
		return;

	crtc_state->uapi.enable = crtc_state->hw.enable;
	crtc_state->uapi.active = crtc_state->hw.active;
	/* duplicates hw.mode into the uapi mode blob; failure is a bug */
	drm_WARN_ON(crtc_state->uapi.crtc->dev,
		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);

	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
	crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;

	/* copy color blobs to uapi */
	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
				  crtc_state->hw.degamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
				  crtc_state->hw.gamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.ctm,
				  crtc_state->hw.ctm);
}
7503
/*
 * Turn @crtc_state into a bigjoiner slave copy of the master's
 * @from_crtc_state, preserving this crtc's own uapi state and
 * per-crtc resources (scalers, DPLL, CRC). Returns 0 on success or
 * -ENOMEM.
 */
static int
copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state,
			  const struct intel_crtc_state *from_crtc_state)
{
	struct intel_crtc_state *saved_state;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	/* start from a full copy of the master state */
	saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL);
	if (!saved_state)
		return -ENOMEM;

	/* but keep this crtc's own identity and resources */
	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	saved_state->crc_enabled = crtc_state->crc_enabled;

	/* release hw blob refs before they are overwritten by memcpy */
	intel_crtc_free_hw_state(crtc_state);
	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	/* Re-init hw state */
	memset(&crtc_state->hw, 0, sizeof(saved_state->hw));
	crtc_state->hw.enable = from_crtc_state->hw.enable;
	crtc_state->hw.active = from_crtc_state->hw.active;
	crtc_state->hw.pipe_mode = from_crtc_state->hw.pipe_mode;
	crtc_state->hw.adjusted_mode = from_crtc_state->hw.adjusted_mode;

	/* Some fixups */
	crtc_state->uapi.mode_changed = from_crtc_state->uapi.mode_changed;
	crtc_state->uapi.connectors_changed = from_crtc_state->uapi.connectors_changed;
	crtc_state->uapi.active_changed = from_crtc_state->uapi.active_changed;
	crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0;
	crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc);
	crtc_state->bigjoiner_slave = true;
	/* slave uses its own pipe's transcoder, not the master's */
	crtc_state->cpu_transcoder = (enum transcoder)crtc->pipe;
	crtc_state->has_audio = false;

	return 0;
}
7544
/*
 * Reset @crtc_state to a freshly-allocated (cleared) state while
 * preserving the fields that must survive a modeset compute pass
 * (uapi state, scaler state, DPLL selections, CRC enablement, and —
 * NOTE(review): presumably because these platforms track watermarks in
 * the committed state, confirm — the wm state on G4X/VLV/CHV).
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int
intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
				 struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *saved_state;

	/* Fresh zeroed state to copy the preserved fields into. */
	saved_state = intel_crtc_state_alloc(crtc);
	if (!saved_state)
		return -ENOMEM;

	/* free the old crtc_state->hw members */
	intel_crtc_free_hw_state(crtc_state);

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;

	/* Replace the old state wholesale, then free the temporary copy. */
	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	intel_crtc_copy_uapi_to_hw_state(state, crtc_state);

	return 0;
}
7583
/*
 * Compute the complete pipe configuration for @pipe_config.
 *
 * Sanitizes the sync polarity flags, computes the baseline pipe bpp and
 * source dimensions, lets every encoder on the crtc adjust the state via
 * its .compute_config() hook, then runs the crtc-level fixup. The whole
 * encoder + crtc pass is retried once if the crtc fixup returns
 * I915_DISPLAY_CONFIG_RETRY (bandwidth constrained).
 *
 * Returns 0 on success, -EDEADLK if a lock needs to be retried, or
 * another negative error code on failure.
 */
static int
intel_modeset_pipe_config(struct intel_atomic_state *state,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret, i;
	bool retry = true;

	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					pipe_config);
	if (ret)
		return ret;

	/* Remember the starting bpp so it can be logged against the final one. */
	base_bpp = pipe_config->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->hw.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != crtc)
			continue;

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			drm_dbg_kms(&i915->drm,
				    "rejecting invalid cloning configuration\n");
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != crtc)
			continue;

		ret = encoder->compute_config(encoder, pipe_config,
					      connector_state);
		if (ret < 0) {
			/* -EDEADLK just means "retry the locking"; don't log it. */
			if (ret != -EDEADLK)
				drm_dbg_kms(&i915->drm,
					    "Encoder config failure: %d\n",
					    ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret == -EDEADLK)
		return ret;
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
		return ret;
	}

	/* Only a single retry is allowed; a second request means a loop. */
	if (ret == I915_DISPLAY_CONFIG_RETRY) {
		if (drm_WARN(&i915->drm, !retry,
			     "loop in pipe configuration computation\n"))
			return -EINVAL;

		drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	drm_dbg_kms(&i915->drm,
		    "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		    base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	return 0;
}
7722
7723 static int
7724 intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
7725 {
7726         struct intel_atomic_state *state =
7727                 to_intel_atomic_state(crtc_state->uapi.state);
7728         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7729         struct drm_connector_state *conn_state;
7730         struct drm_connector *connector;
7731         int i;
7732
7733         for_each_new_connector_in_state(&state->base, connector,
7734                                         conn_state, i) {
7735                 struct intel_encoder *encoder =
7736                         to_intel_encoder(conn_state->best_encoder);
7737                 int ret;
7738
7739                 if (conn_state->crtc != &crtc->base ||
7740                     !encoder->compute_config_late)
7741                         continue;
7742
7743                 ret = encoder->compute_config_late(encoder, crtc_state,
7744                                                    conn_state);
7745                 if (ret)
7746                         return ret;
7747         }
7748
7749         return 0;
7750 }
7751
/*
 * Compare two clocks (in kHz) with a small tolerance.
 *
 * Equal clocks always match; a zero clock never matches a non-zero one.
 * Otherwise the pair is accepted when
 * (|clock1 - clock2| + clock1 + clock2) * 100 / (clock1 + clock2) < 105,
 * i.e. the difference is below roughly 5% of the combined clocks.
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int delta;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	delta = abs(clock1 - clock2);

	return ((delta + clock1 + clock2) * 100) / (clock1 + clock2) < 105;
}
7769
7770 static bool
7771 intel_compare_m_n(unsigned int m, unsigned int n,
7772                   unsigned int m2, unsigned int n2,
7773                   bool exact)
7774 {
7775         if (m == m2 && n == n2)
7776                 return true;
7777
7778         if (exact || !m || !n || !m2 || !n2)
7779                 return false;
7780
7781         BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
7782
7783         if (n > n2) {
7784                 while (n > n2) {
7785                         m2 <<= 1;
7786                         n2 <<= 1;
7787                 }
7788         } else if (n < n2) {
7789                 while (n < n2) {
7790                         m <<= 1;
7791                         n <<= 1;
7792                 }
7793         }
7794
7795         if (n != n2)
7796                 return false;
7797
7798         return intel_fuzzy_clock_check(m, m2);
7799 }
7800
7801 static bool
7802 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
7803                        const struct intel_link_m_n *m2_n2,
7804                        bool exact)
7805 {
7806         return m_n->tu == m2_n2->tu &&
7807                 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
7808                                   m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
7809                 intel_compare_m_n(m_n->link_m, m_n->link_n,
7810                                   m2_n2->link_m, m2_n2->link_n, exact);
7811 }
7812
7813 static bool
7814 intel_compare_infoframe(const union hdmi_infoframe *a,
7815                         const union hdmi_infoframe *b)
7816 {
7817         return memcmp(a, b, sizeof(*a)) == 0;
7818 }
7819
7820 static bool
7821 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
7822                          const struct drm_dp_vsc_sdp *b)
7823 {
7824         return memcmp(a, b, sizeof(*a)) == 0;
7825 }
7826
7827 static void
7828 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
7829                                bool fastset, const char *name,
7830                                const union hdmi_infoframe *a,
7831                                const union hdmi_infoframe *b)
7832 {
7833         if (fastset) {
7834                 if (!drm_debug_enabled(DRM_UT_KMS))
7835                         return;
7836
7837                 drm_dbg_kms(&dev_priv->drm,
7838                             "fastset mismatch in %s infoframe\n", name);
7839                 drm_dbg_kms(&dev_priv->drm, "expected:\n");
7840                 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
7841                 drm_dbg_kms(&dev_priv->drm, "found:\n");
7842                 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
7843         } else {
7844                 drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
7845                 drm_err(&dev_priv->drm, "expected:\n");
7846                 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
7847                 drm_err(&dev_priv->drm, "found:\n");
7848                 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
7849         }
7850 }
7851
7852 static void
7853 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
7854                                 bool fastset, const char *name,
7855                                 const struct drm_dp_vsc_sdp *a,
7856                                 const struct drm_dp_vsc_sdp *b)
7857 {
7858         if (fastset) {
7859                 if (!drm_debug_enabled(DRM_UT_KMS))
7860                         return;
7861
7862                 drm_dbg_kms(&dev_priv->drm,
7863                             "fastset mismatch in %s dp sdp\n", name);
7864                 drm_dbg_kms(&dev_priv->drm, "expected:\n");
7865                 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
7866                 drm_dbg_kms(&dev_priv->drm, "found:\n");
7867                 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
7868         } else {
7869                 drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
7870                 drm_err(&dev_priv->drm, "expected:\n");
7871                 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
7872                 drm_err(&dev_priv->drm, "found:\n");
7873                 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
7874         }
7875 }
7876
7877 static void __printf(4, 5)
7878 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
7879                      const char *name, const char *format, ...)
7880 {
7881         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
7882         struct va_format vaf;
7883         va_list args;
7884
7885         va_start(args, format);
7886         vaf.fmt = format;
7887         vaf.va = &args;
7888
7889         if (fastset)
7890                 drm_dbg_kms(&i915->drm,
7891                             "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
7892                             crtc->base.base.id, crtc->base.name, name, &vaf);
7893         else
7894                 drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
7895                         crtc->base.base.id, crtc->base.name, name, &vaf);
7896
7897         va_end(args);
7898 }
7899
7900 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
7901 {
7902         if (dev_priv->params.fastboot != -1)
7903                 return dev_priv->params.fastboot;
7904
7905         /* Enable fastboot by default on Skylake and newer */
7906         if (DISPLAY_VER(dev_priv) >= 9)
7907                 return true;
7908
7909         /* Enable fastboot by default on VLV and CHV */
7910         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
7911                 return true;
7912
7913         /* Disabled by default on all others */
7914         return false;
7915 }
7916
7917 static bool
7918 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
7919                           const struct intel_crtc_state *pipe_config,
7920                           bool fastset)
7921 {
7922         struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
7923         struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
7924         bool ret = true;
7925         u32 bp_gamma = 0;
7926         bool fixup_inherited = fastset &&
7927                 current_config->inherited && !pipe_config->inherited;
7928
7929         if (fixup_inherited && !fastboot_enabled(dev_priv)) {
7930                 drm_dbg_kms(&dev_priv->drm,
7931                             "initial modeset and fastboot not set\n");
7932                 ret = false;
7933         }
7934
7935 #define PIPE_CONF_CHECK_X(name) do { \
7936         if (current_config->name != pipe_config->name) { \
7937                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
7938                                      "(expected 0x%08x, found 0x%08x)", \
7939                                      current_config->name, \
7940                                      pipe_config->name); \
7941                 ret = false; \
7942         } \
7943 } while (0)
7944
7945 #define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \
7946         if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
7947                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
7948                                      "(expected 0x%08x, found 0x%08x)", \
7949                                      current_config->name & (mask), \
7950                                      pipe_config->name & (mask)); \
7951                 ret = false; \
7952         } \
7953 } while (0)
7954
7955 #define PIPE_CONF_CHECK_I(name) do { \
7956         if (current_config->name != pipe_config->name) { \
7957                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
7958                                      "(expected %i, found %i)", \
7959                                      current_config->name, \
7960                                      pipe_config->name); \
7961                 ret = false; \
7962         } \
7963 } while (0)
7964
7965 #define PIPE_CONF_CHECK_BOOL(name) do { \
7966         if (current_config->name != pipe_config->name) { \
7967                 pipe_config_mismatch(fastset, crtc,  __stringify(name), \
7968                                      "(expected %s, found %s)", \
7969                                      yesno(current_config->name), \
7970                                      yesno(pipe_config->name)); \
7971                 ret = false; \
7972         } \
7973 } while (0)
7974
7975 /*
7976  * Checks state where we only read out the enabling, but not the entire
7977  * state itself (like full infoframes or ELD for audio). These states
7978  * require a full modeset on bootup to fix up.
7979  */
7980 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
7981         if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
7982                 PIPE_CONF_CHECK_BOOL(name); \
7983         } else { \
7984                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
7985                                      "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
7986                                      yesno(current_config->name), \
7987                                      yesno(pipe_config->name)); \
7988                 ret = false; \
7989         } \
7990 } while (0)
7991
7992 #define PIPE_CONF_CHECK_P(name) do { \
7993         if (current_config->name != pipe_config->name) { \
7994                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
7995                                      "(expected %p, found %p)", \
7996                                      current_config->name, \
7997                                      pipe_config->name); \
7998                 ret = false; \
7999         } \
8000 } while (0)
8001
8002 #define PIPE_CONF_CHECK_M_N(name) do { \
8003         if (!intel_compare_link_m_n(&current_config->name, \
8004                                     &pipe_config->name,\
8005                                     !fastset)) { \
8006                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8007                                      "(expected tu %i gmch %i/%i link %i/%i, " \
8008                                      "found tu %i, gmch %i/%i link %i/%i)", \
8009                                      current_config->name.tu, \
8010                                      current_config->name.gmch_m, \
8011                                      current_config->name.gmch_n, \
8012                                      current_config->name.link_m, \
8013                                      current_config->name.link_n, \
8014                                      pipe_config->name.tu, \
8015                                      pipe_config->name.gmch_m, \
8016                                      pipe_config->name.gmch_n, \
8017                                      pipe_config->name.link_m, \
8018                                      pipe_config->name.link_n); \
8019                 ret = false; \
8020         } \
8021 } while (0)
8022
8023 /* This is required for BDW+ where there is only one set of registers for
8024  * switching between high and low RR.
8025  * This macro can be used whenever a comparison has to be made between one
8026  * hw state and multiple sw state variables.
8027  */
8028 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
8029         if (!intel_compare_link_m_n(&current_config->name, \
8030                                     &pipe_config->name, !fastset) && \
8031             !intel_compare_link_m_n(&current_config->alt_name, \
8032                                     &pipe_config->name, !fastset)) { \
8033                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8034                                      "(expected tu %i gmch %i/%i link %i/%i, " \
8035                                      "or tu %i gmch %i/%i link %i/%i, " \
8036                                      "found tu %i, gmch %i/%i link %i/%i)", \
8037                                      current_config->name.tu, \
8038                                      current_config->name.gmch_m, \
8039                                      current_config->name.gmch_n, \
8040                                      current_config->name.link_m, \
8041                                      current_config->name.link_n, \
8042                                      current_config->alt_name.tu, \
8043                                      current_config->alt_name.gmch_m, \
8044                                      current_config->alt_name.gmch_n, \
8045                                      current_config->alt_name.link_m, \
8046                                      current_config->alt_name.link_n, \
8047                                      pipe_config->name.tu, \
8048                                      pipe_config->name.gmch_m, \
8049                                      pipe_config->name.gmch_n, \
8050                                      pipe_config->name.link_m, \
8051                                      pipe_config->name.link_n); \
8052                 ret = false; \
8053         } \
8054 } while (0)
8055
8056 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
8057         if ((current_config->name ^ pipe_config->name) & (mask)) { \
8058                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8059                                      "(%x) (expected %i, found %i)", \
8060                                      (mask), \
8061                                      current_config->name & (mask), \
8062                                      pipe_config->name & (mask)); \
8063                 ret = false; \
8064         } \
8065 } while (0)
8066
8067 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
8068         if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
8069                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8070                                      "(expected %i, found %i)", \
8071                                      current_config->name, \
8072                                      pipe_config->name); \
8073                 ret = false; \
8074         } \
8075 } while (0)
8076
8077 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
8078         if (!intel_compare_infoframe(&current_config->infoframes.name, \
8079                                      &pipe_config->infoframes.name)) { \
8080                 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
8081                                                &current_config->infoframes.name, \
8082                                                &pipe_config->infoframes.name); \
8083                 ret = false; \
8084         } \
8085 } while (0)
8086
8087 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
8088         if (!current_config->has_psr && !pipe_config->has_psr && \
8089             !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
8090                                       &pipe_config->infoframes.name)) { \
8091                 pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
8092                                                 &current_config->infoframes.name, \
8093                                                 &pipe_config->infoframes.name); \
8094                 ret = false; \
8095         } \
8096 } while (0)
8097
8098 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
8099         if (current_config->name1 != pipe_config->name1) { \
8100                 pipe_config_mismatch(fastset, crtc, __stringify(name1), \
8101                                 "(expected %i, found %i, won't compare lut values)", \
8102                                 current_config->name1, \
8103                                 pipe_config->name1); \
8104                 ret = false;\
8105         } else { \
8106                 if (!intel_color_lut_equal(current_config->name2, \
8107                                         pipe_config->name2, pipe_config->name1, \
8108                                         bit_precision)) { \
8109                         pipe_config_mismatch(fastset, crtc, __stringify(name2), \
8110                                         "hw_state doesn't match sw_state"); \
8111                         ret = false; \
8112                 } \
8113         } \
8114 } while (0)
8115
8116 #define PIPE_CONF_QUIRK(quirk) \
8117         ((current_config->quirks | pipe_config->quirks) & (quirk))
8118
8119         PIPE_CONF_CHECK_I(cpu_transcoder);
8120
8121         PIPE_CONF_CHECK_BOOL(has_pch_encoder);
8122         PIPE_CONF_CHECK_I(fdi_lanes);
8123         PIPE_CONF_CHECK_M_N(fdi_m_n);
8124
8125         PIPE_CONF_CHECK_I(lane_count);
8126         PIPE_CONF_CHECK_X(lane_lat_optim_mask);
8127
8128         if (DISPLAY_VER(dev_priv) < 8) {
8129                 PIPE_CONF_CHECK_M_N(dp_m_n);
8130
8131                 if (current_config->has_drrs)
8132                         PIPE_CONF_CHECK_M_N(dp_m2_n2);
8133         } else
8134                 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
8135
8136         PIPE_CONF_CHECK_X(output_types);
8137
8138         /* FIXME do the readout properly and get rid of this quirk */
8139         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
8140                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
8141                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
8142                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
8143                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
8144                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
8145                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);
8146
8147                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
8148                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
8149                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
8150                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
8151                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
8152                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);
8153
8154                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
8155                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
8156                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
8157                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
8158                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
8159                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
8160
8161                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
8162                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
8163                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
8164                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
8165                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
8166                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
8167
8168                 PIPE_CONF_CHECK_I(pixel_multiplier);
8169
8170                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8171                                       DRM_MODE_FLAG_INTERLACE);
8172
8173                 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
8174                         PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8175                                               DRM_MODE_FLAG_PHSYNC);
8176                         PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8177                                               DRM_MODE_FLAG_NHSYNC);
8178                         PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8179                                               DRM_MODE_FLAG_PVSYNC);
8180                         PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8181                                               DRM_MODE_FLAG_NVSYNC);
8182                 }
8183         }
8184
8185         PIPE_CONF_CHECK_I(output_format);
8186         PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
8187         if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
8188             IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
8189                 PIPE_CONF_CHECK_BOOL(limited_color_range);
8190
8191         PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
8192         PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
8193         PIPE_CONF_CHECK_BOOL(has_infoframe);
8194         /* FIXME do the readout properly and get rid of this quirk */
8195         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
8196                 PIPE_CONF_CHECK_BOOL(fec_enable);
8197
8198         PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
8199
8200         PIPE_CONF_CHECK_X(gmch_pfit.control);
8201         /* pfit ratios are autocomputed by the hw on gen4+ */
8202         if (DISPLAY_VER(dev_priv) < 4)
8203                 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
8204         PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
8205
8206         /*
8207          * Changing the EDP transcoder input mux
8208          * (A_ONOFF vs. A_ON) requires a full modeset.
8209          */
8210         PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
8211
8212         if (!fastset) {
8213                 PIPE_CONF_CHECK_I(pipe_src_w);
8214                 PIPE_CONF_CHECK_I(pipe_src_h);
8215
8216                 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
8217                 if (current_config->pch_pfit.enabled) {
8218                         PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
8219                         PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
8220                         PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
8221                         PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
8222                 }
8223
8224                 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
8225                 /* FIXME do the readout properly and get rid of this quirk */
8226                 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
8227                         PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
8228
8229                 PIPE_CONF_CHECK_X(gamma_mode);
8230                 if (IS_CHERRYVIEW(dev_priv))
8231                         PIPE_CONF_CHECK_X(cgm_mode);
8232                 else
8233                         PIPE_CONF_CHECK_X(csc_mode);
8234                 PIPE_CONF_CHECK_BOOL(gamma_enable);
8235                 PIPE_CONF_CHECK_BOOL(csc_enable);
8236
8237                 PIPE_CONF_CHECK_I(linetime);
8238                 PIPE_CONF_CHECK_I(ips_linetime);
8239
8240                 bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
8241                 if (bp_gamma)
8242                         PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
8243
8244                 PIPE_CONF_CHECK_BOOL(has_psr);
8245                 PIPE_CONF_CHECK_BOOL(has_psr2);
8246                 PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
8247                 PIPE_CONF_CHECK_I(dc3co_exitline);
8248         }
8249
8250         PIPE_CONF_CHECK_BOOL(double_wide);
8251
8252         if (dev_priv->dpll.mgr)
8253                 PIPE_CONF_CHECK_P(shared_dpll);
8254
8255         /* FIXME do the readout properly and get rid of this quirk */
8256         if (dev_priv->dpll.mgr && !PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
8257                 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
8258                 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
8259                 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
8260                 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
8261                 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
8262                 PIPE_CONF_CHECK_X(dpll_hw_state.spll);
8263                 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
8264                 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
8265                 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
8266                 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
8267                 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
8268                 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
8269                 PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
8270                 PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
8271                 PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
8272                 PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
8273                 PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
8274                 PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
8275                 PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
8276                 PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
8277                 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
8278                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
8279                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
8280                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
8281                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
8282                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
8283                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
8284                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
8285                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
8286                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
8287                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
8288         }
8289
8290         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
8291                 PIPE_CONF_CHECK_X(dsi_pll.ctrl);
8292                 PIPE_CONF_CHECK_X(dsi_pll.div);
8293
8294                 if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
8295                         PIPE_CONF_CHECK_I(pipe_bpp);
8296
8297                 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
8298                 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
8299                 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
8300
8301                 PIPE_CONF_CHECK_I(min_voltage_level);
8302         }
8303
8304         if (fastset && (current_config->has_psr || pipe_config->has_psr))
8305                 PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
8306                                             ~intel_hdmi_infoframe_enable(DP_SDP_VSC));
8307         else
8308                 PIPE_CONF_CHECK_X(infoframes.enable);
8309
8310         PIPE_CONF_CHECK_X(infoframes.gcp);
8311         PIPE_CONF_CHECK_INFOFRAME(avi);
8312         PIPE_CONF_CHECK_INFOFRAME(spd);
8313         PIPE_CONF_CHECK_INFOFRAME(hdmi);
8314         PIPE_CONF_CHECK_INFOFRAME(drm);
8315         PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
8316
8317         PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
8318         PIPE_CONF_CHECK_I(master_transcoder);
8319         PIPE_CONF_CHECK_BOOL(bigjoiner);
8320         PIPE_CONF_CHECK_BOOL(bigjoiner_slave);
8321         PIPE_CONF_CHECK_P(bigjoiner_linked_crtc);
8322
8323         PIPE_CONF_CHECK_I(dsc.compression_enable);
8324         PIPE_CONF_CHECK_I(dsc.dsc_split);
8325         PIPE_CONF_CHECK_I(dsc.compressed_bpp);
8326
8327         PIPE_CONF_CHECK_BOOL(splitter.enable);
8328         PIPE_CONF_CHECK_I(splitter.link_count);
8329         PIPE_CONF_CHECK_I(splitter.pixel_overlap);
8330
8331         PIPE_CONF_CHECK_I(mst_master_transcoder);
8332
8333         PIPE_CONF_CHECK_BOOL(vrr.enable);
8334         PIPE_CONF_CHECK_I(vrr.vmin);
8335         PIPE_CONF_CHECK_I(vrr.vmax);
8336         PIPE_CONF_CHECK_I(vrr.flipline);
8337         PIPE_CONF_CHECK_I(vrr.pipeline_full);
8338         PIPE_CONF_CHECK_I(vrr.guardband);
8339
8340 #undef PIPE_CONF_CHECK_X
8341 #undef PIPE_CONF_CHECK_I
8342 #undef PIPE_CONF_CHECK_BOOL
8343 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
8344 #undef PIPE_CONF_CHECK_P
8345 #undef PIPE_CONF_CHECK_FLAGS
8346 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
8347 #undef PIPE_CONF_CHECK_COLOR_LUT
8348 #undef PIPE_CONF_QUIRK
8349
8350         return ret;
8351 }
8352
8353 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
8354                                            const struct intel_crtc_state *pipe_config)
8355 {
8356         if (pipe_config->has_pch_encoder) {
8357                 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
8358                                                             &pipe_config->fdi_m_n);
8359                 int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
8360
8361                 /*
8362                  * FDI already provided one idea for the dotclock.
8363                  * Yell if the encoder disagrees.
8364                  */
8365                 drm_WARN(&dev_priv->drm,
8366                          !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
8367                          "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
8368                          fdi_dotclock, dotclock);
8369         }
8370 }
8371
/*
 * Cross-check the gen9+ watermark and DDB allocation software state for
 * @crtc against what is actually programmed in the hardware; every
 * mismatch is reported with drm_err(). No-op on pre-gen9 platforms or
 * when the pipe is inactive.
 */
static void verify_wm_state(struct intel_crtc *crtc,
                            struct intel_crtc_state *new_crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        /* Heap-allocated to keep the large hw readout off the kernel stack. */
        struct skl_hw_state {
                struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
                struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
                struct skl_pipe_wm wm;
        } *hw;
        const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
        int level, max_level = ilk_wm_max_level(dev_priv);
        struct intel_plane *plane;
        u8 hw_enabled_slices;

        if (DISPLAY_VER(dev_priv) < 9 || !new_crtc_state->hw.active)
                return;

        hw = kzalloc(sizeof(*hw), GFP_KERNEL);
        if (!hw)
                return; /* best effort: silently skip verification on OOM */

        /* Read back the current hardware watermark/DDB programming. */
        skl_pipe_wm_get_hw_state(crtc, &hw->wm);

        skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);

        hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);

        if (DISPLAY_VER(dev_priv) >= 11 &&
            hw_enabled_slices != dev_priv->dbuf.enabled_slices)
                drm_err(&dev_priv->drm,
                        "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
                        dev_priv->dbuf.enabled_slices,
                        hw_enabled_slices);

        for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
                const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
                const struct skl_wm_level *hw_wm_level, *sw_wm_level;

                /* Watermarks */
                for (level = 0; level <= max_level; level++) {
                        hw_wm_level = &hw->wm.planes[plane->id].wm[level];
                        sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);

                        if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
                                continue;

                        drm_err(&dev_priv->drm,
                                "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                plane->base.base.id, plane->base.name, level,
                                sw_wm_level->enable,
                                sw_wm_level->blocks,
                                sw_wm_level->lines,
                                hw_wm_level->enable,
                                hw_wm_level->blocks,
                                hw_wm_level->lines);
                }

                /* Transition watermark */
                hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
                sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);

                if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
                        drm_err(&dev_priv->drm,
                                "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                plane->base.base.id, plane->base.name,
                                sw_wm_level->enable,
                                sw_wm_level->blocks,
                                sw_wm_level->lines,
                                hw_wm_level->enable,
                                hw_wm_level->blocks,
                                hw_wm_level->lines);
                }

                /* SAGV WM0 (only on platforms with dedicated SAGV watermarks) */
                hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
                sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;

                if (HAS_HW_SAGV_WM(dev_priv) &&
                    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
                        drm_err(&dev_priv->drm,
                                "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                plane->base.base.id, plane->base.name,
                                sw_wm_level->enable,
                                sw_wm_level->blocks,
                                sw_wm_level->lines,
                                hw_wm_level->enable,
                                hw_wm_level->blocks,
                                hw_wm_level->lines);
                }

                /* SAGV transition watermark */
                hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
                sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;

                if (HAS_HW_SAGV_WM(dev_priv) &&
                    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
                        drm_err(&dev_priv->drm,
                                "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                plane->base.base.id, plane->base.name,
                                sw_wm_level->enable,
                                sw_wm_level->blocks,
                                sw_wm_level->lines,
                                hw_wm_level->enable,
                                hw_wm_level->blocks,
                                hw_wm_level->lines);
                }

                /* DDB */
                hw_ddb_entry = &hw->ddb_y[plane->id];
                sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane->id];

                if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
                        drm_err(&dev_priv->drm,
                                "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
                                plane->base.base.id, plane->base.name,
                                sw_ddb_entry->start, sw_ddb_entry->end,
                                hw_ddb_entry->start, hw_ddb_entry->end);
                }
        }

        kfree(hw);
}
8491
/*
 * Verify each connector in @state attached to @crtc against the new crtc
 * state and the legacy best_encoder pointer. @crtc may be NULL, in which
 * case only connectors that end up with no crtc are checked (with a NULL
 * crtc state passed to intel_connector_verify_state()).
 *
 * NOTE(review): the @crtc == NULL case relies on &crtc->base evaluating
 * equal to NULL (i.e. 'base' being the first member of struct intel_crtc)
 * -- confirm against the struct layout.
 */
static void
verify_connector_state(struct intel_atomic_state *state,
                       struct intel_crtc *crtc)
{
        struct drm_connector *connector;
        struct drm_connector_state *new_conn_state;
        int i;

        for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
                struct drm_encoder *encoder = connector->encoder;
                struct intel_crtc_state *crtc_state = NULL;

                /* Only look at connectors bound to the crtc we're verifying. */
                if (new_conn_state->crtc != &crtc->base)
                        continue;

                if (crtc)
                        crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

                intel_connector_verify_state(crtc_state, new_conn_state);

                I915_STATE_WARN(new_conn_state->best_encoder != encoder,
                     "connector's atomic encoder doesn't match legacy encoder\n");
        }
}
8516
/*
 * Walk every encoder and check that its hardware enable state is
 * consistent with the connector states in @state and with the legacy
 * encoder->base.crtc pointer. Encoders not referenced by any connector
 * in this commit (neither old nor new state) are skipped.
 */
static void
verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
{
        struct intel_encoder *encoder;
        struct drm_connector *connector;
        struct drm_connector_state *old_conn_state, *new_conn_state;
        int i;

        for_each_intel_encoder(&dev_priv->drm, encoder) {
                /*
                 * found: some connector referenced this encoder before or
                 * after the commit; enabled: some connector still uses it
                 * in the new state.
                 */
                bool enabled = false, found = false;
                enum pipe pipe;

                drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
                            encoder->base.base.id,
                            encoder->base.name);

                for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
                                                   new_conn_state, i) {
                        if (old_conn_state->best_encoder == &encoder->base)
                                found = true;

                        if (new_conn_state->best_encoder != &encoder->base)
                                continue;
                        found = enabled = true;

                        I915_STATE_WARN(new_conn_state->crtc !=
                                        encoder->base.crtc,
                             "connector's crtc doesn't match encoder crtc\n");
                }

                if (!found)
                        continue;

                I915_STATE_WARN(!!encoder->base.crtc != enabled,
                     "encoder's enabled state mismatch "
                     "(expected %i, found %i)\n",
                     !!encoder->base.crtc, enabled);

                if (!encoder->base.crtc) {
                        bool active;

                        /* A detached encoder must also be off in hardware. */
                        active = encoder->get_hw_state(encoder, &pipe);
                        I915_STATE_WARN(active,
                             "encoder detached but still enabled on pipe %c.\n",
                             pipe_name(pipe));
                }
        }
}
8565
/*
 * Compare the freshly computed @new_crtc_state against the state read
 * back from the hardware. @old_crtc_state is no longer needed at this
 * point: it is destroyed and reused (via the pipe_config alias) as
 * scratch storage for the hw readout, preserving only its uapi.state
 * pointer.
 */
static void
verify_crtc_state(struct intel_crtc *crtc,
                  struct intel_crtc_state *old_crtc_state,
                  struct intel_crtc_state *new_crtc_state)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *encoder;
        struct intel_crtc_state *pipe_config = old_crtc_state;
        struct drm_atomic_state *state = old_crtc_state->uapi.state;
        struct intel_crtc *master = crtc;

        /* Reset the old state so it can hold the hw readout. */
        __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
        intel_crtc_free_hw_state(old_crtc_state);
        intel_crtc_state_reset(old_crtc_state, crtc);
        old_crtc_state->uapi.state = state;

        drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
                    crtc->base.name);

        pipe_config->hw.enable = new_crtc_state->hw.enable;

        intel_crtc_get_pipe_config(pipe_config);

        /* we keep both pipes enabled on 830 */
        if (IS_I830(dev_priv) && pipe_config->hw.active)
                pipe_config->hw.active = new_crtc_state->hw.active;

        I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
                        "crtc active state doesn't match with hw state "
                        "(expected %i, found %i)\n",
                        new_crtc_state->hw.active, pipe_config->hw.active);

        I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
                        "transitional active state does not match atomic hw state "
                        "(expected %i, found %i)\n",
                        new_crtc_state->hw.active, crtc->active);

        /* The encoders hang off the bigjoiner master crtc, not the slave. */
        if (new_crtc_state->bigjoiner_slave)
                master = new_crtc_state->bigjoiner_linked_crtc;

        for_each_encoder_on_crtc(dev, &master->base, encoder) {
                enum pipe pipe;
                bool active;

                active = encoder->get_hw_state(encoder, &pipe);
                I915_STATE_WARN(active != new_crtc_state->hw.active,
                                "[ENCODER:%i] active %i with crtc active %i\n",
                                encoder->base.base.id, active,
                                new_crtc_state->hw.active);

                I915_STATE_WARN(active && master->pipe != pipe,
                                "Encoder connected to wrong pipe %c\n",
                                pipe_name(pipe));

                /* Let the encoder fill in its part of the hw readout. */
                if (active)
                        intel_encoder_get_config(encoder, pipe_config);
        }

        if (!new_crtc_state->hw.active)
                return;

        if (new_crtc_state->bigjoiner_slave)
                /* No PLLs set for slave */
                pipe_config->shared_dpll = NULL;

        intel_pipe_config_sanity_check(dev_priv, pipe_config);

        /* Full state compare; dump both states on any mismatch. */
        if (!intel_pipe_config_compare(new_crtc_state,
                                       pipe_config, false)) {
                I915_STATE_WARN(1, "pipe state doesn't match!\n");
                intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
                intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
        }
}
8641
8642 static void
8643 intel_verify_planes(struct intel_atomic_state *state)
8644 {
8645         struct intel_plane *plane;
8646         const struct intel_plane_state *plane_state;
8647         int i;
8648
8649         for_each_new_intel_plane_in_state(state, plane,
8650                                           plane_state, i)
8651                 assert_plane(plane, plane_state->planar_slave ||
8652                              plane_state->uapi.visible);
8653 }
8654
/*
 * Check one shared DPLL's software tracking against its hardware state.
 * When @crtc/@new_crtc_state are NULL only the global pll bookkeeping is
 * verified; otherwise the pll's active/enabled pipe masks are also
 * checked against the given crtc.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
                         struct intel_shared_dpll *pll,
                         struct intel_crtc *crtc,
                         struct intel_crtc_state *new_crtc_state)
{
        struct intel_dpll_hw_state dpll_hw_state;
        u8 pipe_mask;
        bool active;

        memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

        drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);

        active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);

        /* Always-on plls have no meaningful on/off tracking to verify. */
        if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
                I915_STATE_WARN(!pll->on && pll->active_mask,
                     "pll in active use but not on in sw tracking\n");
                I915_STATE_WARN(pll->on && !pll->active_mask,
                     "pll is on but not used by any active pipe\n");
                I915_STATE_WARN(pll->on != active,
                     "pll on state mismatch (expected %i, found %i)\n",
                     pll->on, active);
        }

        if (!crtc) {
                /* No crtc context: only check refcount consistency. */
                I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
                                "more active pll users than references: 0x%x vs 0x%x\n",
                                pll->active_mask, pll->state.pipe_mask);

                return;
        }

        pipe_mask = BIT(crtc->pipe);

        /* An active crtc must appear in the pll's active mask, and vice versa. */
        if (new_crtc_state->hw.active)
                I915_STATE_WARN(!(pll->active_mask & pipe_mask),
                                "pll active mismatch (expected pipe %c in active mask 0x%x)\n",
                                pipe_name(crtc->pipe), pll->active_mask);
        else
                I915_STATE_WARN(pll->active_mask & pipe_mask,
                                "pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
                                pipe_name(crtc->pipe), pll->active_mask);

        I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
                        "pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
                        pipe_mask, pll->state.pipe_mask);

        /* Compare only when on; hw state of a disabled pll is meaningless. */
        I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
                                          &dpll_hw_state,
                                          sizeof(dpll_hw_state)),
                        "pll hw state mismatch\n");
}
8709
8710 static void
8711 verify_shared_dpll_state(struct intel_crtc *crtc,
8712                          struct intel_crtc_state *old_crtc_state,
8713                          struct intel_crtc_state *new_crtc_state)
8714 {
8715         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8716
8717         if (new_crtc_state->shared_dpll)
8718                 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
8719
8720         if (old_crtc_state->shared_dpll &&
8721             old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
8722                 u8 pipe_mask = BIT(crtc->pipe);
8723                 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
8724
8725                 I915_STATE_WARN(pll->active_mask & pipe_mask,
8726                                 "pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
8727                                 pipe_name(crtc->pipe), pll->active_mask);
8728                 I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
8729                                 "pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
8730                                 pipe_name(crtc->pipe), pll->state.pipe_mask);
8731         }
8732 }
8733
/*
 * On DG2 compare the software MPLLB (SNPS PHY PLL) state against the
 * hardware readout from the crtc's encoder. Skipped for inactive crtcs
 * and for bigjoiner slaves (which have no PLL of their own).
 */
static void
verify_mpllb_state(struct intel_atomic_state *state,
                   struct intel_crtc_state *new_crtc_state)
{
        struct drm_i915_private *i915 = to_i915(state->base.dev);
        struct intel_mpllb_state mpllb_hw_state = { 0 };
        struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->mpllb_state;
        struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
        struct intel_encoder *encoder;

        if (!IS_DG2(i915))
                return;

        if (!new_crtc_state->hw.active)
                return;

        if (new_crtc_state->bigjoiner_slave)
                return;

        encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
        intel_mpllb_readout_hw_state(encoder, &mpllb_hw_state);

/* Report a mismatch for one MPLLB register field (sw vs. hw readout). */
#define MPLLB_CHECK(name) do { \
        if (mpllb_sw_state->name != mpllb_hw_state.name) { \
                pipe_config_mismatch(false, crtc, "MPLLB:" __stringify(name), \
                                     "(expected 0x%08x, found 0x%08x)", \
                                     mpllb_sw_state->name, \
                                     mpllb_hw_state.name); \
        } \
} while (0)

        MPLLB_CHECK(mpllb_cp);
        MPLLB_CHECK(mpllb_div);
        MPLLB_CHECK(mpllb_div2);
        MPLLB_CHECK(mpllb_fracn1);
        MPLLB_CHECK(mpllb_fracn2);
        MPLLB_CHECK(mpllb_sscen);
        MPLLB_CHECK(mpllb_sscstep);

        /*
         * ref_control is handled by the hardware/firmware and never
         * programmed by the software, but the proper values are supplied
         * in the bspec for verification purposes.
         */
        MPLLB_CHECK(ref_control);

#undef MPLLB_CHECK
}
8782
8783 static void
8784 intel_modeset_verify_crtc(struct intel_crtc *crtc,
8785                           struct intel_atomic_state *state,
8786                           struct intel_crtc_state *old_crtc_state,
8787                           struct intel_crtc_state *new_crtc_state)
8788 {
8789         if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
8790                 return;
8791
8792         verify_wm_state(crtc, new_crtc_state);
8793         verify_connector_state(state, crtc);
8794         verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
8795         verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
8796         verify_mpllb_state(state, new_crtc_state);
8797 }
8798
8799 static void
8800 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
8801 {
8802         int i;
8803
8804         for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
8805                 verify_single_dpll_state(dev_priv,
8806                                          &dev_priv->dpll.shared_dplls[i],
8807                                          NULL, NULL);
8808 }
8809
8810 static void
8811 intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
8812                               struct intel_atomic_state *state)
8813 {
8814         verify_encoder_state(dev_priv, state);
8815         verify_connector_state(state, NULL);
8816         verify_disabled_dpll_state(dev_priv);
8817 }
8818
8819 int intel_modeset_all_pipes(struct intel_atomic_state *state)
8820 {
8821         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8822         struct intel_crtc *crtc;
8823
8824         /*
8825          * Add all pipes to the state, and force
8826          * a modeset on all the active ones.
8827          */
8828         for_each_intel_crtc(&dev_priv->drm, crtc) {
8829                 struct intel_crtc_state *crtc_state;
8830                 int ret;
8831
8832                 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
8833                 if (IS_ERR(crtc_state))
8834                         return PTR_ERR(crtc_state);
8835
8836                 if (!crtc_state->hw.active ||
8837                     drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
8838                         continue;
8839
8840                 crtc_state->uapi.mode_changed = true;
8841
8842                 ret = drm_atomic_add_affected_connectors(&state->base,
8843                                                          &crtc->base);
8844                 if (ret)
8845                         return ret;
8846
8847                 ret = intel_atomic_add_affected_planes(state, crtc);
8848                 if (ret)
8849                         return ret;
8850
8851                 crtc_state->update_planes |= crtc_state->active_planes;
8852         }
8853
8854         return 0;
8855 }
8856
/*
 * Update the crtc's vblank timestamping constants and scanline counter
 * offset from @crtc_state. With VRR enabled the timings are recomputed
 * from vmax (the worst case) so vblank evaluation stays correct across
 * the variable refresh range.
 */
static void
intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        /* Local copy: VRR adjustments below must not touch the crtc state. */
        struct drm_display_mode adjusted_mode =
                crtc_state->hw.adjusted_mode;

        if (crtc_state->vrr.enable) {
                adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
                adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
                adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
                crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
        }

        drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);

        crtc->mode_flags = crtc_state->mode_flags;

        /*
         * The scanline counter increments at the leading edge of hsync.
         *
         * On most platforms it starts counting from vtotal-1 on the
         * first active line. That means the scanline counter value is
         * always one less than what we would expect. Ie. just after
         * start of vblank, which also occurs at start of hsync (on the
         * last active line), the scanline counter will read vblank_start-1.
         *
         * On gen2 the scanline counter starts counting from 1 instead
         * of vtotal-1, so we have to subtract one (or rather add vtotal-1
         * to keep the value positive), instead of adding one.
         *
         * On HSW+ the behaviour of the scanline counter depends on the output
         * type. For DP ports it behaves like most other platforms, but on HDMI
         * there's an extra 1 line difference. So we need to add two instead of
         * one to the value.
         *
         * On VLV/CHV DSI the scanline counter would appear to increment
         * approx. 1/3 of a scanline before start of vblank. Unfortunately
         * that means we can't tell whether we're in vblank or not while
         * we're on that particular line. We must still set scanline_offset
         * to 1 so that the vblank timestamps come out correct when we query
         * the scanline counter from within the vblank interrupt handler.
         * However if queried just before the start of vblank we'll get an
         * answer that's slightly in the future.
         */
        if (DISPLAY_VER(dev_priv) == 2) {
                int vtotal;

                vtotal = adjusted_mode.crtc_vtotal;
                if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
                        vtotal /= 2;

                crtc->scanline_offset = vtotal - 1;
        } else if (HAS_DDI(dev_priv) &&
                   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
                crtc->scanline_offset = 2;
        } else {
                crtc->scanline_offset = 1;
        }
}
8918
8919 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
8920 {
8921         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8922         struct intel_crtc_state *new_crtc_state;
8923         struct intel_crtc *crtc;
8924         int i;
8925
8926         if (!dev_priv->display.crtc_compute_clock)
8927                 return;
8928
8929         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8930                 if (!intel_crtc_needs_modeset(new_crtc_state))
8931                         continue;
8932
8933                 intel_release_shared_dplls(state, crtc);
8934         }
8935 }
8936
8937 /*
8938  * This implements the workaround described in the "notes" section of the mode
8939  * set sequence documentation. When going from no pipes or single pipe to
8940  * multiple pipes, and planes are enabled after the pipe, we need to wait at
8941  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
8942  */
static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled during the modeset */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->hw.active ||
		    !intel_crtc_needs_modeset(crtc_state))
			continue;

		/* remember the first two such CRTCs; two is enough to decide */
		if (first_crtc_state) {
			other_crtc_state = crtc_state;
			break;
		} else {
			first_crtc_state = crtc_state;
			first_pipe = crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->base.dev, crtc) {
		/* pulls every CRTC into the state; may return -EDEADLK etc. */
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->hsw_workaround_pipe = INVALID_PIPE;

		/* only count CRTCs that stay enabled without a modeset */
		if (!crtc_state->hw.active ||
		    intel_crtc_needs_modeset(crtc_state))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = crtc->pipe;
	}

	/*
	 * One pipe already enabled: the newly enabled pipe must wait on it.
	 * No pipe enabled but two being enabled: the second waits on the first.
	 */
	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
8997
8998 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
8999                            u8 active_pipes)
9000 {
9001         const struct intel_crtc_state *crtc_state;
9002         struct intel_crtc *crtc;
9003         int i;
9004
9005         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9006                 if (crtc_state->hw.active)
9007                         active_pipes |= BIT(crtc->pipe);
9008                 else
9009                         active_pipes &= ~BIT(crtc->pipe);
9010         }
9011
9012         return active_pipes;
9013 }
9014
9015 static int intel_modeset_checks(struct intel_atomic_state *state)
9016 {
9017         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9018
9019         state->modeset = true;
9020
9021         if (IS_HASWELL(dev_priv))
9022                 return hsw_mode_set_planes_workaround(state);
9023
9024         return 0;
9025 }
9026
9027 /*
9028  * Handle calculation of various watermark data at the end of the atomic check
9029  * phase.  The code here should be run after the per-crtc and per-plane 'check'
9030  * handlers to ensure that all derived state has been updated.
9031  */
9032 static int calc_watermark_data(struct intel_atomic_state *state)
9033 {
9034         struct drm_device *dev = state->base.dev;
9035         struct drm_i915_private *dev_priv = to_i915(dev);
9036
9037         /* Is there platform-specific watermark information to calculate? */
9038         if (dev_priv->display.compute_global_watermarks)
9039                 return dev_priv->display.compute_global_watermarks(state);
9040
9041         return 0;
9042 }
9043
9044 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
9045                                      struct intel_crtc_state *new_crtc_state)
9046 {
9047         if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
9048                 return;
9049
9050         new_crtc_state->uapi.mode_changed = false;
9051         new_crtc_state->update_pipe = true;
9052 }
9053
9054 static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
9055                                     struct intel_crtc_state *new_crtc_state)
9056 {
9057         /*
9058          * If we're not doing the full modeset we want to
9059          * keep the current M/N values as they may be
9060          * sufficiently different to the computed values
9061          * to cause problems.
9062          *
9063          * FIXME: should really copy more fuzzy state here
9064          */
9065         new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
9066         new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
9067         new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
9068         new_crtc_state->has_drrs = old_crtc_state->has_drrs;
9069 }
9070
9071 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
9072                                           struct intel_crtc *crtc,
9073                                           u8 plane_ids_mask)
9074 {
9075         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9076         struct intel_plane *plane;
9077
9078         for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
9079                 struct intel_plane_state *plane_state;
9080
9081                 if ((plane_ids_mask & BIT(plane->id)) == 0)
9082                         continue;
9083
9084                 plane_state = intel_atomic_get_plane_state(state, plane);
9085                 if (IS_ERR(plane_state))
9086                         return PTR_ERR(plane_state);
9087         }
9088
9089         return 0;
9090 }
9091
9092 int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
9093                                      struct intel_crtc *crtc)
9094 {
9095         const struct intel_crtc_state *old_crtc_state =
9096                 intel_atomic_get_old_crtc_state(state, crtc);
9097         const struct intel_crtc_state *new_crtc_state =
9098                 intel_atomic_get_new_crtc_state(state, crtc);
9099
9100         return intel_crtc_add_planes_to_state(state, crtc,
9101                                               old_crtc_state->enabled_planes |
9102                                               new_crtc_state->enabled_planes);
9103 }
9104
9105 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
9106 {
9107         /* See {hsw,vlv,ivb}_plane_ratio() */
9108         return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
9109                 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
9110                 IS_IVYBRIDGE(dev_priv);
9111 }
9112
9113 static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
9114                                            struct intel_crtc *crtc,
9115                                            struct intel_crtc *other)
9116 {
9117         const struct intel_plane_state *plane_state;
9118         struct intel_plane *plane;
9119         u8 plane_ids = 0;
9120         int i;
9121
9122         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
9123                 if (plane->pipe == crtc->pipe)
9124                         plane_ids |= BIT(plane->id);
9125         }
9126
9127         return intel_crtc_add_planes_to_state(state, other, plane_ids);
9128 }
9129
9130 static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
9131 {
9132         const struct intel_crtc_state *crtc_state;
9133         struct intel_crtc *crtc;
9134         int i;
9135
9136         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9137                 int ret;
9138
9139                 if (!crtc_state->bigjoiner)
9140                         continue;
9141
9142                 ret = intel_crtc_add_bigjoiner_planes(state, crtc,
9143                                                       crtc_state->bigjoiner_linked_crtc);
9144                 if (ret)
9145                         return ret;
9146         }
9147
9148         return 0;
9149 }
9150
/*
 * Run the per-plane atomic checks and make sure every plane whose
 * state this commit may affect is part of the atomic state.
 */
static int intel_atomic_check_planes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i, ret;

	/* add planes linked to planes already in the state (icl Y/UV) */
	ret = icl_add_linked_planes(state);
	if (ret)
		return ret;

	/* and planes mirrored across bigjoiner master/slave pairs */
	ret = intel_bigjoiner_add_affected_planes(state);
	if (ret)
		return ret;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_atomic_check(state, plane);
		if (ret) {
			drm_dbg_atomic(&dev_priv->drm,
				       "[PLANE:%d:%s] atomic driver check failed\n",
				       plane->base.base.id, plane->base.name);
			return ret;
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		u8 old_active_planes, new_active_planes;

		ret = icl_check_nv12_planes(new_crtc_state);
		if (ret)
			return ret;

		/*
		 * On some platforms the number of active planes affects
		 * the planes' minimum cdclk calculation. Add such planes
		 * to the state before we compute the minimum cdclk.
		 */
		if (!active_planes_affects_min_cdclk(dev_priv))
			continue;

		/* the cursor doesn't count towards the plane ratios */
		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

		if (hweight8(old_active_planes) == hweight8(new_active_planes))
			continue;

		/* active plane count changed: recheck all remaining planes */
		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
		if (ret)
			return ret;
	}

	return 0;
}
9207
/*
 * Figure out whether this commit requires a full cdclk recomputation,
 * reporting the answer through @need_cdclk_calc.
 */
static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
				    bool *need_cdclk_calc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_cdclk_state *old_cdclk_state;
	const struct intel_cdclk_state *new_cdclk_state;
	struct intel_plane_state *plane_state;
	struct intel_bw_state *new_bw_state;
	struct intel_plane *plane;
	int min_cdclk = 0;
	enum pipe pipe;
	int ret;
	int i;
	/*
	 * active_planes bitmask has been updated, and potentially
	 * affected planes are part of the state. We can now
	 * compute the minimum cdclk for each plane.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
		if (ret)
			return ret;
	}

	old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
	new_cdclk_state = intel_atomic_get_new_cdclk_state(state);

	/* a forced minimum cdclk change always requires recomputation */
	if (new_cdclk_state &&
	    old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
		*need_cdclk_calc = true;

	ret = dev_priv->display.bw_calc_min_cdclk(state);
	if (ret)
		return ret;

	new_bw_state = intel_atomic_get_new_bw_state(state);

	/* neither cdclk nor bw state in this commit: nothing more to check */
	if (!new_cdclk_state || !new_bw_state)
		return 0;

	for_each_pipe(dev_priv, pipe) {
		min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk);

		/*
		 * Currently do this change only if we need to increase
		 */
		if (new_bw_state->min_cdclk > min_cdclk)
			*need_cdclk_calc = true;
	}

	return 0;
}
9260
9261 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
9262 {
9263         struct intel_crtc_state *crtc_state;
9264         struct intel_crtc *crtc;
9265         int i;
9266
9267         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9268                 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
9269                 int ret;
9270
9271                 ret = intel_crtc_atomic_check(state, crtc);
9272                 if (ret) {
9273                         drm_dbg_atomic(&i915->drm,
9274                                        "[CRTC:%d:%s] atomic driver check failed\n",
9275                                        crtc->base.base.id, crtc->base.name);
9276                         return ret;
9277                 }
9278         }
9279
9280         return 0;
9281 }
9282
9283 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
9284                                                u8 transcoders)
9285 {
9286         const struct intel_crtc_state *new_crtc_state;
9287         struct intel_crtc *crtc;
9288         int i;
9289
9290         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
9291                 if (new_crtc_state->hw.enable &&
9292                     transcoders & BIT(new_crtc_state->cpu_transcoder) &&
9293                     intel_crtc_needs_modeset(new_crtc_state))
9294                         return true;
9295         }
9296
9297         return false;
9298 }
9299
/*
 * Validate the bigjoiner master/slave pairing for @crtc and, when it
 * becomes a master, claim and configure the slave CRTC's state.
 * Returns 0 on success, -EINVAL when the required slave CRTC is
 * missing or already in use as a normal CRTC.
 */
static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
					struct intel_crtc *crtc,
					struct intel_crtc_state *old_crtc_state,
					struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
	struct intel_crtc *slave, *master;

	/* slave being enabled, is the master still claiming this crtc? */
	if (old_crtc_state->bigjoiner_slave) {
		slave = crtc;
		master = old_crtc_state->bigjoiner_linked_crtc;
		master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
		if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
			goto claimed;
	}

	/* not a bigjoiner master: nothing further to set up */
	if (!new_crtc_state->bigjoiner)
		return 0;

	slave = intel_dsc_get_bigjoiner_secondary(crtc);
	if (!slave) {
		DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires "
			      "CRTC + 1 to be used, doesn't exist\n",
			      crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	new_crtc_state->bigjoiner_linked_crtc = slave;
	slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave);
	master = crtc;
	if (IS_ERR(slave_crtc_state))
		return PTR_ERR(slave_crtc_state);

	/* master being enabled, slave was already configured? */
	if (slave_crtc_state->uapi.enable)
		goto claimed;

	DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n",
		      slave->base.base.id, slave->base.name);

	/* mirror the master's state onto the slave */
	return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state);

claimed:
	DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
		      "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
		      slave->base.base.id, slave->base.name,
		      master->base.base.id, master->base.name);
	return -EINVAL;
}
9350
9351 static void kill_bigjoiner_slave(struct intel_atomic_state *state,
9352                                  struct intel_crtc_state *master_crtc_state)
9353 {
9354         struct intel_crtc_state *slave_crtc_state =
9355                 intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc);
9356
9357         slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
9358         slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
9359         slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
9360         intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
9361 }
9362
9363 /**
9364  * DOC: asynchronous flip implementation
9365  *
9366  * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
9367  * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
9368  * Correspondingly, support is currently added for primary plane only.
9369  *
9370  * Async flip can only change the plane surface address, so anything else
9371  * changing is rejected from the intel_atomic_check_async() function.
9372  * Once this check is cleared, flip done interrupt is enabled using
9373  * the intel_crtc_enable_flip_done() function.
9374  *
9375  * As soon as the surface address register is written, flip done interrupt is
 * generated and the requested events are sent to the userspace in the interrupt
9377  * handler itself. The timestamp and sequence sent during the flip done event
9378  * correspond to the last vblank and have no relation to the actual time when
9379  * the flip done event was sent.
9380  */
/*
 * Reject any state change that an async flip cannot perform: only the
 * plane surface address may differ between the old and new state.
 * Returns 0 when the commit qualifies for async flip, -EINVAL otherwise.
 */
static int intel_atomic_check_async(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	const struct intel_plane_state *new_plane_state, *old_plane_state;
	struct intel_crtc *crtc;
	struct intel_plane *plane;
	int i;

	/* CRTC-level restrictions: no modeset, active pipe, same plane set */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (intel_crtc_needs_modeset(new_crtc_state)) {
			drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
			return -EINVAL;
		}

		if (!new_crtc_state->hw.active) {
			drm_dbg_kms(&i915->drm, "CRTC inactive\n");
			return -EINVAL;
		}
		if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
			drm_dbg_kms(&i915->drm,
				    "Active planes cannot be changed during async flip\n");
			return -EINVAL;
		}
	}

	/* Plane-level restrictions: everything but the surface address fixed */
	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		/*
		 * TODO: Async flip is only supported through the page flip IOCTL
		 * as of now. So support currently added for primary plane only.
		 * Support for other planes on platforms on which supports
		 * this(vlv/chv and icl+) should be added when async flip is
		 * enabled in the atomic IOCTL path.
		 */
		if (!plane->async_flip)
			return -EINVAL;

		/*
		 * FIXME: This check is kept generic for all platforms.
		 * Need to verify this for all gen9 platforms to enable
		 * this selectively if required.
		 */
		switch (new_plane_state->hw.fb->modifier) {
		case I915_FORMAT_MOD_X_TILED:
		case I915_FORMAT_MOD_Y_TILED:
		case I915_FORMAT_MOD_Yf_TILED:
			break;
		default:
			drm_dbg_kms(&i915->drm,
				    "Linear memory/CCS does not support async flips\n");
			return -EINVAL;
		}

		if (old_plane_state->view.color_plane[0].stride !=
		    new_plane_state->view.color_plane[0].stride) {
			drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.fb->modifier !=
		    new_plane_state->hw.fb->modifier) {
			drm_dbg_kms(&i915->drm,
				    "Framebuffer modifiers cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.fb->format !=
		    new_plane_state->hw.fb->format) {
			drm_dbg_kms(&i915->drm,
				    "Framebuffer format cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.rotation !=
		    new_plane_state->hw.rotation) {
			drm_dbg_kms(&i915->drm, "Rotation cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
		    !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
			drm_dbg_kms(&i915->drm,
				    "Plane size/co-ordinates cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
			drm_dbg_kms(&i915->drm, "Alpha value cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.pixel_blend_mode !=
		    new_plane_state->hw.pixel_blend_mode) {
			drm_dbg_kms(&i915->drm,
				    "Pixel blend mode cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
			drm_dbg_kms(&i915->drm,
				    "Color encoding cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
			drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n");
			return -EINVAL;
		}
	}

	return 0;
}
9495
/*
 * Pull the linked CRTC (and its connectors/planes) of every bigjoiner
 * pair into the atomic state, and tear down existing bigjoiner links
 * on CRTCs that are about to be modeset (they may be re-established
 * later during the check phase).
 */
static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc_state *linked_crtc_state;
		struct intel_crtc *linked_crtc;
		int ret;

		if (!crtc_state->bigjoiner)
			continue;

		/* always acquire the linked CRTC's state, even for fastsets */
		linked_crtc = crtc_state->bigjoiner_linked_crtc;
		linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc);
		if (IS_ERR(linked_crtc_state))
			return PTR_ERR(linked_crtc_state);

		if (!intel_crtc_needs_modeset(crtc_state))
			continue;

		/* a modeset on one half forces a modeset on the other */
		linked_crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base,
							 &linked_crtc->base);
		if (ret)
			return ret;

		ret = intel_atomic_add_affected_planes(state, linked_crtc);
		if (ret)
			return ret;
	}

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		/* Kill old bigjoiner link, we may re-establish afterwards */
		if (intel_crtc_needs_modeset(crtc_state) &&
		    crtc_state->bigjoiner && !crtc_state->bigjoiner_slave)
			kill_bigjoiner_slave(state, crtc_state);
	}

	return 0;
}
9539
9540 /**
9541  * intel_atomic_check - validate state object
9542  * @dev: drm device
9543  * @_state: state to validate
9544  */
9545 static int intel_atomic_check(struct drm_device *dev,
9546                               struct drm_atomic_state *_state)
9547 {
9548         struct drm_i915_private *dev_priv = to_i915(dev);
9549         struct intel_atomic_state *state = to_intel_atomic_state(_state);
9550         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
9551         struct intel_crtc *crtc;
9552         int ret, i;
9553         bool any_ms = false;
9554
9555         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9556                                             new_crtc_state, i) {
9557                 if (new_crtc_state->inherited != old_crtc_state->inherited)
9558                         new_crtc_state->uapi.mode_changed = true;
9559         }
9560
9561         intel_vrr_check_modeset(state);
9562
9563         ret = drm_atomic_helper_check_modeset(dev, &state->base);
9564         if (ret)
9565                 goto fail;
9566
9567         ret = intel_bigjoiner_add_affected_crtcs(state);
9568         if (ret)
9569                 goto fail;
9570
9571         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9572                                             new_crtc_state, i) {
9573                 if (!intel_crtc_needs_modeset(new_crtc_state)) {
9574                         /* Light copy */
9575                         intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state);
9576
9577                         continue;
9578                 }
9579
9580                 if (!new_crtc_state->uapi.enable) {
9581                         if (!new_crtc_state->bigjoiner_slave) {
9582                                 intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
9583                                 any_ms = true;
9584                         }
9585                         continue;
9586                 }
9587
9588                 ret = intel_crtc_prepare_cleared_state(state, new_crtc_state);
9589                 if (ret)
9590                         goto fail;
9591
9592                 ret = intel_modeset_pipe_config(state, new_crtc_state);
9593                 if (ret)
9594                         goto fail;
9595
9596                 ret = intel_atomic_check_bigjoiner(state, crtc, old_crtc_state,
9597                                                    new_crtc_state);
9598                 if (ret)
9599                         goto fail;
9600         }
9601
9602         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9603                                             new_crtc_state, i) {
9604                 if (!intel_crtc_needs_modeset(new_crtc_state))
9605                         continue;
9606
9607                 ret = intel_modeset_pipe_config_late(new_crtc_state);
9608                 if (ret)
9609                         goto fail;
9610
9611                 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
9612         }
9613
9614         /**
9615          * Check if fastset is allowed by external dependencies like other
9616          * pipes and transcoders.
9617          *
9618          * Right now it only forces a fullmodeset when the MST master
  * transcoder has not changed but the pipe of the master transcoder
9620          * needs a fullmodeset so all slaves also needs to do a fullmodeset or
9621          * in case of port synced crtcs, if one of the synced crtcs
9622          * needs a full modeset, all other synced crtcs should be
9623          * forced a full modeset.
9624          */
9625         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
9626                 if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
9627                         continue;
9628
9629                 if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
9630                         enum transcoder master = new_crtc_state->mst_master_transcoder;
9631
9632                         if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
9633                                 new_crtc_state->uapi.mode_changed = true;
9634                                 new_crtc_state->update_pipe = false;
9635                         }
9636                 }
9637
9638                 if (is_trans_port_sync_mode(new_crtc_state)) {
9639                         u8 trans = new_crtc_state->sync_mode_slaves_mask;
9640
9641                         if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
9642                                 trans |= BIT(new_crtc_state->master_transcoder);
9643
9644                         if (intel_cpu_transcoders_need_modeset(state, trans)) {
9645                                 new_crtc_state->uapi.mode_changed = true;
9646                                 new_crtc_state->update_pipe = false;
9647                         }
9648                 }
9649
9650                 if (new_crtc_state->bigjoiner) {
9651                         struct intel_crtc_state *linked_crtc_state =
9652                                 intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc);
9653
9654                         if (intel_crtc_needs_modeset(linked_crtc_state)) {
9655                                 new_crtc_state->uapi.mode_changed = true;
9656                                 new_crtc_state->update_pipe = false;
9657                         }
9658                 }
9659         }
9660
9661         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9662                                             new_crtc_state, i) {
9663                 if (intel_crtc_needs_modeset(new_crtc_state)) {
9664                         any_ms = true;
9665                         continue;
9666                 }
9667
9668                 if (!new_crtc_state->update_pipe)
9669                         continue;
9670
9671                 intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
9672         }
9673
9674         if (any_ms && !check_digital_port_conflicts(state)) {
9675                 drm_dbg_kms(&dev_priv->drm,
9676                             "rejecting conflicting digital port configuration\n");
9677                 ret = -EINVAL;
9678                 goto fail;
9679         }
9680
9681         ret = drm_dp_mst_atomic_check(&state->base);
9682         if (ret)
9683                 goto fail;
9684
9685         ret = intel_atomic_check_planes(state);
9686         if (ret)
9687                 goto fail;
9688
9689         intel_fbc_choose_crtc(dev_priv, state);
9690         ret = calc_watermark_data(state);
9691         if (ret)
9692                 goto fail;
9693
9694         ret = intel_bw_atomic_check(state);
9695         if (ret)
9696                 goto fail;
9697
9698         ret = intel_atomic_check_cdclk(state, &any_ms);
9699         if (ret)
9700                 goto fail;
9701
9702         if (intel_any_crtc_needs_modeset(state))
9703                 any_ms = true;
9704
9705         if (any_ms) {
9706                 ret = intel_modeset_checks(state);
9707                 if (ret)
9708                         goto fail;
9709
9710                 ret = intel_modeset_calc_cdclk(state);
9711                 if (ret)
9712                         return ret;
9713
9714                 intel_modeset_clear_plls(state);
9715         }
9716
9717         ret = intel_atomic_check_crtcs(state);
9718         if (ret)
9719                 goto fail;
9720
9721         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9722                                             new_crtc_state, i) {
9723                 if (new_crtc_state->uapi.async_flip) {
9724                         ret = intel_atomic_check_async(state);
9725                         if (ret)
9726                                 goto fail;
9727                 }
9728
9729                 if (!intel_crtc_needs_modeset(new_crtc_state) &&
9730                     !new_crtc_state->update_pipe)
9731                         continue;
9732
9733                 intel_dump_pipe_config(new_crtc_state, state,
9734                                        intel_crtc_needs_modeset(new_crtc_state) ?
9735                                        "[modeset]" : "[fastset]");
9736         }
9737
9738         return 0;
9739
9740  fail:
9741         if (ret == -EDEADLK)
9742                 return ret;
9743
9744         /*
9745          * FIXME would probably be nice to know which crtc specifically
9746          * caused the failure, in cases where we can pinpoint it.
9747          */
9748         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9749                                             new_crtc_state, i)
9750                 intel_dump_pipe_config(new_crtc_state, state, "[failed]");
9751
9752         return ret;
9753 }
9754
9755 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
9756 {
9757         struct intel_crtc_state *crtc_state;
9758         struct intel_crtc *crtc;
9759         int i, ret;
9760
9761         ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
9762         if (ret < 0)
9763                 return ret;
9764
9765         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9766                 bool mode_changed = intel_crtc_needs_modeset(crtc_state);
9767
9768                 if (mode_changed || crtc_state->update_pipe ||
9769                     crtc_state->uapi.color_mgmt_changed) {
9770                         intel_dsb_prepare(crtc_state);
9771                 }
9772         }
9773
9774         return 0;
9775 }
9776
9777 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
9778                                   struct intel_crtc_state *crtc_state)
9779 {
9780         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9781
9782         if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes)
9783                 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
9784
9785         if (crtc_state->has_pch_encoder) {
9786                 enum pipe pch_transcoder =
9787                         intel_crtc_pch_transcoder(crtc);
9788
9789                 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
9790         }
9791 }
9792
9793 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
9794                                const struct intel_crtc_state *new_crtc_state)
9795 {
9796         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
9797         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9798
9799         /*
9800          * Update pipe size and adjust fitter if needed: the reason for this is
9801          * that in compute_mode_changes we check the native mode (not the pfit
9802          * mode) to see if we can flip rather than do a full mode set. In the
9803          * fastboot case, we'll flip, but if we don't update the pipesrc and
9804          * pfit state, we'll end up with a big fb scanned out into the wrong
9805          * sized surface.
9806          */
9807         intel_set_pipe_src_size(new_crtc_state);
9808
9809         /* on skylake this is done by detaching scalers */
9810         if (DISPLAY_VER(dev_priv) >= 9) {
9811                 if (new_crtc_state->pch_pfit.enabled)
9812                         skl_pfit_enable(new_crtc_state);
9813         } else if (HAS_PCH_SPLIT(dev_priv)) {
9814                 if (new_crtc_state->pch_pfit.enabled)
9815                         ilk_pfit_enable(new_crtc_state);
9816                 else if (old_crtc_state->pch_pfit.enabled)
9817                         ilk_pfit_disable(old_crtc_state);
9818         }
9819
9820         /*
9821          * The register is supposedly single buffered so perhaps
9822          * not 100% correct to do this here. But SKL+ calculate
9823          * this based on the adjust pixel rate so pfit changes do
9824          * affect it and so it must be updated for fastsets.
9825          * HSW/BDW only really need this here for fastboot, after
9826          * that the value should not change without a full modeset.
9827          */
9828         if (DISPLAY_VER(dev_priv) >= 9 ||
9829             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
9830                 hsw_set_linetime_wm(new_crtc_state);
9831
9832         if (DISPLAY_VER(dev_priv) >= 11)
9833                 icl_set_pipe_chicken(new_crtc_state);
9834 }
9835
9836 static void commit_pipe_pre_planes(struct intel_atomic_state *state,
9837                                    struct intel_crtc *crtc)
9838 {
9839         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9840         const struct intel_crtc_state *old_crtc_state =
9841                 intel_atomic_get_old_crtc_state(state, crtc);
9842         const struct intel_crtc_state *new_crtc_state =
9843                 intel_atomic_get_new_crtc_state(state, crtc);
9844         bool modeset = intel_crtc_needs_modeset(new_crtc_state);
9845
9846         /*
9847          * During modesets pipe configuration was programmed as the
9848          * CRTC was enabled.
9849          */
9850         if (!modeset) {
9851                 if (new_crtc_state->uapi.color_mgmt_changed ||
9852                     new_crtc_state->update_pipe)
9853                         intel_color_commit(new_crtc_state);
9854
9855                 if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
9856                         bdw_set_pipemisc(new_crtc_state);
9857
9858                 if (new_crtc_state->update_pipe)
9859                         intel_pipe_fastset(old_crtc_state, new_crtc_state);
9860
9861                 intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
9862         }
9863
9864         if (dev_priv->display.atomic_update_watermarks)
9865                 dev_priv->display.atomic_update_watermarks(state, crtc);
9866 }
9867
9868 static void commit_pipe_post_planes(struct intel_atomic_state *state,
9869                                     struct intel_crtc *crtc)
9870 {
9871         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9872         const struct intel_crtc_state *new_crtc_state =
9873                 intel_atomic_get_new_crtc_state(state, crtc);
9874
9875         /*
9876          * Disable the scaler(s) after the plane(s) so that we don't
9877          * get a catastrophic underrun even if the two operations
9878          * end up happening in two different frames.
9879          */
9880         if (DISPLAY_VER(dev_priv) >= 9 &&
9881             !intel_crtc_needs_modeset(new_crtc_state))
9882                 skl_detach_scalers(new_crtc_state);
9883 }
9884
9885 static void intel_enable_crtc(struct intel_atomic_state *state,
9886                               struct intel_crtc *crtc)
9887 {
9888         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9889         const struct intel_crtc_state *new_crtc_state =
9890                 intel_atomic_get_new_crtc_state(state, crtc);
9891
9892         if (!intel_crtc_needs_modeset(new_crtc_state))
9893                 return;
9894
9895         intel_crtc_update_active_timings(new_crtc_state);
9896
9897         dev_priv->display.crtc_enable(state, crtc);
9898
9899         if (new_crtc_state->bigjoiner_slave)
9900                 return;
9901
9902         /* vblanks work again, re-enable pipe CRC. */
9903         intel_crtc_enable_pipe_crc(crtc);
9904 }
9905
/*
 * Program a single CRTC's update (fastset or post-modeset plane/pipe
 * update). The sequence is order-sensitive: pre-plane work, vblank
 * evasion start, pipe pre-plane programming, plane updates, pipe
 * post-plane programming, vblank evasion end.
 */
static void intel_update_crtc(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	/* Fastset-only preparation; modesets did this during CRTC enable. */
	if (!modeset) {
		/* Preload the LUTs ahead of the vblank evasion window. */
		if (new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);

		intel_pre_plane_update(state, crtc);

		if (new_crtc_state->update_pipe)
			intel_encoders_update_pipe(state, crtc);
	}

	intel_fbc_update(state, crtc);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(new_crtc_state);

	commit_pipe_pre_planes(state, crtc);

	if (DISPLAY_VER(dev_priv) >= 9)
		skl_update_planes_on_crtc(state, crtc);
	else
		i9xx_update_planes_on_crtc(state, crtc);

	commit_pipe_post_planes(state, crtc);

	intel_pipe_update_end(new_crtc_state);

	/*
	 * We usually enable FIFO underrun interrupts as part of the
	 * CRTC enable sequence during modesets.  But when we inherit a
	 * valid pipe configuration from the BIOS we need to take care
	 * of enabling them on the CRTC's first fastset.
	 */
	if (new_crtc_state->update_pipe && !modeset &&
	    old_crtc_state->inherited)
		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
9954
/*
 * Fully disable a CRTC that was active in the old state: encoders,
 * planes, pipe CRC, the pipe itself, FBC and its shared DPLL. The
 * ordering of these steps follows the hardware disable sequence and
 * must not be rearranged.
 */
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
					  struct intel_crtc_state *old_crtc_state,
					  struct intel_crtc_state *new_crtc_state,
					  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	/* Callers must never pass a bigjoiner slave state here. */
	drm_WARN_ON(&dev_priv->drm, old_crtc_state->bigjoiner_slave);

	intel_encoders_pre_disable(state, crtc);

	intel_crtc_disable_planes(state, crtc);

	/*
	 * We still need special handling for disabling bigjoiner master
	 * and slaves since for slave we do not have encoder or plls
	 * so we dont need to disable those.
	 */
	if (old_crtc_state->bigjoiner) {
		intel_crtc_disable_planes(state,
					  old_crtc_state->bigjoiner_linked_crtc);
		old_crtc_state->bigjoiner_linked_crtc->active = false;
	}

	/*
	 * We need to disable pipe CRC before disabling the pipe,
	 * or we race against vblank off.
	 */
	intel_crtc_disable_pipe_crc(crtc);

	dev_priv->display.crtc_disable(state, crtc);
	crtc->active = false;
	intel_fbc_disable(crtc);
	intel_disable_shared_dpll(old_crtc_state);

	/* FIXME unify this for all platforms */
	if (!new_crtc_state->hw.active &&
	    !HAS_GMCH(dev_priv) &&
	    dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
}
9996
/*
 * Disable every CRTC that needs a full modeset. Two passes: port sync
 * and MST slave CRTCs first (they must go down before their masters),
 * then everything else that is still active. The 'handled' mask keeps
 * the second pass from disabling a CRTC twice.
 */
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u32 handled = 0;
	int i;

	/* Only disable port sync and MST slaves */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/* bigjoiner CRTCs are handled entirely by the second pass. */
		if (!intel_crtc_needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner)
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		/* In case of Transcoder port Sync master slave CRTCs can be
		 * assigned in any order and we need to make sure that
		 * slave CRTCs are disabled first and then master CRTC since
		 * Slave vblanks are masked till Master Vblanks.
		 */
		if (!is_trans_port_sync_slave(old_crtc_state) &&
		    !intel_dp_mst_is_slave_trans(old_crtc_state))
			continue;

		intel_pre_plane_update(state, crtc);
		intel_old_crtc_state_disables(state, old_crtc_state,
					      new_crtc_state, crtc);
		handled |= BIT(crtc->pipe);
	}

	/* Disable everything else left on */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state) ||
		    (handled & BIT(crtc->pipe)) ||
		    old_crtc_state->bigjoiner_slave)
			continue;

		intel_pre_plane_update(state, crtc);
		/* The bigjoiner slave is torn down via its master. */
		if (old_crtc_state->bigjoiner) {
			struct intel_crtc *slave =
				old_crtc_state->bigjoiner_linked_crtc;

			intel_pre_plane_update(state, slave);
		}

		if (old_crtc_state->hw.active)
			intel_old_crtc_state_disables(state, old_crtc_state,
						      new_crtc_state, crtc);
	}
}
10049
10050 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
10051 {
10052         struct intel_crtc_state *new_crtc_state;
10053         struct intel_crtc *crtc;
10054         int i;
10055
10056         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10057                 if (!new_crtc_state->hw.active)
10058                         continue;
10059
10060                 intel_enable_crtc(state, crtc);
10061                 intel_update_crtc(state, crtc);
10062         }
10063 }
10064
/*
 * skl+ modeset enable path. Pipes must be updated in an order that
 * keeps their DDB allocations from ever overlapping between updates,
 * so updates are sequenced via the 'entries' snapshot and the
 * update_pipes/modeset_pipes masks rather than plain state order.
 */
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
	u8 update_pipes = 0, modeset_pipes = 0;
	int i;

	/*
	 * Classify each active pipe as fastset (update_pipes, seeded with
	 * its old DDB allocation) or full modeset (modeset_pipes).
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if (!new_crtc_state->hw.active)
			continue;

		/* ignore allocations for crtc's that have been turned off. */
		if (!intel_crtc_needs_modeset(new_crtc_state)) {
			entries[pipe] = old_crtc_state->wm.skl.ddb;
			update_pipes |= BIT(pipe);
		} else {
			modeset_pipes |= BIT(pipe);
		}
	}

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other between CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 *
	 * So first lets enable all pipes that do not need a fullmodeset as
	 * those don't have any external dependency.
	 */
	while (update_pipes) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
						    new_crtc_state, i) {
			enum pipe pipe = crtc->pipe;

			if ((update_pipes & BIT(pipe)) == 0)
				continue;

			/*
			 * Defer this pipe while its new DDB allocation still
			 * overlaps another pipe's current allocation; a later
			 * iteration of the outer loop retries it.
			 */
			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
							entries, I915_MAX_PIPES, pipe))
				continue;

			entries[pipe] = new_crtc_state->wm.skl.ddb;
			update_pipes &= ~BIT(pipe);

			intel_update_crtc(state, crtc);

			/*
			 * If this is an already active pipe, it's DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
						 &old_crtc_state->wm.skl.ddb) &&
			    (update_pipes | modeset_pipes))
				intel_wait_for_vblank(dev_priv, pipe);
		}
	}

	/* Modeset pipes still need their plane update pass at the end. */
	update_pipes = modeset_pipes;

	/*
	 * Enable all pipes that needs a modeset and do not depends on other
	 * pipes
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		/* Skip pipes that depend on others; they are enabled below. */
		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
		    is_trans_port_sync_master(new_crtc_state) ||
		    (new_crtc_state->bigjoiner && !new_crtc_state->bigjoiner_slave))
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Then we enable all remaining pipes that depend on other
	 * pipes: MST slaves and port sync masters, big joiner master
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Finally we do the plane updates/etc. for all pipes that got enabled.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((update_pipes & BIT(pipe)) == 0)
			continue;

		/* Freshly enabled pipes must not overlap anything either. */
		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
									entries, I915_MAX_PIPES, pipe));

		entries[pipe] = new_crtc_state->wm.skl.ddb;
		update_pipes &= ~BIT(pipe);

		intel_update_crtc(state, crtc);
	}

	/* Every pipe must have been consumed by one of the passes above. */
	drm_WARN_ON(&dev_priv->drm, modeset_pipes);
	drm_WARN_ON(&dev_priv->drm, update_pipes);
}
10186
/*
 * Drop the final reference on every atomic state queued on the
 * driver's lock-less deferred-free list.
 */
static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
{
	struct intel_atomic_state *state, *next;
	struct llist_node *freed;

	/* Atomically detach the whole list, then put each entry. */
	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
	llist_for_each_entry_safe(state, next, freed, freed)
		drm_atomic_state_put(&state->base);
}
10196
10197 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
10198 {
10199         struct drm_i915_private *dev_priv =
10200                 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
10201
10202         intel_atomic_helper_free_state(dev_priv);
10203 }
10204
/*
 * Block until either the commit's sw fence signals or a GPU reset that
 * needs a modeset is flagged. Uses the prepare_to_wait/condition/
 * schedule pattern on both wait queues simultaneously; the condition
 * checks must come after the prepare_to_wait calls to avoid missing a
 * wakeup.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);
	for (;;) {
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
					      I915_RESET_MODESET),
				&wait_reset, TASK_UNINTERRUPTIBLE);


		/* Stop waiting on fence completion or a pending modeset reset. */
		if (i915_sw_fence_done(&intel_state->commit_ready) ||
		    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
	finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
				  I915_RESET_MODESET),
		    &wait_reset);
}
10231
/*
 * Free the DSBs that were prepared for this commit; they hang off the
 * old CRTC states after the state swap.
 */
static void intel_cleanup_dsbs(struct intel_atomic_state *state)
{
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i)
		intel_dsb_cleanup(old_crtc_state);
}
10242
/*
 * Deferred cleanup for a finished commit: release DSBs and plane
 * resources, mark the commit cleanup done, drop the state reference,
 * then drain the deferred-free list. The order of these calls must be
 * preserved (the state is used by each step before it is put).
 */
static void intel_atomic_cleanup_work(struct work_struct *work)
{
	struct intel_atomic_state *state =
		container_of(work, struct intel_atomic_state, base.commit_work);
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	intel_cleanup_dsbs(state);
	drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
	drm_atomic_helper_commit_cleanup_done(&state->base);
	drm_atomic_state_put(&state->base);

	intel_atomic_helper_free_state(i915);
}
10256
10257 static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
10258 {
10259         struct drm_i915_private *i915 = to_i915(state->base.dev);
10260         struct intel_plane *plane;
10261         struct intel_plane_state *plane_state;
10262         int i;
10263
10264         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
10265                 struct drm_framebuffer *fb = plane_state->hw.fb;
10266                 int ret;
10267
10268                 if (!fb ||
10269                     fb->modifier != I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
10270                         continue;
10271
10272                 /*
10273                  * The layout of the fast clear color value expected by HW
10274                  * (the DRM ABI requiring this value to be located in fb at offset 0 of plane#2):
10275                  * - 4 x 4 bytes per-channel value
10276                  *   (in surface type specific float/int format provided by the fb user)
10277                  * - 8 bytes native color value used by the display
10278                  *   (converted/written by GPU during a fast clear operation using the
10279                  *    above per-channel values)
10280                  *
10281                  * The commit's FB prepare hook already ensured that FB obj is pinned and the
10282                  * caller made sure that the object is synced wrt. the related color clear value
10283                  * GPU write on it.
10284                  */
10285                 ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
10286                                                      fb->offsets[2] + 16,
10287                                                      &plane_state->ccval,
10288                                                      sizeof(plane_state->ccval));
10289                 /* The above could only fail if the FB obj has an unexpected backing store type. */
10290                 drm_WARN_ON(&i915->drm, ret);
10291         }
10292 }
10293
10294 static void intel_atomic_commit_tail(struct intel_atomic_state *state)
10295 {
10296         struct drm_device *dev = state->base.dev;
10297         struct drm_i915_private *dev_priv = to_i915(dev);
10298         struct intel_crtc_state *new_crtc_state, *old_crtc_state;
10299         struct intel_crtc *crtc;
10300         u64 put_domains[I915_MAX_PIPES] = {};
10301         intel_wakeref_t wakeref = 0;
10302         int i;
10303
10304         intel_atomic_commit_fence_wait(state);
10305
10306         drm_atomic_helper_wait_for_dependencies(&state->base);
10307
10308         if (state->modeset)
10309                 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
10310
10311         intel_atomic_prepare_plane_clear_colors(state);
10312
10313         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10314                                             new_crtc_state, i) {
10315                 if (intel_crtc_needs_modeset(new_crtc_state) ||
10316                     new_crtc_state->update_pipe) {
10317
10318                         put_domains[crtc->pipe] =
10319                                 modeset_get_crtc_power_domains(new_crtc_state);
10320                 }
10321         }
10322
10323         intel_commit_modeset_disables(state);
10324
10325         /* FIXME: Eventually get rid of our crtc->config pointer */
10326         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
10327                 crtc->config = new_crtc_state;
10328
10329         if (state->modeset) {
10330                 drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
10331
10332                 intel_set_cdclk_pre_plane_update(state);
10333
10334                 intel_modeset_verify_disabled(dev_priv, state);
10335         }
10336
10337         intel_sagv_pre_plane_update(state);
10338
10339         /* Complete the events for pipes that have now been disabled */
10340         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10341                 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
10342
10343                 /* Complete events for now disable pipes here. */
10344                 if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
10345                         spin_lock_irq(&dev->event_lock);
10346                         drm_crtc_send_vblank_event(&crtc->base,
10347                                                    new_crtc_state->uapi.event);
10348                         spin_unlock_irq(&dev->event_lock);
10349
10350                         new_crtc_state->uapi.event = NULL;
10351                 }
10352         }
10353
10354         if (state->modeset)
10355                 intel_encoders_update_prepare(state);
10356
10357         intel_dbuf_pre_plane_update(state);
10358
10359         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10360                 if (new_crtc_state->uapi.async_flip)
10361                         intel_crtc_enable_flip_done(state, crtc);
10362         }
10363
10364         /* Now enable the clocks, plane, pipe, and connectors that we set up. */
10365         dev_priv->display.commit_modeset_enables(state);
10366
10367         if (state->modeset) {
10368                 intel_encoders_update_complete(state);
10369
10370                 intel_set_cdclk_post_plane_update(state);
10371         }
10372
10373         /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
10374          * already, but still need the state for the delayed optimization. To
10375          * fix this:
10376          * - wrap the optimization/post_plane_update stuff into a per-crtc work.
10377          * - schedule that vblank worker _before_ calling hw_done
10378          * - at the start of commit_tail, cancel it _synchrously
10379          * - switch over to the vblank wait helper in the core after that since
10380          *   we don't need out special handling any more.
10381          */
10382         drm_atomic_helper_wait_for_flip_done(dev, &state->base);
10383
10384         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10385                 if (new_crtc_state->uapi.async_flip)
10386                         intel_crtc_disable_flip_done(state, crtc);
10387
10388                 if (new_crtc_state->hw.active &&
10389                     !intel_crtc_needs_modeset(new_crtc_state) &&
10390                     !new_crtc_state->preload_luts &&
10391                     (new_crtc_state->uapi.color_mgmt_changed ||
10392                      new_crtc_state->update_pipe))
10393                         intel_color_load_luts(new_crtc_state);
10394         }
10395
10396         /*
10397          * Now that the vblank has passed, we can go ahead and program the
10398          * optimal watermarks on platforms that need two-step watermark
10399          * programming.
10400          *
10401          * TODO: Move this (and other cleanup) to an async worker eventually.
10402          */
10403         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10404                                             new_crtc_state, i) {
10405                 /*
10406                  * Gen2 reports pipe underruns whenever all planes are disabled.
10407                  * So re-enable underrun reporting after some planes get enabled.
10408                  *
10409                  * We do this before .optimize_watermarks() so that we have a
10410                  * chance of catching underruns with the intermediate watermarks
10411                  * vs. the new plane configuration.
10412                  */
10413                 if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
10414                         intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
10415
10416                 if (dev_priv->display.optimize_watermarks)
10417                         dev_priv->display.optimize_watermarks(state, crtc);
10418         }
10419
10420         intel_dbuf_post_plane_update(state);
10421
10422         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10423                 intel_post_plane_update(state, crtc);
10424
10425                 modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);
10426
10427                 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
10428
10429                 /*
10430                  * DSB cleanup is done in cleanup_work aligning with framebuffer
10431                  * cleanup. So copy and reset the dsb structure to sync with
10432                  * commit_done and later do dsb cleanup in cleanup_work.
10433                  */
10434                 old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
10435         }
10436
10437         /* Underruns don't always raise interrupts, so check manually */
10438         intel_check_cpu_fifo_underruns(dev_priv);
10439         intel_check_pch_fifo_underruns(dev_priv);
10440
10441         if (state->modeset)
10442                 intel_verify_planes(state);
10443
10444         intel_sagv_post_plane_update(state);
10445
10446         drm_atomic_helper_commit_hw_done(&state->base);
10447
10448         if (state->modeset) {
10449                 /* As one of the primary mmio accessors, KMS has a high
10450                  * likelihood of triggering bugs in unclaimed access. After we
10451                  * finish modesetting, see if an error has been flagged, and if
10452                  * so enable debugging for the next modeset - and hope we catch
10453                  * the culprit.
10454                  */
10455                 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
10456                 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
10457         }
10458         intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
10459
10460         /*
10461          * Defer the cleanup of the old state to a separate worker to not
10462          * impede the current task (userspace for blocking modesets) that
10463          * are executed inline. For out-of-line asynchronous modesets/flips,
10464          * deferring to a new worker seems overkill, but we would place a
10465          * schedule point (cond_resched()) here anyway to keep latencies
10466          * down.
10467          */
10468         INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
10469         queue_work(system_highpri_wq, &state->base.commit_work);
10470 }
10471
10472 static void intel_atomic_commit_work(struct work_struct *work)
10473 {
10474         struct intel_atomic_state *state =
10475                 container_of(work, struct intel_atomic_state, base.commit_work);
10476
10477         intel_atomic_commit_tail(state);
10478 }
10479
10480 static int __i915_sw_fence_call
10481 intel_atomic_commit_ready(struct i915_sw_fence *fence,
10482                           enum i915_sw_fence_notify notify)
10483 {
10484         struct intel_atomic_state *state =
10485                 container_of(fence, struct intel_atomic_state, commit_ready);
10486
10487         switch (notify) {
10488         case FENCE_COMPLETE:
10489                 /* we do blocking waits in the worker, nothing to do here */
10490                 break;
10491         case FENCE_FREE:
10492                 {
10493                         struct intel_atomic_helper *helper =
10494                                 &to_i915(state->base.dev)->atomic_helper;
10495
10496                         if (llist_add(&state->freed, &helper->free_list))
10497                                 schedule_work(&helper->free_work);
10498                         break;
10499                 }
10500         }
10501
10502         return NOTIFY_DONE;
10503 }
10504
10505 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
10506 {
10507         struct intel_plane_state *old_plane_state, *new_plane_state;
10508         struct intel_plane *plane;
10509         int i;
10510
10511         for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
10512                                              new_plane_state, i)
10513                 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
10514                                         to_intel_frontbuffer(new_plane_state->hw.fb),
10515                                         plane->frontbuffer_bit);
10516 }
10517
/*
 * Top-level atomic commit implementation (drm_mode_config_funcs.atomic_commit).
 *
 * Prepares the commit, swaps in the new state, and then either runs
 * intel_atomic_commit_tail() inline (blocking commits) or queues it on
 * a workqueue (nonblocking commits).
 */
static int intel_atomic_commit(struct drm_device *dev,
                               struct drm_atomic_state *_state,
                               bool nonblock)
{
        struct intel_atomic_state *state = to_intel_atomic_state(_state);
        struct drm_i915_private *dev_priv = to_i915(dev);
        int ret = 0;

        /* Held across the whole commit; dropped on the error paths below
         * (and otherwise presumably by the commit tail — note it is not
         * released on the success path of this function). */
        state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

        drm_atomic_state_get(&state->base);
        i915_sw_fence_init(&state->commit_ready,
                           intel_atomic_commit_ready);

        /*
         * The intel_legacy_cursor_update() fast path takes care
         * of avoiding the vblank waits for simple cursor
         * movement and flips. For cursor on/off and size changes,
         * we want to perform the vblank waits so that watermark
         * updates happen during the correct frames. Gen9+ have
         * double buffered watermarks and so shouldn't need this.
         *
         * Unset state->legacy_cursor_update before the call to
         * drm_atomic_helper_setup_commit() because otherwise
         * drm_atomic_helper_wait_for_flip_done() is a noop and
         * we get FIFO underruns because we didn't wait
         * for vblank.
         *
         * FIXME doing watermarks and fb cleanup from a vblank worker
         * (assuming we had any) would solve these problems.
         */
        if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
                struct intel_crtc_state *new_crtc_state;
                struct intel_crtc *crtc;
                int i;

                for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
                        if (new_crtc_state->wm.need_postvbl_update ||
                            new_crtc_state->update_wm_post)
                                state->base.legacy_cursor_update = false;
        }

        ret = intel_atomic_prepare_commit(state);
        if (ret) {
                drm_dbg_atomic(&dev_priv->drm,
                               "Preparing state failed with %i\n", ret);
                /* Fire the fence so the FENCE_FREE path can release the state. */
                i915_sw_fence_commit(&state->commit_ready);
                intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
                return ret;
        }

        ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
        if (!ret)
                ret = drm_atomic_helper_swap_state(&state->base, true);
        if (!ret)
                intel_atomic_swap_global_state(state);

        if (ret) {
                struct intel_crtc_state *new_crtc_state;
                struct intel_crtc *crtc;
                int i;

                i915_sw_fence_commit(&state->commit_ready);

                /* The per-crtc DSBs won't be consumed by a commit; free them. */
                for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
                        intel_dsb_cleanup(new_crtc_state);

                drm_atomic_helper_cleanup_planes(dev, &state->base);
                intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
                return ret;
        }
        intel_shared_dpll_swap_state(state);
        intel_atomic_track_fbs(state);

        /* Second state reference: consumed by the commit tail / worker. */
        drm_atomic_state_get(&state->base);
        INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

        i915_sw_fence_commit(&state->commit_ready);
        if (nonblock && state->modeset) {
                queue_work(dev_priv->modeset_wq, &state->base.commit_work);
        } else if (nonblock) {
                queue_work(dev_priv->flip_wq, &state->base.commit_work);
        } else {
                /*
                 * Flush pending nonblocking modesets first so this blocking
                 * commit observes them in order.
                 */
                if (state->modeset)
                        flush_workqueue(dev_priv->modeset_wq);
                intel_atomic_commit_tail(state);
        }

        return 0;
}
10608
/*
 * State for a deferred RPS boost: queued on a CRTC's vblank waitqueue by
 * add_rps_boost_after_vblank(); do_rps_boost() runs as the waitqueue
 * callback and frees this structure.
 */
struct wait_rps_boost {
        /* waitqueue entry; ->func is do_rps_boost() */
        struct wait_queue_entry wait;

        /* CRTC whose vblank waitqueue we sit on (vblank reference held) */
        struct drm_crtc *crtc;
        /* request to boost; reference held until do_rps_boost() runs */
        struct i915_request *request;
};
10615
10616 static int do_rps_boost(struct wait_queue_entry *_wait,
10617                         unsigned mode, int sync, void *key)
10618 {
10619         struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
10620         struct i915_request *rq = wait->request;
10621
10622         /*
10623          * If we missed the vblank, but the request is already running it
10624          * is reasonable to assume that it will complete before the next
10625          * vblank without our intervention, so leave RPS alone.
10626          */
10627         if (!i915_request_started(rq))
10628                 intel_rps_boost(rq);
10629         i915_request_put(rq);
10630
10631         drm_crtc_vblank_put(wait->crtc);
10632
10633         list_del(&wait->wait.entry);
10634         kfree(wait);
10635         return 1;
10636 }
10637
10638 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
10639                                        struct dma_fence *fence)
10640 {
10641         struct wait_rps_boost *wait;
10642
10643         if (!dma_fence_is_i915(fence))
10644                 return;
10645
10646         if (DISPLAY_VER(to_i915(crtc->dev)) < 6)
10647                 return;
10648
10649         if (drm_crtc_vblank_get(crtc))
10650                 return;
10651
10652         wait = kmalloc(sizeof(*wait), GFP_KERNEL);
10653         if (!wait) {
10654                 drm_crtc_vblank_put(crtc);
10655                 return;
10656         }
10657
10658         wait->request = to_request(dma_fence_get(fence));
10659         wait->crtc = crtc;
10660
10661         wait->wait.func = do_rps_boost;
10662         wait->wait.flags = 0;
10663
10664         add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
10665 }
10666
10667 int intel_plane_pin_fb(struct intel_plane_state *plane_state)
10668 {
10669         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
10670         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10671         struct drm_framebuffer *fb = plane_state->hw.fb;
10672         struct i915_vma *vma;
10673         bool phys_cursor =
10674                 plane->id == PLANE_CURSOR &&
10675                 INTEL_INFO(dev_priv)->display.cursor_needs_physical;
10676
10677         if (!intel_fb_uses_dpt(fb)) {
10678                 vma = intel_pin_and_fence_fb_obj(fb, phys_cursor,
10679                                                  &plane_state->view.gtt,
10680                                                  intel_plane_uses_fence(plane_state),
10681                                                  &plane_state->flags);
10682                 if (IS_ERR(vma))
10683                         return PTR_ERR(vma);
10684
10685                 plane_state->ggtt_vma = vma;
10686         } else {
10687                 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
10688
10689                 vma = intel_dpt_pin(intel_fb->dpt_vm);
10690                 if (IS_ERR(vma))
10691                         return PTR_ERR(vma);
10692
10693                 plane_state->ggtt_vma = vma;
10694
10695                 vma = intel_pin_fb_obj_dpt(fb, &plane_state->view.gtt, false,
10696                                            &plane_state->flags, intel_fb->dpt_vm);
10697                 if (IS_ERR(vma)) {
10698                         intel_dpt_unpin(intel_fb->dpt_vm);
10699                         plane_state->ggtt_vma = NULL;
10700                         return PTR_ERR(vma);
10701                 }
10702
10703                 plane_state->dpt_vma = vma;
10704
10705                 WARN_ON(plane_state->ggtt_vma == plane_state->dpt_vma);
10706         }
10707
10708         return 0;
10709 }
10710
10711 void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
10712 {
10713         struct drm_framebuffer *fb = old_plane_state->hw.fb;
10714         struct i915_vma *vma;
10715
10716         if (!intel_fb_uses_dpt(fb)) {
10717                 vma = fetch_and_zero(&old_plane_state->ggtt_vma);
10718                 if (vma)
10719                         intel_unpin_fb_vma(vma, old_plane_state->flags);
10720         } else {
10721                 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
10722
10723                 vma = fetch_and_zero(&old_plane_state->dpt_vma);
10724                 if (vma)
10725                         intel_unpin_fb_vma(vma, old_plane_state->flags);
10726
10727                 vma = fetch_and_zero(&old_plane_state->ggtt_vma);
10728                 if (vma)
10729                         intel_dpt_unpin(intel_fb->dpt_vm);
10730         }
10731 }
10732
10733 /**
10734  * intel_prepare_plane_fb - Prepare fb for usage on plane
10735  * @_plane: drm plane to prepare for
10736  * @_new_plane_state: the plane state being prepared
10737  *
10738  * Prepares a framebuffer for usage on a display plane.  Generally this
10739  * involves pinning the underlying object and updating the frontbuffer tracking
10740  * bits.  Some older platforms need special physical address handling for
10741  * cursor planes.
10742  *
10743  * Returns 0 on success, negative error code on failure.
10744  */
10745 int
10746 intel_prepare_plane_fb(struct drm_plane *_plane,
10747                        struct drm_plane_state *_new_plane_state)
10748 {
10749         struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY };
10750         struct intel_plane *plane = to_intel_plane(_plane);
10751         struct intel_plane_state *new_plane_state =
10752                 to_intel_plane_state(_new_plane_state);
10753         struct intel_atomic_state *state =
10754                 to_intel_atomic_state(new_plane_state->uapi.state);
10755         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10756         const struct intel_plane_state *old_plane_state =
10757                 intel_atomic_get_old_plane_state(state, plane);
10758         struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
10759         struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
10760         int ret;
10761
10762         if (old_obj) {
10763                 const struct intel_crtc_state *crtc_state =
10764                         intel_atomic_get_new_crtc_state(state,
10765                                                         to_intel_crtc(old_plane_state->hw.crtc));
10766
10767                 /* Big Hammer, we also need to ensure that any pending
10768                  * MI_WAIT_FOR_EVENT inside a user batch buffer on the
10769                  * current scanout is retired before unpinning the old
10770                  * framebuffer. Note that we rely on userspace rendering
10771                  * into the buffer attached to the pipe they are waiting
10772                  * on. If not, userspace generates a GPU hang with IPEHR
10773                  * point to the MI_WAIT_FOR_EVENT.
10774                  *
10775                  * This should only fail upon a hung GPU, in which case we
10776                  * can safely continue.
10777                  */
10778                 if (intel_crtc_needs_modeset(crtc_state)) {
10779                         ret = i915_sw_fence_await_reservation(&state->commit_ready,
10780                                                               old_obj->base.resv, NULL,
10781                                                               false, 0,
10782                                                               GFP_KERNEL);
10783                         if (ret < 0)
10784                                 return ret;
10785                 }
10786         }
10787
10788         if (new_plane_state->uapi.fence) { /* explicit fencing */
10789                 i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
10790                                              &attr);
10791                 ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
10792                                                     new_plane_state->uapi.fence,
10793                                                     i915_fence_timeout(dev_priv),
10794                                                     GFP_KERNEL);
10795                 if (ret < 0)
10796                         return ret;
10797         }
10798
10799         if (!obj)
10800                 return 0;
10801
10802
10803         ret = intel_plane_pin_fb(new_plane_state);
10804         if (ret)
10805                 return ret;
10806
10807         i915_gem_object_wait_priority(obj, 0, &attr);
10808         i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
10809
10810         if (!new_plane_state->uapi.fence) { /* implicit fencing */
10811                 struct dma_fence *fence;
10812
10813                 ret = i915_sw_fence_await_reservation(&state->commit_ready,
10814                                                       obj->base.resv, NULL,
10815                                                       false,
10816                                                       i915_fence_timeout(dev_priv),
10817                                                       GFP_KERNEL);
10818                 if (ret < 0)
10819                         goto unpin_fb;
10820
10821                 fence = dma_resv_get_excl_unlocked(obj->base.resv);
10822                 if (fence) {
10823                         add_rps_boost_after_vblank(new_plane_state->hw.crtc,
10824                                                    fence);
10825                         dma_fence_put(fence);
10826                 }
10827         } else {
10828                 add_rps_boost_after_vblank(new_plane_state->hw.crtc,
10829                                            new_plane_state->uapi.fence);
10830         }
10831
10832         /*
10833          * We declare pageflips to be interactive and so merit a small bias
10834          * towards upclocking to deliver the frame on time. By only changing
10835          * the RPS thresholds to sample more regularly and aim for higher
10836          * clocks we can hopefully deliver low power workloads (like kodi)
10837          * that are not quite steady state without resorting to forcing
10838          * maximum clocks following a vblank miss (see do_rps_boost()).
10839          */
10840         if (!state->rps_interactive) {
10841                 intel_rps_mark_interactive(&dev_priv->gt.rps, true);
10842                 state->rps_interactive = true;
10843         }
10844
10845         return 0;
10846
10847 unpin_fb:
10848         intel_plane_unpin_fb(new_plane_state);
10849
10850         return ret;
10851 }
10852
10853 /**
10854  * intel_cleanup_plane_fb - Cleans up an fb after plane use
10855  * @plane: drm plane to clean up for
10856  * @_old_plane_state: the state from the previous modeset
10857  *
10858  * Cleans up a framebuffer that has just been removed from a plane.
10859  */
10860 void
10861 intel_cleanup_plane_fb(struct drm_plane *plane,
10862                        struct drm_plane_state *_old_plane_state)
10863 {
10864         struct intel_plane_state *old_plane_state =
10865                 to_intel_plane_state(_old_plane_state);
10866         struct intel_atomic_state *state =
10867                 to_intel_atomic_state(old_plane_state->uapi.state);
10868         struct drm_i915_private *dev_priv = to_i915(plane->dev);
10869         struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
10870
10871         if (!obj)
10872                 return;
10873
10874         if (state->rps_interactive) {
10875                 intel_rps_mark_interactive(&dev_priv->gt.rps, false);
10876                 state->rps_interactive = false;
10877         }
10878
10879         /* Should only be called after a successful intel_prepare_plane_fb()! */
10880         intel_plane_unpin_fb(old_plane_state);
10881 }
10882
10883 /**
10884  * intel_plane_destroy - destroy a plane
10885  * @plane: plane to destroy
10886  *
10887  * Common destruction function for all types of planes (primary, cursor,
10888  * sprite).
10889  */
10890 void intel_plane_destroy(struct drm_plane *plane)
10891 {
10892         drm_plane_cleanup(plane);
10893         kfree(to_intel_plane(plane));
10894 }
10895
10896 static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
10897 {
10898         struct intel_plane *plane;
10899
10900         for_each_intel_plane(&dev_priv->drm, plane) {
10901                 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
10902                                                                   plane->pipe);
10903
10904                 plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
10905         }
10906 }
10907
10908
10909 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
10910                                       struct drm_file *file)
10911 {
10912         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
10913         struct drm_crtc *drmmode_crtc;
10914         struct intel_crtc *crtc;
10915
10916         drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
10917         if (!drmmode_crtc)
10918                 return -ENOENT;
10919
10920         crtc = to_intel_crtc(drmmode_crtc);
10921         pipe_from_crtc_id->pipe = crtc->pipe;
10922
10923         return 0;
10924 }
10925
10926 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
10927 {
10928         struct drm_device *dev = encoder->base.dev;
10929         struct intel_encoder *source_encoder;
10930         u32 possible_clones = 0;
10931
10932         for_each_intel_encoder(dev, source_encoder) {
10933                 if (encoders_cloneable(encoder, source_encoder))
10934                         possible_clones |= drm_encoder_mask(&source_encoder->base);
10935         }
10936
10937         return possible_clones;
10938 }
10939
10940 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
10941 {
10942         struct drm_device *dev = encoder->base.dev;
10943         struct intel_crtc *crtc;
10944         u32 possible_crtcs = 0;
10945
10946         for_each_intel_crtc(dev, crtc) {
10947                 if (encoder->pipe_mask & BIT(crtc->pipe))
10948                         possible_crtcs |= drm_crtc_mask(&crtc->base);
10949         }
10950
10951         return possible_crtcs;
10952 }
10953
10954 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
10955 {
10956         if (!IS_MOBILE(dev_priv))
10957                 return false;
10958
10959         if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
10960                 return false;
10961
10962         if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
10963                 return false;
10964
10965         return true;
10966 }
10967
10968 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
10969 {
10970         if (DISPLAY_VER(dev_priv) >= 9)
10971                 return false;
10972
10973         if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
10974                 return false;
10975
10976         if (HAS_PCH_LPT_H(dev_priv) &&
10977             intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
10978                 return false;
10979
10980         /* DDI E can't be used if DDI A requires 4 lanes */
10981         if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
10982                 return false;
10983
10984         if (!dev_priv->vbt.int_crt_support)
10985                 return false;
10986
10987         return true;
10988 }
10989
10990 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
10991 {
10992         struct intel_encoder *encoder;
10993         bool dpd_is_edp = false;
10994
10995         intel_pps_unlock_regs_wa(dev_priv);
10996
10997         if (!HAS_DISPLAY(dev_priv))
10998                 return;
10999
11000         if (IS_DG2(dev_priv)) {
11001                 intel_ddi_init(dev_priv, PORT_A);
11002                 intel_ddi_init(dev_priv, PORT_B);
11003                 intel_ddi_init(dev_priv, PORT_C);
11004                 intel_ddi_init(dev_priv, PORT_D_XELPD);
11005         } else if (IS_ALDERLAKE_P(dev_priv)) {
11006                 intel_ddi_init(dev_priv, PORT_A);
11007                 intel_ddi_init(dev_priv, PORT_B);
11008                 intel_ddi_init(dev_priv, PORT_TC1);
11009                 intel_ddi_init(dev_priv, PORT_TC2);
11010                 intel_ddi_init(dev_priv, PORT_TC3);
11011                 intel_ddi_init(dev_priv, PORT_TC4);
11012         } else if (IS_ALDERLAKE_S(dev_priv)) {
11013                 intel_ddi_init(dev_priv, PORT_A);
11014                 intel_ddi_init(dev_priv, PORT_TC1);
11015                 intel_ddi_init(dev_priv, PORT_TC2);
11016                 intel_ddi_init(dev_priv, PORT_TC3);
11017                 intel_ddi_init(dev_priv, PORT_TC4);
11018         } else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
11019                 intel_ddi_init(dev_priv, PORT_A);
11020                 intel_ddi_init(dev_priv, PORT_B);
11021                 intel_ddi_init(dev_priv, PORT_TC1);
11022                 intel_ddi_init(dev_priv, PORT_TC2);
11023         } else if (DISPLAY_VER(dev_priv) >= 12) {
11024                 intel_ddi_init(dev_priv, PORT_A);
11025                 intel_ddi_init(dev_priv, PORT_B);
11026                 intel_ddi_init(dev_priv, PORT_TC1);
11027                 intel_ddi_init(dev_priv, PORT_TC2);
11028                 intel_ddi_init(dev_priv, PORT_TC3);
11029                 intel_ddi_init(dev_priv, PORT_TC4);
11030                 intel_ddi_init(dev_priv, PORT_TC5);
11031                 intel_ddi_init(dev_priv, PORT_TC6);
11032                 icl_dsi_init(dev_priv);
11033         } else if (IS_JSL_EHL(dev_priv)) {
11034                 intel_ddi_init(dev_priv, PORT_A);
11035                 intel_ddi_init(dev_priv, PORT_B);
11036                 intel_ddi_init(dev_priv, PORT_C);
11037                 intel_ddi_init(dev_priv, PORT_D);
11038                 icl_dsi_init(dev_priv);
11039         } else if (DISPLAY_VER(dev_priv) == 11) {
11040                 intel_ddi_init(dev_priv, PORT_A);
11041                 intel_ddi_init(dev_priv, PORT_B);
11042                 intel_ddi_init(dev_priv, PORT_C);
11043                 intel_ddi_init(dev_priv, PORT_D);
11044                 intel_ddi_init(dev_priv, PORT_E);
11045                 intel_ddi_init(dev_priv, PORT_F);
11046                 icl_dsi_init(dev_priv);
11047         } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
11048                 intel_ddi_init(dev_priv, PORT_A);
11049                 intel_ddi_init(dev_priv, PORT_B);
11050                 intel_ddi_init(dev_priv, PORT_C);
11051                 vlv_dsi_init(dev_priv);
11052         } else if (DISPLAY_VER(dev_priv) >= 9) {
11053                 intel_ddi_init(dev_priv, PORT_A);
11054                 intel_ddi_init(dev_priv, PORT_B);
11055                 intel_ddi_init(dev_priv, PORT_C);
11056                 intel_ddi_init(dev_priv, PORT_D);
11057                 intel_ddi_init(dev_priv, PORT_E);
11058         } else if (HAS_DDI(dev_priv)) {
11059                 u32 found;
11060
11061                 if (intel_ddi_crt_present(dev_priv))
11062                         intel_crt_init(dev_priv);
11063
11064                 /* Haswell uses DDI functions to detect digital outputs. */
11065                 found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
11066                 if (found)
11067                         intel_ddi_init(dev_priv, PORT_A);
11068
11069                 found = intel_de_read(dev_priv, SFUSE_STRAP);
11070                 if (found & SFUSE_STRAP_DDIB_DETECTED)
11071                         intel_ddi_init(dev_priv, PORT_B);
11072                 if (found & SFUSE_STRAP_DDIC_DETECTED)
11073                         intel_ddi_init(dev_priv, PORT_C);
11074                 if (found & SFUSE_STRAP_DDID_DETECTED)
11075                         intel_ddi_init(dev_priv, PORT_D);
11076                 if (found & SFUSE_STRAP_DDIF_DETECTED)
11077                         intel_ddi_init(dev_priv, PORT_F);
11078         } else if (HAS_PCH_SPLIT(dev_priv)) {
11079                 int found;
11080
11081                 /*
11082                  * intel_edp_init_connector() depends on this completing first,
11083                  * to prevent the registration of both eDP and LVDS and the
11084                  * incorrect sharing of the PPS.
11085                  */
11086                 intel_lvds_init(dev_priv);
11087                 intel_crt_init(dev_priv);
11088
11089                 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
11090
11091                 if (ilk_has_edp_a(dev_priv))
11092                         g4x_dp_init(dev_priv, DP_A, PORT_A);
11093
11094                 if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
11095                         /* PCH SDVOB multiplex with HDMIB */
11096                         found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
11097                         if (!found)
11098                                 g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
11099                         if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
11100                                 g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
11101                 }
11102
11103                 if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
11104                         g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
11105
11106                 if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
11107                         g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
11108
11109                 if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
11110                         g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);
11111
11112                 if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
11113                         g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
11114         } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
11115                 bool has_edp, has_port;
11116
11117                 if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
11118                         intel_crt_init(dev_priv);
11119
11120                 /*
11121                  * The DP_DETECTED bit is the latched state of the DDC
11122                  * SDA pin at boot. However since eDP doesn't require DDC
11123                  * (no way to plug in a DP->HDMI dongle) the DDC pins for
11124                  * eDP ports may have been muxed to an alternate function.
11125                  * Thus we can't rely on the DP_DETECTED bit alone to detect
11126                  * eDP ports. Consult the VBT as well as DP_DETECTED to
11127                  * detect eDP ports.
11128                  *
11129                  * Sadly the straps seem to be missing sometimes even for HDMI
11130                  * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
11131                  * and VBT for the presence of the port. Additionally we can't
11132                  * trust the port type the VBT declares as we've seen at least
11133                  * HDMI ports that the VBT claim are DP or eDP.
11134                  */
11135                 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
11136                 has_port = intel_bios_is_port_present(dev_priv, PORT_B);
11137                 if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
11138                         has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
11139                 if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
11140                         g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
11141
11142                 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
11143                 has_port = intel_bios_is_port_present(dev_priv, PORT_C);
11144                 if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
11145                         has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
11146                 if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
11147                         g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
11148
11149                 if (IS_CHERRYVIEW(dev_priv)) {
11150                         /*
11151                          * eDP not supported on port D,
11152                          * so no need to worry about it
11153                          */
11154                         has_port = intel_bios_is_port_present(dev_priv, PORT_D);
11155                         if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
11156                                 g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
11157                         if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
11158                                 g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
11159                 }
11160
11161                 vlv_dsi_init(dev_priv);
11162         } else if (IS_PINEVIEW(dev_priv)) {
11163                 intel_lvds_init(dev_priv);
11164                 intel_crt_init(dev_priv);
11165         } else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
11166                 bool found = false;
11167
11168                 if (IS_MOBILE(dev_priv))
11169                         intel_lvds_init(dev_priv);
11170
11171                 intel_crt_init(dev_priv);
11172
11173                 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
11174                         drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
11175                         found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
11176                         if (!found && IS_G4X(dev_priv)) {
11177                                 drm_dbg_kms(&dev_priv->drm,
11178                                             "probing HDMI on SDVOB\n");
11179                                 g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
11180                         }
11181
11182                         if (!found && IS_G4X(dev_priv))
11183                                 g4x_dp_init(dev_priv, DP_B, PORT_B);
11184                 }
11185
11186                 /* Before G4X SDVOC doesn't have its own detect register */
11187
11188                 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
11189                         drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
11190                         found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
11191                 }
11192
11193                 if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
11194
11195                         if (IS_G4X(dev_priv)) {
11196                                 drm_dbg_kms(&dev_priv->drm,
11197                                             "probing HDMI on SDVOC\n");
11198                                 g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
11199                         }
11200                         if (IS_G4X(dev_priv))
11201                                 g4x_dp_init(dev_priv, DP_C, PORT_C);
11202                 }
11203
11204                 if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
11205                         g4x_dp_init(dev_priv, DP_D, PORT_D);
11206
11207                 if (SUPPORTS_TV(dev_priv))
11208                         intel_tv_init(dev_priv);
11209         } else if (DISPLAY_VER(dev_priv) == 2) {
11210                 if (IS_I85X(dev_priv))
11211                         intel_lvds_init(dev_priv);
11212
11213                 intel_crt_init(dev_priv);
11214                 intel_dvo_init(dev_priv);
11215         }
11216
11217         for_each_intel_encoder(&dev_priv->drm, encoder) {
11218                 encoder->base.possible_crtcs =
11219                         intel_encoder_possible_crtcs(encoder);
11220                 encoder->base.possible_clones =
11221                         intel_encoder_possible_clones(encoder);
11222         }
11223
11224         intel_init_pch_refclk(dev_priv);
11225
11226         drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
11227 }
11228
11229 static enum drm_mode_status
11230 intel_mode_valid(struct drm_device *dev,
11231                  const struct drm_display_mode *mode)
11232 {
11233         struct drm_i915_private *dev_priv = to_i915(dev);
11234         int hdisplay_max, htotal_max;
11235         int vdisplay_max, vtotal_max;
11236
11237         /*
11238          * Can't reject DBLSCAN here because Xorg ddxen can add piles
11239          * of DBLSCAN modes to the output's mode list when they detect
11240          * the scaling mode property on the connector. And they don't
11241          * ask the kernel to validate those modes in any way until
11242          * modeset time at which point the client gets a protocol error.
11243          * So in order to not upset those clients we silently ignore the
11244          * DBLSCAN flag on such connectors. For other connectors we will
11245          * reject modes with the DBLSCAN flag in encoder->compute_config().
11246          * And we always reject DBLSCAN modes in connector->mode_valid()
11247          * as we never want such modes on the connector's mode list.
11248          */
11249
11250         if (mode->vscan > 1)
11251                 return MODE_NO_VSCAN;
11252
11253         if (mode->flags & DRM_MODE_FLAG_HSKEW)
11254                 return MODE_H_ILLEGAL;
11255
11256         if (mode->flags & (DRM_MODE_FLAG_CSYNC |
11257                            DRM_MODE_FLAG_NCSYNC |
11258                            DRM_MODE_FLAG_PCSYNC))
11259                 return MODE_HSYNC;
11260
11261         if (mode->flags & (DRM_MODE_FLAG_BCAST |
11262                            DRM_MODE_FLAG_PIXMUX |
11263                            DRM_MODE_FLAG_CLKDIV2))
11264                 return MODE_BAD;
11265
11266         /* Transcoder timing limits */
11267         if (DISPLAY_VER(dev_priv) >= 11) {
11268                 hdisplay_max = 16384;
11269                 vdisplay_max = 8192;
11270                 htotal_max = 16384;
11271                 vtotal_max = 8192;
11272         } else if (DISPLAY_VER(dev_priv) >= 9 ||
11273                    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
11274                 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
11275                 vdisplay_max = 4096;
11276                 htotal_max = 8192;
11277                 vtotal_max = 8192;
11278         } else if (DISPLAY_VER(dev_priv) >= 3) {
11279                 hdisplay_max = 4096;
11280                 vdisplay_max = 4096;
11281                 htotal_max = 8192;
11282                 vtotal_max = 8192;
11283         } else {
11284                 hdisplay_max = 2048;
11285                 vdisplay_max = 2048;
11286                 htotal_max = 4096;
11287                 vtotal_max = 4096;
11288         }
11289
11290         if (mode->hdisplay > hdisplay_max ||
11291             mode->hsync_start > htotal_max ||
11292             mode->hsync_end > htotal_max ||
11293             mode->htotal > htotal_max)
11294                 return MODE_H_ILLEGAL;
11295
11296         if (mode->vdisplay > vdisplay_max ||
11297             mode->vsync_start > vtotal_max ||
11298             mode->vsync_end > vtotal_max ||
11299             mode->vtotal > vtotal_max)
11300                 return MODE_V_ILLEGAL;
11301
11302         if (DISPLAY_VER(dev_priv) >= 5) {
11303                 if (mode->hdisplay < 64 ||
11304                     mode->htotal - mode->hdisplay < 32)
11305                         return MODE_H_ILLEGAL;
11306
11307                 if (mode->vtotal - mode->vdisplay < 5)
11308                         return MODE_V_ILLEGAL;
11309         } else {
11310                 if (mode->htotal - mode->hdisplay < 32)
11311                         return MODE_H_ILLEGAL;
11312
11313                 if (mode->vtotal - mode->vdisplay < 3)
11314                         return MODE_V_ILLEGAL;
11315         }
11316
11317         return MODE_OK;
11318 }
11319
11320 enum drm_mode_status
11321 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
11322                                 const struct drm_display_mode *mode,
11323                                 bool bigjoiner)
11324 {
11325         int plane_width_max, plane_height_max;
11326
11327         /*
11328          * intel_mode_valid() should be
11329          * sufficient on older platforms.
11330          */
11331         if (DISPLAY_VER(dev_priv) < 9)
11332                 return MODE_OK;
11333
11334         /*
11335          * Most people will probably want a fullscreen
11336          * plane so let's not advertize modes that are
11337          * too big for that.
11338          */
11339         if (DISPLAY_VER(dev_priv) >= 11) {
11340                 plane_width_max = 5120 << bigjoiner;
11341                 plane_height_max = 4320;
11342         } else {
11343                 plane_width_max = 5120;
11344                 plane_height_max = 4096;
11345         }
11346
11347         if (mode->hdisplay > plane_width_max)
11348                 return MODE_H_ILLEGAL;
11349
11350         if (mode->vdisplay > plane_height_max)
11351                 return MODE_V_ILLEGAL;
11352
11353         return MODE_OK;
11354 }
11355
/* i915 implementation of the core KMS mode_config vfuncs (atomic only). */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
11367
11368 /**
11369  * intel_init_display_hooks - initialize the display modesetting hooks
11370  * @dev_priv: device private
11371  */
11372 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
11373 {
11374         if (!HAS_DISPLAY(dev_priv))
11375                 return;
11376
11377         intel_init_cdclk_hooks(dev_priv);
11378         intel_init_audio_hooks(dev_priv);
11379
11380         intel_dpll_init_clock_hook(dev_priv);
11381
11382         if (DISPLAY_VER(dev_priv) >= 9) {
11383                 dev_priv->display.get_pipe_config = hsw_get_pipe_config;
11384                 dev_priv->display.crtc_enable = hsw_crtc_enable;
11385                 dev_priv->display.crtc_disable = hsw_crtc_disable;
11386         } else if (HAS_DDI(dev_priv)) {
11387                 dev_priv->display.get_pipe_config = hsw_get_pipe_config;
11388                 dev_priv->display.crtc_enable = hsw_crtc_enable;
11389                 dev_priv->display.crtc_disable = hsw_crtc_disable;
11390         } else if (HAS_PCH_SPLIT(dev_priv)) {
11391                 dev_priv->display.get_pipe_config = ilk_get_pipe_config;
11392                 dev_priv->display.crtc_enable = ilk_crtc_enable;
11393                 dev_priv->display.crtc_disable = ilk_crtc_disable;
11394         } else if (IS_CHERRYVIEW(dev_priv) ||
11395                    IS_VALLEYVIEW(dev_priv)) {
11396                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
11397                 dev_priv->display.crtc_enable = valleyview_crtc_enable;
11398                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
11399         } else {
11400                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
11401                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
11402                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
11403         }
11404
11405         intel_fdi_init_hook(dev_priv);
11406
11407         if (DISPLAY_VER(dev_priv) >= 9) {
11408                 dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
11409                 dev_priv->display.get_initial_plane_config = skl_get_initial_plane_config;
11410         } else {
11411                 dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
11412                 dev_priv->display.get_initial_plane_config = i9xx_get_initial_plane_config;
11413         }
11414
11415 }
11416
11417 void intel_modeset_init_hw(struct drm_i915_private *i915)
11418 {
11419         struct intel_cdclk_state *cdclk_state;
11420
11421         if (!HAS_DISPLAY(i915))
11422                 return;
11423
11424         cdclk_state = to_intel_cdclk_state(i915->cdclk.obj.state);
11425
11426         intel_update_cdclk(i915);
11427         intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
11428         cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
11429 }
11430
11431 static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
11432 {
11433         struct drm_plane *plane;
11434         struct intel_crtc *crtc;
11435
11436         for_each_intel_crtc(state->dev, crtc) {
11437                 struct intel_crtc_state *crtc_state;
11438
11439                 crtc_state = intel_atomic_get_crtc_state(state, crtc);
11440                 if (IS_ERR(crtc_state))
11441                         return PTR_ERR(crtc_state);
11442
11443                 if (crtc_state->hw.active) {
11444                         /*
11445                          * Preserve the inherited flag to avoid
11446                          * taking the full modeset path.
11447                          */
11448                         crtc_state->inherited = true;
11449                 }
11450         }
11451
11452         drm_for_each_plane(plane, state->dev) {
11453                 struct drm_plane_state *plane_state;
11454
11455                 plane_state = drm_atomic_get_plane_state(state, plane);
11456                 if (IS_ERR(plane_state))
11457                         return PTR_ERR(plane_state);
11458         }
11459
11460         return 0;
11461 }
11462
/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks so that
 * we ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WM's should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
static void sanitize_watermarks(struct drm_i915_private *dev_priv)
{
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.optimize_watermarks)
		return;

	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (drm_WARN_ON(&dev_priv->drm, !state))
		return;

	intel_state = to_intel_atomic_state(state);

	drm_modeset_acquire_init(&ctx, 0);

	/* Standard modeset-lock retry loop: backoff + retry on -EDEADLK. */
retry:
	state->acquire_ctx = &ctx;

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH(dev_priv))
		intel_state->skip_intermediate_wm = true;

	ret = sanitize_watermarks_add_affected(state);
	if (ret)
		goto fail;

	/* Recompute watermarks for the duplicated state. */
	ret = intel_atomic_check(&dev_priv->drm, state);
	if (ret)
		goto fail;

	/* Write calculated watermark values back */
	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
		crtc_state->wm.need_postvbl_update = true;
		dev_priv->display.optimize_watermarks(intel_state, crtc);

		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
	}

fail:
	if (ret == -EDEADLK) {
		/* Drop contended locks, clear the state, and try again. */
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	/*
	 * If we fail here, it means that the hardware appears to be
	 * programmed in a way that shouldn't be possible, given our
	 * understanding of watermark requirements.  This might mean a
	 * mistake in the hardware readout code or a mistake in the
	 * watermark calculations for a given platform.  Raise a WARN
	 * so that this is noticeable.
	 *
	 * If this actually happens, we'll have to just leave the
	 * BIOS-programmed watermarks untouched and hope for the best.
	 */
	drm_WARN(&dev_priv->drm, ret,
		 "Could not determine valid watermarks for inherited state\n");

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
11548
/*
 * Commit the state read out from hardware once at init, forcing all active
 * planes to recompute their state so later commits start from a known-good
 * software state. Returns 0 on success or a negative error code.
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

	/* Standard modeset-lock retry loop: backoff + retry on -EDEADLK. */
retry:
	state->acquire_ctx = &ctx;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			struct intel_encoder *encoder;

			/*
			 * We've not yet detected sink capabilities
			 * (audio,infoframes,etc.) and thus we don't want to
			 * force a full state recomputation yet. We want that to
			 * happen only for the first real commit from userspace.
			 * So preserve the inherited flag for the time being.
			 */
			crtc_state->inherited = true;

			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			/*
			 * Pull in the connectors of any encoder whose
			 * initial fastset check fails, so the commit can
			 * correct its state.
			 */
			for_each_intel_encoder_mask(dev, encoder,
						    crtc_state->uapi.encoder_mask) {
				if (encoder->initial_fastset_check &&
				    !encoder->initial_fastset_check(encoder, crtc_state)) {
					ret = drm_atomic_add_affected_connectors(state,
										 &crtc->base);
					if (ret)
						goto out;
				}
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
11627
11628 static void intel_mode_config_init(struct drm_i915_private *i915)
11629 {
11630         struct drm_mode_config *mode_config = &i915->drm.mode_config;
11631
11632         drm_mode_config_init(&i915->drm);
11633         INIT_LIST_HEAD(&i915->global_obj_list);
11634
11635         mode_config->min_width = 0;
11636         mode_config->min_height = 0;
11637
11638         mode_config->preferred_depth = 24;
11639         mode_config->prefer_shadow = 1;
11640
11641         mode_config->funcs = &intel_mode_funcs;
11642
11643         mode_config->async_page_flip = HAS_ASYNC_FLIPS(i915);
11644
11645         /*
11646          * Maximum framebuffer dimensions, chosen to match
11647          * the maximum render engine surface size on gen4+.
11648          */
11649         if (DISPLAY_VER(i915) >= 7) {
11650                 mode_config->max_width = 16384;
11651                 mode_config->max_height = 16384;
11652         } else if (DISPLAY_VER(i915) >= 4) {
11653                 mode_config->max_width = 8192;
11654                 mode_config->max_height = 8192;
11655         } else if (DISPLAY_VER(i915) == 3) {
11656                 mode_config->max_width = 4096;
11657                 mode_config->max_height = 4096;
11658         } else {
11659                 mode_config->max_width = 2048;
11660                 mode_config->max_height = 2048;
11661         }
11662
11663         if (IS_I845G(i915) || IS_I865G(i915)) {
11664                 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
11665                 mode_config->cursor_height = 1023;
11666         } else if (IS_I830(i915) || IS_I85X(i915) ||
11667                    IS_I915G(i915) || IS_I915GM(i915)) {
11668                 mode_config->cursor_width = 64;
11669                 mode_config->cursor_height = 64;
11670         } else {
11671                 mode_config->cursor_width = 256;
11672                 mode_config->cursor_height = 256;
11673         }
11674 }
11675
/* Tear down the global-object list and the core mode_config state. */
static void intel_mode_config_cleanup(struct drm_i915_private *i915)
{
	intel_atomic_global_obj_cleanup(i915);
	drm_mode_config_cleanup(&i915->drm);
}
11681
11682 static void plane_config_fini(struct intel_initial_plane_config *plane_config)
11683 {
11684         if (plane_config->fb) {
11685                 struct drm_framebuffer *fb = &plane_config->fb->base;
11686
11687                 /* We may only have the stub and not a full framebuffer */
11688                 if (drm_framebuffer_read_refcount(fb))
11689                         drm_framebuffer_put(fb);
11690                 else
11691                         kfree(fb);
11692         }
11693
11694         if (plane_config->vma)
11695                 i915_vma_put(plane_config->vma);
11696 }
11697
/* part #1: call before irq install */
/*
 * Pre-IRQ display init: vblank, VBT, VGA, power domains, cdclk/dbuf/bw
 * global state, quirks and FBC. Returns 0 or a negative error code;
 * failures unwind in reverse order via the labels at the bottom.
 */
int intel_modeset_init_noirq(struct drm_i915_private *i915)
{
	int ret;

	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	if (HAS_DISPLAY(i915)) {
		ret = drm_vblank_init(&i915->drm,
				      INTEL_NUM_PIPES(i915));
		if (ret)
			return ret;
	}

	intel_bios_init(i915);

	ret = intel_vga_register(i915);
	if (ret)
		goto cleanup_bios;

	/* FIXME: completely on the wrong abstraction layer */
	intel_power_domains_init_hw(i915, false);

	/* Nothing further to set up on display-less hardware. */
	if (!HAS_DISPLAY(i915))
		return 0;

	intel_dmc_ucode_init(i915);

	i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
	i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);

	i915->framestart_delay = 1; /* 1-4 */

	i915->window2_delay = 0; /* No DSB so no window2 delay */

	intel_mode_config_init(i915);

	/* Global atomic state objects: cdclk, dbuf, bandwidth. */
	ret = intel_cdclk_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	ret = intel_dbuf_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	ret = intel_bw_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	init_llist_head(&i915->atomic_helper.free_list);
	INIT_WORK(&i915->atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(i915);

	intel_fbc_init(i915);

	return 0;

cleanup_vga_client_pw_domain_dmc:
	intel_dmc_ucode_fini(i915);
	intel_power_domains_driver_remove(i915);
	intel_vga_unregister(i915);
cleanup_bios:
	intel_bios_driver_remove(i915);

	return ret;
}
11768
/* part #2: call after irq install, but before gem init */
/*
 * Post-IRQ, pre-GEM display init: CRTCs, PLLs, cdclk, outputs, hardware
 * state readout, initial (BIOS) framebuffer takeover and watermark
 * sanitation. Returns 0 or a negative error code.
 */
int intel_modeset_init_nogem(struct drm_i915_private *i915)
{
	struct drm_device *dev = &i915->drm;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	if (!HAS_DISPLAY(i915))
		return 0;

	intel_init_pm(i915);

	intel_panel_sanitize_ssc(i915);

	intel_pps_setup(i915);

	intel_gmbus_setup(i915);

	drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
		    INTEL_NUM_PIPES(i915),
		    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

	/* One CRTC per pipe; a failure tears down the mode config. */
	for_each_pipe(i915, pipe) {
		ret = intel_crtc_init(i915, pipe);
		if (ret) {
			intel_mode_config_cleanup(i915);
			return ret;
		}
	}

	intel_plane_possible_crtcs_init(i915);
	intel_shared_dpll_init(dev);
	intel_fdi_pll_freq_update(i915);

	intel_update_czclk(i915);
	intel_modeset_init_hw(i915);
	intel_dpll_update_ref_clks(i915);

	intel_hdcp_component_init(i915);

	if (i915->max_cdclk_freq == 0)
		intel_update_max_cdclk(i915);

	/*
	 * If the platform has HTI, we need to find out whether it has reserved
	 * any display resources before we create our display outputs.
	 */
	if (INTEL_INFO(i915)->display.has_hti)
		i915->hti_state = intel_de_read(i915, HDPORT_STATE);

	/* Just disable it once at startup */
	intel_vga_disable(i915);
	intel_setup_outputs(i915);

	/* Read out the current hardware state under the modeset locks. */
	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top.  This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		i915->display.get_initial_plane_config(crtc, &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);

		plane_config_fini(&plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(i915))
		sanitize_watermarks(i915);

	return 0;
}
11862
/* part #3: call after gem init */
int intel_modeset_init(struct drm_i915_private *i915)
{
	int ret;

	if (!HAS_DISPLAY(i915))
		return 0;

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 *
	 * A failure here is only logged: it is not fatal for driver load.
	 */
	ret = intel_initial_commit(&i915->drm);
	if (ret)
		drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);

	intel_overlay_setup(i915);

	/* fbdev init failure, unlike the initial commit, aborts modeset init */
	ret = intel_fbdev_init(&i915->drm);
	if (ret)
		return ret;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(i915);
	intel_hpd_poll_disable(i915);

	intel_init_ipc(i915);

	return 0;
}
11895
/*
 * Force-enable pipe @pipe with a fixed 640x480 VGA-style mode.
 * Used by the 830 "force quirk" (see the debug message below); the
 * programming sequence below (VGA mode toggling, repeated DPLL writes,
 * delays) is order-sensitive and must not be rearranged.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* sanity check: these dividers should produce a ~25154 kHz dot clock */
	drm_WARN_ON(&dev_priv->drm,
		    i9xx_calc_dpll_params(48000, &clock) != 25154);

	drm_dbg_kms(&dev_priv->drm,
		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		    pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	intel_de_write(dev_priv, FP0(pipe), fp);
	intel_de_write(dev_priv, FP1(pipe), fp);

	/* hardcoded 640x480 timings (active | total/sync-end in the high half) */
	intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		intel_de_write(dev_priv, DPLL(pipe), dpll);
		intel_de_posting_read(dev_priv, DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	intel_de_write(dev_priv, PIPECONF(pipe),
		       PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	/* don't return until the pipe has actually started scanning out */
	intel_wait_for_pipe_scanline_moving(crtc);
}
11968
/*
 * Counterpart of i830_enable_pipe(): shut down a pipe that was
 * force-enabled by the quirk, then disable its DPLL (leaving
 * DPLL_VGA_MODE_DIS set). Warns if any plane or cursor is still
 * enabled, since those should have been torn down first.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
		    pipe_name(pipe));

	/* no plane/cursor may still be scanning out of this pipe */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);

	intel_de_write(dev_priv, PIPECONF(pipe), 0);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	/* wait for the pipe to actually stop before touching the DPLL */
	intel_wait_for_pipe_scanline_stopped(crtc);

	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}
11998
/*
 * On pre-gen4 hardware a primary plane can be attached to a pipe other
 * than its own crtc's (e.g. left that way by the BIOS). Detect that via
 * the plane's get_hw_state() hook and disable any such misrouted plane.
 * No-op on display version >= 4.
 */
static void
intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	if (DISPLAY_VER(dev_priv) >= 4)
		return;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_plane *plane =
			to_intel_plane(crtc->base.primary);
		struct intel_crtc *plane_crtc;
		enum pipe pipe;

		/* disabled planes can't be misrouted */
		if (!plane->get_hw_state(plane, &pipe))
			continue;

		/* plane is feeding its own pipe - nothing to fix */
		if (pipe == crtc->pipe)
			continue;

		drm_dbg_kms(&dev_priv->drm,
			    "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
			    plane->base.base.id, plane->base.name);

		/* disable it on the pipe it is actually attached to */
		plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
		intel_plane_disable_noatomic(plane_crtc, plane);
	}
}
12027
/*
 * Return true iff at least one encoder is currently attached to @crtc.
 * The loop body returns on the first iteration, so this is purely an
 * "is the encoder list non-empty" check.
 */
static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		return true;

	return false;
}
12038
/*
 * Return the first connector currently using @encoder, or NULL if none.
 * If multiple connectors share the encoder only the first one found by
 * the iterator is returned.
 */
static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_connector *connector;

	for_each_connector_on_encoder(dev, &encoder->base, connector)
		return connector;

	return NULL;
}
12049
12050 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
12051                               enum pipe pch_transcoder)
12052 {
12053         return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
12054                 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
12055 }
12056
/*
 * Reprogram the frame start delay (which the BIOS may have left at a
 * debugging value) to dev_priv->framestart_delay - 1 in whichever
 * register holds it on this platform: CHICKEN_TRANS on HSW/BDW/gen9+,
 * PIPECONF otherwise, plus the PCH transcoder's own field (PCH_TRANSCONF
 * on IBX, TRANS_CHICKEN2 elsewhere) when a PCH encoder is in use.
 */
static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (DISPLAY_VER(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
		u32 val;

		/* nothing to do here for the DSI transcoders */
		if (transcoder_is_dsi(cpu_transcoder))
			return;

		val = intel_de_read(dev_priv, reg);
		val &= ~HSW_FRAME_START_DELAY_MASK;
		val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	} else {
		i915_reg_t reg = PIPECONF(cpu_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~PIPECONF_FRAME_START_DELAY_MASK;
		val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	}

	if (!crtc_state->has_pch_encoder)
		return;

	/* the PCH transcoder has a frame start delay field of its own */
	if (HAS_PCH_IBX(dev_priv)) {
		i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	} else {
		enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
		i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	}
}
12107
/*
 * Bring a crtc's BIOS-inherited hardware state in line with what the
 * driver expects: fix frame start delays, turn off every non-primary
 * plane, normalize the pipe bottom color, disable the pipe entirely if
 * it has no encoders, and mark FIFO underrun reporting as disabled for
 * bookkeeping.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);

	if (crtc_state->hw.active) {
		struct intel_plane *plane;

		/* Clear any frame start delays used for debugging left by the BIOS */
		intel_sanitize_frame_start_delay(crtc_state);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->uapi.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}

		/*
		 * Disable any background color set by the BIOS, but enable the
		 * gamma and CSC to match how we program our planes.
		 */
		if (DISPLAY_VER(dev_priv) >= 9)
			intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
				       SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. Bigjoiner slaves are driven
	 * through their master, so an encoder-less slave is expected. */
	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
	    !crtc_state->bigjoiner_slave)
		intel_crtc_disable_noatomic(crtc, ctx);

	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH trancoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
12174
12175 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
12176 {
12177         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
12178
12179         /*
12180          * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
12181          * the hardware when a high res displays plugged in. DPLL P
12182          * divider is zero, and the pipe timings are bonkers. We'll
12183          * try to disable everything in that case.
12184          *
12185          * FIXME would be nice to be able to sanitize this state
12186          * without several WARNs, but for now let's take the easy
12187          * road.
12188          */
12189         return IS_SANDYBRIDGE(dev_priv) &&
12190                 crtc_state->hw.active &&
12191                 crtc_state->shared_dpll &&
12192                 crtc_state->port_clock == 0;
12193 }
12194
/*
 * Fix up an encoder whose hw state is inconsistent with its crtc link
 * (typically fallout from resume register restoring): if the encoder has
 * an active connector but no active pipe, manually run its disable hooks
 * and clamp the connector to off. Also notifies opregion of the final
 * state and, on DDI platforms, sanitizes the encoder's PLL mapping.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_crtc_state *crtc_state = crtc ?
		to_intel_crtc_state(crtc->base.state) : NULL;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = crtc_state &&
		crtc_state->hw.active;

	/* treat a crtc with a bogus BIOS DPLL config as inactive */
	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
			    pipe_name(crtc->pipe));
		has_active_crtc = false;
	}

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			    encoder->base.base.id,
			    encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (crtc_state) {
			struct drm_encoder *best_encoder;

			drm_dbg_kms(&dev_priv->drm,
				    "[ENCODER:%d:%s] manually disabled\n",
				    encoder->base.base.id,
				    encoder->base.name);

			/* avoid oopsing in case the hooks consult best_encoder */
			best_encoder = connector->base.state->best_encoder;
			connector->base.state->best_encoder = &encoder->base;

			/* FIXME NULL atomic state passed! */
			if (encoder->disable)
				encoder->disable(NULL, encoder, crtc_state,
						 connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(NULL, encoder, crtc_state,
						      connector->base.state);

			/* restore the original best_encoder */
			connector->base.state->best_encoder = best_encoder;
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	if (HAS_DDI(dev_priv))
		intel_ddi_sanitize_encoder_pll_mapping(encoder);
}
12265
/* FIXME read out full plane state for all planes */
/*
 * Read each plane's enable/pipe state from the hardware, record its
 * visibility in the crtc state of the pipe it is actually attached to,
 * and then recompute the per-crtc plane bitmasks from the result.
 */
static void readout_plane_state(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct intel_crtc_state *crtc_state;
		/* default in case get_hw_state() doesn't set it */
		enum pipe pipe = PIPE_A;
		bool visible;

		visible = plane->get_hw_state(plane, &pipe);

		/* use the pipe the hardware says the plane is on */
		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
		crtc_state = to_intel_crtc_state(crtc->base.state);

		intel_set_plane_visible(crtc_state, plane_state, visible);

		drm_dbg_kms(&dev_priv->drm,
			    "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
			    plane->base.base.id, plane->base.name,
			    enableddisabled(visible), pipe_name(pipe));
	}

	/* rebuild the plane bitmasks now that visibility is known */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		fixup_plane_bitmasks(crtc_state);
	}
}
12299
/*
 * Reconstruct the driver's software state from the current hardware
 * state (e.g. as left by the BIOS or after resume): crtcs, planes,
 * encoders, DPLLs and connectors are read out in that order, then
 * derived per-crtc state (active timings, cdclk/voltage requirements,
 * bandwidth, bigjoiner slave state) is filled in.
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	u8 active_pipes = 0;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* start from a clean crtc state before reading out the hw */
		__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
		intel_crtc_free_hw_state(crtc_state);
		intel_crtc_state_reset(crtc_state, crtc);

		intel_crtc_get_pipe_config(crtc_state);

		crtc_state->hw.enable = crtc_state->hw.active;

		crtc->base.enabled = crtc_state->hw.enable;
		crtc->active = crtc_state->hw.active;

		if (crtc_state->hw.active)
			active_pipes |= BIT(crtc->pipe);

		drm_dbg_kms(&dev_priv->drm,
			    "[CRTC:%d:%s] hw state readout: %s\n",
			    crtc->base.base.id, crtc->base.name,
			    enableddisabled(crtc_state->hw.active));
	}

	/* keep all three active_pipes trackers in sync */
	dev_priv->active_pipes = cdclk_state->active_pipes =
		dbuf_state->active_pipes = active_pipes;

	readout_plane_state(dev_priv);

	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			struct intel_crtc_state *crtc_state;

			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			intel_encoder_get_config(encoder, crtc_state);
			if (encoder->sync_state)
				encoder->sync_state(encoder, crtc_state);

			/* read out to slave crtc as well for bigjoiner */
			if (crtc_state->bigjoiner) {
				/* encoder should be linked to the bigjoiner master */
				WARN_ON(crtc_state->bigjoiner_slave);

				crtc = crtc_state->bigjoiner_linked_crtc;
				crtc_state = to_intel_crtc_state(crtc->base.state);
				intel_encoder_get_config(encoder, crtc_state);
			}
		} else {
			encoder->base.crtc = NULL;
		}

		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			    encoder->base.base.id, encoder->base.name,
			    enableddisabled(encoder->base.crtc),
			    pipe_name(pipe));
	}

	intel_dpll_readout_hw_state(dev_priv);

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			struct intel_crtc_state *crtc_state;
			struct intel_crtc *crtc;

			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = intel_attached_encoder(connector);
			connector->base.encoder = &encoder->base;

			crtc = to_intel_crtc(encoder->base.crtc);
			crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;

			if (crtc_state && crtc_state->hw.active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				crtc_state->uapi.connector_mask |=
					drm_connector_mask(&connector->base);
				crtc_state->uapi.encoder_mask |=
					drm_encoder_mask(&encoder->base);
			}
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		drm_dbg_kms(&dev_priv->drm,
			    "[CONNECTOR:%d:%s] hw state readout: %s\n",
			    connector->base.base.id, connector->base.name,
			    enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	/* derive the remaining per-crtc software state from the readout */
	for_each_intel_crtc(dev, crtc) {
		struct intel_bw_state *bw_state =
			to_intel_bw_state(dev_priv->bw_obj.state);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane;
		int min_cdclk = 0;

		/* slave state is copied from its master at the end of this loop */
		if (crtc_state->bigjoiner_slave)
			continue;

		if (crtc_state->hw.active) {
			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			crtc_state->inherited = true;

			intel_crtc_update_active_timings(crtc_state);

			intel_crtc_copy_hw_to_uapi_state(crtc_state);
		}

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			/*
			 * FIXME don't have the fb yet, so can't
			 * use intel_plane_data_rate() :(
			 */
			if (plane_state->uapi.visible)
				crtc_state->data_rate[plane->id] =
					4 * crtc_state->pixel_rate;
			/*
			 * FIXME don't have the fb yet, so can't
			 * use plane->min_cdclk() :(
			 */
			if (plane_state->uapi.visible && plane->min_cdclk) {
				if (crtc_state->double_wide || DISPLAY_VER(dev_priv) >= 10)
					crtc_state->min_cdclk[plane->id] =
						DIV_ROUND_UP(crtc_state->pixel_rate, 2);
				else
					crtc_state->min_cdclk[plane->id] =
						crtc_state->pixel_rate;
			}
			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s] min_cdclk %d kHz\n",
				    plane->base.base.id, plane->base.name,
				    crtc_state->min_cdclk[plane->id]);
		}

		if (crtc_state->hw.active) {
			min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
			if (drm_WARN_ON(dev, min_cdclk < 0))
				min_cdclk = 0;
		}

		cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
		cdclk_state->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		intel_bw_crtc_update(bw_state, crtc_state);

		intel_pipe_config_sanity_check(dev_priv, crtc_state);

		/* discard our incomplete slave state, copy it from master */
		if (crtc_state->bigjoiner && crtc_state->hw.active) {
			struct intel_crtc *slave = crtc_state->bigjoiner_linked_crtc;
			struct intel_crtc_state *slave_crtc_state =
				to_intel_crtc_state(slave->base.state);

			copy_bigjoiner_crtc_state(slave_crtc_state, crtc_state);
			slave->base.mode = crtc->base.mode;

			cdclk_state->min_cdclk[slave->pipe] = min_cdclk;
			cdclk_state->min_voltage_level[slave->pipe] =
				crtc_state->min_voltage_level;

			for_each_intel_plane_on_crtc(&dev_priv->drm, slave, plane) {
				const struct intel_plane_state *plane_state =
					to_intel_plane_state(plane->base.state);

				/*
				 * FIXME don't have the fb yet, so can't
				 * use intel_plane_data_rate() :(
				 */
				if (plane_state->uapi.visible)
					crtc_state->data_rate[plane->id] =
						4 * crtc_state->pixel_rate;
				else
					crtc_state->data_rate[plane->id] = 0;
			}

			intel_bw_crtc_update(bw_state, slave_crtc_state);
			drm_calc_timestamping_constants(&slave->base,
							&slave_crtc_state->hw.adjusted_mode);
		}
	}
}
12520
12521 static void
12522 get_encoder_power_domains(struct drm_i915_private *dev_priv)
12523 {
12524         struct intel_encoder *encoder;
12525
12526         for_each_intel_encoder(&dev_priv->drm, encoder) {
12527                 struct intel_crtc_state *crtc_state;
12528
12529                 if (!encoder->get_power_domains)
12530                         continue;
12531
12532                 /*
12533                  * MST-primary and inactive encoders don't have a crtc state
12534                  * and neither of these require any power domain references.
12535                  */
12536                 if (!encoder->base.crtc)
12537                         continue;
12538
12539                 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
12540                 encoder->get_power_domains(encoder, crtc_state);
12541         }
12542 }
12543
12544 static void intel_early_display_was(struct drm_i915_private *dev_priv)
12545 {
12546         /*
12547          * Display WA #1185 WaDisableDARBFClkGating:glk,icl,ehl,tgl
12548          * Also known as Wa_14010480278.
12549          */
12550         if (IS_DISPLAY_VER(dev_priv, 10, 12))
12551                 intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
12552                                intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
12553
12554         if (IS_HASWELL(dev_priv)) {
12555                 /*
12556                  * WaRsPkgCStateDisplayPMReq:hsw
12557                  * System hang if this isn't done before disabling all planes!
12558                  */
12559                 intel_de_write(dev_priv, CHICKEN_PAR1_1,
12560                                intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
12561         }
12562
12563         if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) {
12564                 /* Display WA #1142:kbl,cfl,cml */
12565                 intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
12566                              KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
12567                 intel_de_rmw(dev_priv, CHICKEN_MISC_2,
12568                              KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
12569                              KBL_ARB_FILL_SPARE_14);
12570         }
12571 }
12572
12573 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
12574                                        enum port port, i915_reg_t hdmi_reg)
12575 {
12576         u32 val = intel_de_read(dev_priv, hdmi_reg);
12577
12578         if (val & SDVO_ENABLE ||
12579             (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
12580                 return;
12581
12582         drm_dbg_kms(&dev_priv->drm,
12583                     "Sanitizing transcoder select for HDMI %c\n",
12584                     port_name(port));
12585
12586         val &= ~SDVO_PIPE_SEL_MASK;
12587         val |= SDVO_PIPE_SEL(PIPE_A);
12588
12589         intel_de_write(dev_priv, hdmi_reg, val);
12590 }
12591
12592 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
12593                                      enum port port, i915_reg_t dp_reg)
12594 {
12595         u32 val = intel_de_read(dev_priv, dp_reg);
12596
12597         if (val & DP_PORT_EN ||
12598             (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
12599                 return;
12600
12601         drm_dbg_kms(&dev_priv->drm,
12602                     "Sanitizing transcoder select for DP %c\n",
12603                     port_name(port));
12604
12605         val &= ~DP_PIPE_SEL_MASK;
12606         val |= DP_PIPE_SEL(PIPE_A);
12607
12608         intel_de_write(dev_priv, dp_reg, val);
12609 }
12610
static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
{
	/*
	 * The BIOS may leave transcoder B selected on some of the PCH
	 * ports even though it never enabled the port. That would trip
	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled(), so
	 * sanitize the transcoder select bits back to transcoder A here.
	 * We assume the BIOS never actually enabled the port, because if
	 * it had we'd have to toggle the port on and back off to make
	 * the transcoder A select stick (see intel_dp_link_down(),
	 * intel_disable_hdmi(), intel_disable_sdvo()).
	 */
	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);

	/* PCH SDVOB multiplex with HDMIB */
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
}
12633
/*
 * Scan out the current hw modeset state, and sanitize it to the
 * current (atomic) state. The steps below are order-dependent:
 * readout first, then per-domain sanitation.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;

	/* Keep display power up for the whole readout + sanitation. */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */

	/* Sanitize the TypeC port mode upfront, encoders depend on this */
	for_each_intel_encoder(dev, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		/* We need to sanitize only the MST primary port. */
		if (encoder->type != INTEL_OUTPUT_DP_MST &&
		    intel_phy_is_tc(dev_priv, phy))
			intel_tc_port_sanitize(enc_to_dig_port(encoder));
	}

	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		/* Re-enable vblank interrupts only on active pipes. */
		if (crtc_state->hw.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	intel_dpll_sanitize_state(dev_priv);

	/* Watermark readout (and sanitation where needed) per platform. */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (DISPLAY_VER(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		u64 put_domains;

		/*
		 * No crtc should still hold extra power domain references
		 * at this point; warn and drop any that remain.
		 */
		put_domains = modeset_get_crtc_power_domains(crtc_state);
		if (drm_WARN_ON(dev, put_domains))
			modeset_put_crtc_power_domains(crtc, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}
12723
12724 void intel_display_resume(struct drm_device *dev)
12725 {
12726         struct drm_i915_private *dev_priv = to_i915(dev);
12727         struct drm_atomic_state *state = dev_priv->modeset_restore_state;
12728         struct drm_modeset_acquire_ctx ctx;
12729         int ret;
12730
12731         if (!HAS_DISPLAY(dev_priv))
12732                 return;
12733
12734         dev_priv->modeset_restore_state = NULL;
12735         if (state)
12736                 state->acquire_ctx = &ctx;
12737
12738         drm_modeset_acquire_init(&ctx, 0);
12739
12740         while (1) {
12741                 ret = drm_modeset_lock_all_ctx(dev, &ctx);
12742                 if (ret != -EDEADLK)
12743                         break;
12744
12745                 drm_modeset_backoff(&ctx);
12746         }
12747
12748         if (!ret)
12749                 ret = __intel_display_resume(dev, state, &ctx);
12750
12751         intel_enable_ipc(dev_priv);
12752         drm_modeset_drop_locks(&ctx);
12753         drm_modeset_acquire_fini(&ctx);
12754
12755         if (ret)
12756                 drm_err(&dev_priv->drm,
12757                         "Restoring old state failed with %i\n", ret);
12758         if (state)
12759                 drm_atomic_state_put(state);
12760 }
12761
12762 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
12763 {
12764         struct intel_connector *connector;
12765         struct drm_connector_list_iter conn_iter;
12766
12767         /* Kill all the work that may have been queued by hpd. */
12768         drm_connector_list_iter_begin(&i915->drm, &conn_iter);
12769         for_each_intel_connector_iter(connector, &conn_iter) {
12770                 if (connector->modeset_retry_work.func)
12771                         cancel_work_sync(&connector->modeset_retry_work);
12772                 if (connector->hdcp.shim) {
12773                         cancel_delayed_work_sync(&connector->hdcp.check_work);
12774                         cancel_work_sync(&connector->hdcp.prop_work);
12775                 }
12776         }
12777         drm_connector_list_iter_end(&conn_iter);
12778 }
12779
/* part #1: call before irq uninstall */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	/* Drain any commits still queued on the display workqueues. */
	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	/*
	 * Flush the deferred-free work; afterwards the free list must
	 * be empty, otherwise we'd leak the queued state.
	 */
	flush_work(&i915->atomic_helper.free_work);
	drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
}
12792
/* part #2: call after irq uninstall */
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	intel_mode_config_cleanup(i915);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	/* Safe to destroy now that all queued work was flushed above. */
	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	intel_fbc_cleanup_cfb(i915);
}
12835
/* part #3: call after gem init */
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
{
	intel_dmc_ucode_fini(i915);

	intel_power_domains_driver_remove(i915);

	intel_vga_unregister(i915);

	/* NOTE(review): presumably frees VBT-derived state — last user above */
	intel_bios_driver_remove(i915);
}
12847
void intel_display_driver_register(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	intel_display_debugfs_register(i915);

	/* Must be done after probing outputs */
	intel_opregion_register(i915);
	acpi_video_register();

	intel_audio_init(i915);

	/*
	 * Some ports require correctly set-up hpd registers for
	 * detection to work properly (leading to ghost connected
	 * connector status), e.g. VGA on gm45.  Hence we can only set
	 * up the initial fbdev config after hpd irqs are fully
	 * enabled. We do it last so that the async config cannot run
	 * before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(&i915->drm);

	/*
	 * We need to coordinate the hotplugs with the asynchronous
	 * fbdev configuration, for which we use the
	 * fbdev->async_cookie.
	 */
	drm_kms_helper_poll_init(&i915->drm);
}
12878
void intel_display_driver_unregister(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	intel_fbdev_unregister(i915);
	intel_audio_deinit(i915);

	/*
	 * After flushing the fbdev (incl. a late async config which
	 * will have delayed queuing of a hotplug event), then flush
	 * the hotplug events.
	 */
	drm_kms_helper_poll_fini(&i915->drm);
	/* Disable all crtcs and planes before the driver goes away. */
	drm_atomic_helper_shutdown(&i915->drm);

	/* Unregister in reverse order of intel_display_driver_register(). */
	acpi_video_unregister();
	intel_opregion_unregister(i915);
}