05e42903bde0dd99c762bf59764c748bf7bb27ee
[platform/kernel/linux-rpi.git] / drivers / gpu / drm / i915 / display / intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <acpi/video.h>
28 #include <linux/i2c.h>
29 #include <linux/input.h>
30 #include <linux/intel-iommu.h>
31 #include <linux/kernel.h>
32 #include <linux/module.h>
33 #include <linux/dma-resv.h>
34 #include <linux/slab.h>
35
36 #include <drm/drm_atomic.h>
37 #include <drm/drm_atomic_helper.h>
38 #include <drm/drm_atomic_uapi.h>
39 #include <drm/drm_damage_helper.h>
40 #include <drm/drm_dp_helper.h>
41 #include <drm/drm_edid.h>
42 #include <drm/drm_fourcc.h>
43 #include <drm/drm_plane_helper.h>
44 #include <drm/drm_probe_helper.h>
45 #include <drm/drm_rect.h>
46
47 #include "display/intel_audio.h"
48 #include "display/intel_crt.h"
49 #include "display/intel_ddi.h"
50 #include "display/intel_display_debugfs.h"
51 #include "display/intel_dp.h"
52 #include "display/intel_dp_mst.h"
53 #include "display/intel_dpll.h"
54 #include "display/intel_dpll_mgr.h"
55 #include "display/intel_drrs.h"
56 #include "display/intel_dsi.h"
57 #include "display/intel_dvo.h"
58 #include "display/intel_fb.h"
59 #include "display/intel_gmbus.h"
60 #include "display/intel_hdmi.h"
61 #include "display/intel_lvds.h"
62 #include "display/intel_sdvo.h"
63 #include "display/intel_snps_phy.h"
64 #include "display/intel_tv.h"
65 #include "display/intel_vdsc.h"
66 #include "display/intel_vrr.h"
67
68 #include "gem/i915_gem_lmem.h"
69 #include "gem/i915_gem_object.h"
70
71 #include "gt/intel_rps.h"
72 #include "gt/gen8_ppgtt.h"
73
74 #include "g4x_dp.h"
75 #include "g4x_hdmi.h"
76 #include "i915_drv.h"
77 #include "intel_acpi.h"
78 #include "intel_atomic.h"
79 #include "intel_atomic_plane.h"
80 #include "intel_bw.h"
81 #include "intel_cdclk.h"
82 #include "intel_color.h"
83 #include "intel_crtc.h"
84 #include "intel_de.h"
85 #include "intel_display_types.h"
86 #include "intel_dmc.h"
87 #include "intel_dp_link_training.h"
88 #include "intel_dpt.h"
89 #include "intel_fbc.h"
90 #include "intel_fdi.h"
91 #include "intel_fbdev.h"
92 #include "intel_fifo_underrun.h"
93 #include "intel_frontbuffer.h"
94 #include "intel_hdcp.h"
95 #include "intel_hotplug.h"
96 #include "intel_overlay.h"
97 #include "intel_panel.h"
98 #include "intel_pipe_crc.h"
99 #include "intel_pm.h"
100 #include "intel_pps.h"
101 #include "intel_psr.h"
102 #include "intel_quirks.h"
103 #include "intel_sideband.h"
104 #include "intel_sprite.h"
105 #include "intel_tc.h"
106 #include "intel_vga.h"
107 #include "i9xx_plane.h"
108 #include "skl_scaler.h"
109 #include "skl_universal_plane.h"
110
111 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
112                                 struct intel_crtc_state *pipe_config);
113 static void ilk_pch_clock_get(struct intel_crtc *crtc,
114                               struct intel_crtc_state *pipe_config);
115
116 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
117 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
118 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
119                                          const struct intel_link_m_n *m_n,
120                                          const struct intel_link_m_n *m2_n2);
121 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
122 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
123 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
124 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
125 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
126 static void intel_modeset_setup_hw_state(struct drm_device *dev,
127                                          struct drm_modeset_acquire_ctx *ctx);
128
129 /**
130  * intel_update_watermarks - update FIFO watermark values based on current modes
131  * @dev_priv: i915 device
132  *
133  * Calculate watermark values for the various WM regs based on current mode
134  * and plane configuration.
135  *
136  * There are several cases to deal with here:
137  *   - normal (i.e. non-self-refresh)
138  *   - self-refresh (SR) mode
139  *   - lines are large relative to FIFO size (buffer can hold up to 2)
140  *   - lines are small relative to FIFO size (buffer can hold more than 2
141  *     lines), so need to account for TLB latency
142  *
143  *   The normal calculation is:
144  *     watermark = dotclock * bytes per pixel * latency
145  *   where latency is platform & configuration dependent (we assume pessimal
146  *   values here).
147  *
148  *   The SR calculation is:
149  *     watermark = (trunc(latency/line time)+1) * surface width *
150  *       bytes per pixel
151  *   where
152  *     line time = htotal / dotclock
153  *     surface width = hdisplay for normal plane and 64 for cursor
154  *   and latency is assumed to be high, as above.
155  *
156  * The final value programmed to the register should always be rounded up,
157  * and include an extra 2 entries to account for clock crossings.
158  *
159  * We don't use the sprite, so we can ignore that.  And on Crestline we have
160  * to set the non-SR watermarks to 8.
161  */
162 static void intel_update_watermarks(struct drm_i915_private *dev_priv)
163 {
164         if (dev_priv->wm_disp->update_wm)
165                 dev_priv->wm_disp->update_wm(dev_priv);
166 }
167
168 static int intel_compute_pipe_wm(struct intel_atomic_state *state,
169                                  struct intel_crtc *crtc)
170 {
171         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
172         if (dev_priv->wm_disp->compute_pipe_wm)
173                 return dev_priv->wm_disp->compute_pipe_wm(state, crtc);
174         return 0;
175 }
176
177 static int intel_compute_intermediate_wm(struct intel_atomic_state *state,
178                                          struct intel_crtc *crtc)
179 {
180         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
181         if (!dev_priv->wm_disp->compute_intermediate_wm)
182                 return 0;
183         if (drm_WARN_ON(&dev_priv->drm,
184                         !dev_priv->wm_disp->compute_pipe_wm))
185                 return 0;
186         return dev_priv->wm_disp->compute_intermediate_wm(state, crtc);
187 }
188
189 static bool intel_initial_watermarks(struct intel_atomic_state *state,
190                                      struct intel_crtc *crtc)
191 {
192         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
193         if (dev_priv->wm_disp->initial_watermarks) {
194                 dev_priv->wm_disp->initial_watermarks(state, crtc);
195                 return true;
196         }
197         return false;
198 }
199
200 static void intel_atomic_update_watermarks(struct intel_atomic_state *state,
201                                            struct intel_crtc *crtc)
202 {
203         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
204         if (dev_priv->wm_disp->atomic_update_watermarks)
205                 dev_priv->wm_disp->atomic_update_watermarks(state, crtc);
206 }
207
208 static void intel_optimize_watermarks(struct intel_atomic_state *state,
209                                       struct intel_crtc *crtc)
210 {
211         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
212         if (dev_priv->wm_disp->optimize_watermarks)
213                 dev_priv->wm_disp->optimize_watermarks(state, crtc);
214 }
215
216 static int intel_compute_global_watermarks(struct intel_atomic_state *state)
217 {
218         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
219         if (dev_priv->wm_disp->compute_global_watermarks)
220                 return dev_priv->wm_disp->compute_global_watermarks(state);
221         return 0;
222 }
223
224 /* returns HPLL frequency in kHz */
225 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
226 {
227         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
228
229         /* Obtain SKU information */
230         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
231                 CCK_FUSE_HPLL_FREQ_MASK;
232
233         return vco_freq[hpll_freq] * 1000;
234 }
235
236 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
237                       const char *name, u32 reg, int ref_freq)
238 {
239         u32 val;
240         int divider;
241
242         val = vlv_cck_read(dev_priv, reg);
243         divider = val & CCK_FREQUENCY_VALUES;
244
245         drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
246                  (divider << CCK_FREQUENCY_STATUS_SHIFT),
247                  "%s change in progress\n", name);
248
249         return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
250 }
251
252 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
253                            const char *name, u32 reg)
254 {
255         int hpll;
256
257         vlv_cck_get(dev_priv);
258
259         if (dev_priv->hpll_freq == 0)
260                 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
261
262         hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
263
264         vlv_cck_put(dev_priv);
265
266         return hpll;
267 }
268
269 static void intel_update_czclk(struct drm_i915_private *dev_priv)
270 {
271         if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
272                 return;
273
274         dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
275                                                       CCK_CZ_CLOCK_CONTROL);
276
277         drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
278                 dev_priv->czclk_freq);
279 }
280
281 static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
282 {
283         return (crtc_state->active_planes &
284                 ~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0;
285 }
286
287 /* WA Display #0827: Gen9:all */
288 static void
289 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
290 {
291         if (enable)
292                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
293                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
294         else
295                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
296                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
297 }
298
299 /* Wa_2006604312:icl,ehl */
300 static void
301 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
302                        bool enable)
303 {
304         if (enable)
305                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
306                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
307         else
308                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
309                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
310 }
311
/* Wa_1604331009:icl,jsl,ehl */
static void
icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	/* Set or clear the cursor clock gating disable bit for @pipe. */
	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), CURSOR_GATING_DIS,
		     enable ? CURSOR_GATING_DIS : 0);
}
320
static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	/* A port sync slave has a valid master transcoder assigned. */
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}
326
static bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	/* A port sync master has at least one slave transcoder in its mask. */
	return crtc_state->sync_mode_slaves_mask != 0;
}
332
/* Is this crtc part of a transcoder port sync group (as master or slave)? */
bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
		is_trans_port_sync_slave(crtc_state);
}
339
340 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
341                                     enum pipe pipe)
342 {
343         i915_reg_t reg = PIPEDSL(pipe);
344         u32 line1, line2;
345         u32 line_mask;
346
347         if (DISPLAY_VER(dev_priv) == 2)
348                 line_mask = DSL_LINEMASK_GEN2;
349         else
350                 line_mask = DSL_LINEMASK_GEN3;
351
352         line1 = intel_de_read(dev_priv, reg) & line_mask;
353         msleep(5);
354         line2 = intel_de_read(dev_priv, reg) & line_mask;
355
356         return line1 != line2;
357 }
358
/*
 * Wait up to 100ms for the pipe's scanline counter to reach the
 * requested state: moving (@state true) or stopped (@state false).
 */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		drm_err(&dev_priv->drm,
			"pipe %c scanline %s wait timed out\n",
			pipe_name(pipe), onoff(state));
}
370
/* Wait for the pipe's scanline counter to stop moving (pipe off). */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}
375
/* Wait for the pipe's scanline counter to start moving (pipe on). */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
380
381 static void
382 intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
383 {
384         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
385         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
386
387         if (DISPLAY_VER(dev_priv) >= 4) {
388                 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
389                 i915_reg_t reg = PIPECONF(cpu_transcoder);
390
391                 /* Wait for the Pipe State to go off */
392                 if (intel_de_wait_for_clear(dev_priv, reg,
393                                             I965_PIPECONF_ACTIVE, 100))
394                         drm_WARN(&dev_priv->drm, 1,
395                                  "pipe_off wait timed out\n");
396         } else {
397                 intel_wait_for_pipe_scanline_stopped(crtc);
398         }
399 }
400
401 /* Only for pre-ILK configs */
402 void assert_pll(struct drm_i915_private *dev_priv,
403                 enum pipe pipe, bool state)
404 {
405         u32 val;
406         bool cur_state;
407
408         val = intel_de_read(dev_priv, DPLL(pipe));
409         cur_state = !!(val & DPLL_VCO_ENABLE);
410         I915_STATE_WARN(cur_state != state,
411              "PLL state assertion failure (expected %s, current %s)\n",
412                         onoff(state), onoff(cur_state));
413 }
414
415 /* XXX: the dsi pll is shared between MIPI DSI ports */
416 void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
417 {
418         u32 val;
419         bool cur_state;
420
421         vlv_cck_get(dev_priv);
422         val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
423         vlv_cck_put(dev_priv);
424
425         cur_state = val & DSI_PLL_VCO_EN;
426         I915_STATE_WARN(cur_state != state,
427              "DSI PLL state assertion failure (expected %s, current %s)\n",
428                         onoff(state), onoff(cur_state));
429 }
430
/*
 * Assert that the panel power sequencer registers associated with @pipe
 * are not write-locked. Only meaningful on pre-DDI platforms; the PPS
 * register layout and panel port selection differ per platform.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	/* DDI platforms use a different PPS scheme; nothing to check here. */
	if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		/* Map the selected panel port to the pipe driving it. */
		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			g4x_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			g4x_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			g4x_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		/* Only LVDS is expected as a panel port on these platforms. */
		drm_WARN_ON(&dev_priv->drm,
			    port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	/* Regs count as unlocked if the panel is off or the unlock magic is set. */
	val = intel_de_read(dev_priv, pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
488
/*
 * Assert that @cpu_transcoder's enable state matches @state. If the
 * transcoder's power well is down, the transcoder is considered
 * disabled without touching its registers.
 */
void assert_transcoder(struct drm_i915_private *dev_priv,
		       enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	/* Only read the register if the power domain is actually up. */
	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		/* Power well off implies the transcoder is off. */
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder),
			onoff(state), onoff(cur_state));
}
516
/* Assert that @plane's hardware enable state matches @state. */
static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	/* Ask the plane itself what the hardware currently reports. */
	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, onoff(state), onoff(cur_state));
}
528
529 #define assert_plane_enabled(p) assert_plane(p, true)
530 #define assert_plane_disabled(p) assert_plane(p, false)
531
532 static void assert_planes_disabled(struct intel_crtc *crtc)
533 {
534         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
535         struct intel_plane *plane;
536
537         for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
538                 assert_plane_disabled(plane);
539 }
540
541 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
542                                     enum pipe pipe)
543 {
544         u32 val;
545         bool enabled;
546
547         val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
548         enabled = !!(val & TRANS_ENABLE);
549         I915_STATE_WARN(enabled,
550              "transcoder assertion failed, should be off on pipe %c but is still active\n",
551              pipe_name(pipe));
552 }
553
554 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
555                                    enum pipe pipe, enum port port,
556                                    i915_reg_t dp_reg)
557 {
558         enum pipe port_pipe;
559         bool state;
560
561         state = g4x_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
562
563         I915_STATE_WARN(state && port_pipe == pipe,
564                         "PCH DP %c enabled on transcoder %c, should be disabled\n",
565                         port_name(port), pipe_name(pipe));
566
567         I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
568                         "IBX PCH DP %c still using transcoder B\n",
569                         port_name(port));
570 }
571
572 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
573                                      enum pipe pipe, enum port port,
574                                      i915_reg_t hdmi_reg)
575 {
576         enum pipe port_pipe;
577         bool state;
578
579         state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
580
581         I915_STATE_WARN(state && port_pipe == pipe,
582                         "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
583                         port_name(port), pipe_name(pipe));
584
585         I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
586                         "IBX PCH HDMI %c still using transcoder B\n",
587                         port_name(port));
588 }
589
/*
 * Assert that no PCH port (DP, VGA/CRT, LVDS, HDMI/SDVO) is still
 * routed to the PCH transcoder of @pipe.
 */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
614
/*
 * Wait (up to 1ms) for @dig_port's lanes to report ready, matching
 * @expected_mask within the port's ready field.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	/* Each port reports readiness in a different register/field. */
	switch (dig_port->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		/* shift expected_mask to line up with port C's ready bits */
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}
648
/*
 * Enable the PCH transcoder feeding @crtc_state's pipe. The PCH DPLL
 * and the FDI link must already be up, since the transcoder is fed by
 * FDI.
 */
static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		/*
		 * Workaround: Set the timing override bit
		 * before enabling the pch transcoder.
		 */
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);

		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	/* Propagate the pipe's interlace mode into the transcoder. */
	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		/* IBX with SDVO needs the legacy interlace mode. */
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
	/* Wait (up to 100ms) for the transcoder to report enabled. */
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
			pipe_name(pipe));
}
715
/*
 * Enable the single LPT PCH transcoder. On LPT the transcoder is
 * always fed from pipe A's FDI.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	/* Workaround: set timing override bit. */
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	/* Configure frame start delay to match the CPU */
	val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
	val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));

	/* Match the CPU transcoder's interlace mode. */
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	/* Wait (up to 100ms) for the transcoder to report enabled. */
	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
				  TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
}
747
/*
 * Disable the PCH transcoder of @pipe. FDI and all PCH ports must
 * already be off, since they depend on the transcoder.
 */
static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
				       enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
			pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		intel_de_write(dev_priv, reg, val);
	}
}
778
/* Disable the single LPT PCH transcoder. */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
				    TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
}
796
797 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
798 {
799         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
800
801         if (HAS_PCH_LPT(dev_priv))
802                 return PIPE_A;
803         else
804                 return crtc->pipe;
805 }
806
/*
 * Enable the CPU transcoder/pipe for @new_crtc_state.
 *
 * All planes must be disabled, and the clock source feeding the pipe
 * (pipe or DSI PLL on GMCH platforms, FDI RX/TX PLLs for PCH output)
 * must already be running; both are asserted below. If the pipe has no
 * usable HW vblank counter, we additionally wait for the scanline
 * counter to start moving so vblank timestamps are sane before
 * drm_crtc_vblank_on().
 */
void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	/* Wa_22012358565:adl-p */
	if (DISPLAY_VER(dev_priv) == 13)
		intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
			     0, PIPE_ARB_USE_PROG_SLOTS);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
867
/*
 * Disable the CPU transcoder/pipe for @old_crtc_state and wait for it to
 * actually turn off. All planes must already be disabled (asserted
 * below). On 830 the pipe is deliberately left enabled; double-wide mode
 * is still cleared in that case.
 */
void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	/* Clear the FEC-stall chicken bit again on disable (display 12+) */
	if (DISPLAY_VER(dev_priv) >= 12)
		intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);

	intel_de_write(dev_priv, reg, val);
	/* Only wait for the pipe to stop if we actually disabled it */
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
909
910 bool
911 intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
912                                     u64 modifier)
913 {
914         return info->is_yuv &&
915                info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
916 }
917
918 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
919 {
920         unsigned int size = 0;
921         int i;
922
923         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
924                 size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;
925
926         return size;
927 }
928
929 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
930 {
931         unsigned int size = 0;
932         int i;
933
934         for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
935                 if (rem_info->plane_alignment)
936                         size = ALIGN(size, rem_info->plane_alignment);
937                 size += rem_info->plane[i].dst_stride * rem_info->plane[i].height;
938         }
939
940         return size;
941 }
942
943 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
944 {
945         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
946         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
947
948         return DISPLAY_VER(dev_priv) < 4 ||
949                 (plane->has_fbc &&
950                  plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL);
951 }
952
/*
 * Pin the framebuffer's backing object into a DPT (display page table)
 * address space @vm.
 *
 * Returns a new reference to the vma on success (released via
 * intel_unpin_fb_vma()), or an ERR_PTR on failure. @uses_fence and
 * @out_flags are accepted for symmetry with intel_pin_and_fence_fb_obj()
 * but are not used here.
 */
static struct i915_vma *
intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
		     const struct i915_ggtt_view *view,
		     bool uses_fence,
		     unsigned long *out_flags,
		     struct i915_address_space *vm)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_vma *vma;
	u32 alignment;
	int ret;

	if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
		return ERR_PTR(-EINVAL);

	/* NOTE(review): fixed 2MiB (4096 * 512) alignment — assumed DPT requirement */
	alignment = 4096 * 512;

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	/* Make the object uncached before binding it for display. */
	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	vma = i915_vma_instance(obj, vm, view);
	if (IS_ERR(vma))
		goto err;

	/* Rebind if an existing vma doesn't satisfy the alignment. */
	if (i915_vma_misplaced(vma, 0, alignment, 0)) {
		ret = i915_vma_unbind(vma);
		if (ret) {
			vma = ERR_PTR(ret);
			goto err;
		}
	}

	ret = i915_vma_pin(vma, 0, alignment, PIN_GLOBAL);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	vma->display_alignment = max_t(u64, vma->display_alignment, alignment);

	i915_gem_object_flush_if_display(obj);

	/* Success: hand a vma reference to the caller. */
	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);

	return vma;
}
1008
1009 struct i915_vma *
1010 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
1011                            bool phys_cursor,
1012                            const struct i915_ggtt_view *view,
1013                            bool uses_fence,
1014                            unsigned long *out_flags)
1015 {
1016         struct drm_device *dev = fb->dev;
1017         struct drm_i915_private *dev_priv = to_i915(dev);
1018         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
1019         intel_wakeref_t wakeref;
1020         struct i915_gem_ww_ctx ww;
1021         struct i915_vma *vma;
1022         unsigned int pinctl;
1023         u32 alignment;
1024         int ret;
1025
1026         if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
1027                 return ERR_PTR(-EINVAL);
1028
1029         if (phys_cursor)
1030                 alignment = intel_cursor_alignment(dev_priv);
1031         else
1032                 alignment = intel_surf_alignment(fb, 0);
1033         if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
1034                 return ERR_PTR(-EINVAL);
1035
1036         /* Note that the w/a also requires 64 PTE of padding following the
1037          * bo. We currently fill all unused PTE with the shadow page and so
1038          * we should always have valid PTE following the scanout preventing
1039          * the VT-d warning.
1040          */
1041         if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
1042                 alignment = 256 * 1024;
1043
1044         /*
1045          * Global gtt pte registers are special registers which actually forward
1046          * writes to a chunk of system memory. Which means that there is no risk
1047          * that the register values disappear as soon as we call
1048          * intel_runtime_pm_put(), so it is correct to wrap only the
1049          * pin/unpin/fence and not more.
1050          */
1051         wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1052
1053         atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
1054
1055         /*
1056          * Valleyview is definitely limited to scanning out the first
1057          * 512MiB. Lets presume this behaviour was inherited from the
1058          * g4x display engine and that all earlier gen are similarly
1059          * limited. Testing suggests that it is a little more
1060          * complicated than this. For example, Cherryview appears quite
1061          * happy to scanout from anywhere within its global aperture.
1062          */
1063         pinctl = 0;
1064         if (HAS_GMCH(dev_priv))
1065                 pinctl |= PIN_MAPPABLE;
1066
1067         i915_gem_ww_ctx_init(&ww, true);
1068 retry:
1069         ret = i915_gem_object_lock(obj, &ww);
1070         if (!ret && phys_cursor)
1071                 ret = i915_gem_object_attach_phys(obj, alignment);
1072         else if (!ret && HAS_LMEM(dev_priv))
1073                 ret = i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM);
1074         /* TODO: Do we need to sync when migration becomes async? */
1075         if (!ret)
1076                 ret = i915_gem_object_pin_pages(obj);
1077         if (ret)
1078                 goto err;
1079
1080         if (!ret) {
1081                 vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment,
1082                                                            view, pinctl);
1083                 if (IS_ERR(vma)) {
1084                         ret = PTR_ERR(vma);
1085                         goto err_unpin;
1086                 }
1087         }
1088
1089         if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
1090                 /*
1091                  * Install a fence for tiled scan-out. Pre-i965 always needs a
1092                  * fence, whereas 965+ only requires a fence if using
1093                  * framebuffer compression.  For simplicity, we always, when
1094                  * possible, install a fence as the cost is not that onerous.
1095                  *
1096                  * If we fail to fence the tiled scanout, then either the
1097                  * modeset will reject the change (which is highly unlikely as
1098                  * the affected systems, all but one, do not have unmappable
1099                  * space) or we will not be able to enable full powersaving
1100                  * techniques (also likely not to apply due to various limits
1101                  * FBC and the like impose on the size of the buffer, which
1102                  * presumably we violated anyway with this unmappable buffer).
1103                  * Anyway, it is presumably better to stumble onwards with
1104                  * something and try to run the system in a "less than optimal"
1105                  * mode that matches the user configuration.
1106                  */
1107                 ret = i915_vma_pin_fence(vma);
1108                 if (ret != 0 && DISPLAY_VER(dev_priv) < 4) {
1109                         i915_vma_unpin(vma);
1110                         goto err_unpin;
1111                 }
1112                 ret = 0;
1113
1114                 if (vma->fence)
1115                         *out_flags |= PLANE_HAS_FENCE;
1116         }
1117
1118         i915_vma_get(vma);
1119
1120 err_unpin:
1121         i915_gem_object_unpin_pages(obj);
1122 err:
1123         if (ret == -EDEADLK) {
1124                 ret = i915_gem_ww_ctx_backoff(&ww);
1125                 if (!ret)
1126                         goto retry;
1127         }
1128         i915_gem_ww_ctx_fini(&ww);
1129         if (ret)
1130                 vma = ERR_PTR(ret);
1131
1132         atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
1133         intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1134         return vma;
1135 }
1136
/*
 * Release a framebuffer vma pinned via intel_pin_and_fence_fb_obj() or
 * intel_pin_fb_obj_dpt(): drop the fence (if PLANE_HAS_FENCE was set in
 * @flags), then the pin, then the reference taken at pin time.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
	i915_vma_put(vma);
}
1144
1145 /*
1146  * Convert the x/y offsets into a linear offset.
1147  * Only valid with 0/180 degree rotation, which is fine since linear
1148  * offset is only used with linear buffers on pre-hsw and tiled buffers
1149  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
1150  */
1151 u32 intel_fb_xy_to_linear(int x, int y,
1152                           const struct intel_plane_state *state,
1153                           int color_plane)
1154 {
1155         const struct drm_framebuffer *fb = state->hw.fb;
1156         unsigned int cpp = fb->format->cpp[color_plane];
1157         unsigned int pitch = state->view.color_plane[color_plane].stride;
1158
1159         return y * pitch + x * cpp;
1160 }
1161
1162 /*
1163  * Add the x/y offsets derived from fb->offsets[] to the user
1164  * specified plane src x/y offsets. The resulting x/y offsets
1165  * specify the start of scanout from the beginning of the gtt mapping.
1166  */
1167 void intel_add_fb_offsets(int *x, int *y,
1168                           const struct intel_plane_state *state,
1169                           int color_plane)
1170
1171 {
1172         *x += state->view.color_plane[color_plane].x;
1173         *y += state->view.color_plane[color_plane].y;
1174 }
1175
1176 /*
1177  * From the Sky Lake PRM:
1178  * "The Color Control Surface (CCS) contains the compression status of
1179  *  the cache-line pairs. The compression state of the cache-line pair
1180  *  is specified by 2 bits in the CCS. Each CCS cache-line represents
1181  *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
1182  *  cache-line-pairs. CCS is always Y tiled."
1183  *
1184  * Since cache line pairs refers to horizontally adjacent cache lines,
1185  * each cache line in the CCS corresponds to an area of 32x16 cache
1186  * lines on the main surface. Since each pixel is 4 bytes, this gives
1187  * us a ratio of one byte in the CCS for each 8x16 pixels in the
1188  * main surface.
1189  */
/* plane 0: 4 bytes/px main surface; plane 1: 1 CCS byte per 8x16 px (hsub x vsub) */
static const struct drm_format_info skl_ccs_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};
1200
1201 /*
1202  * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
1203  * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
1204  * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
1205  * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in
1206  * the main surface.
1207  */
static const struct drm_format_info gen12_ccs_formats[] = {
	/*
	 * 2-plane entries: plane 1 is the CCS for main plane 0.
	 * 4-plane entries (semi-planar YUV): planes 2/3 are presumably the
	 * CCS for color planes 0/1 — see drm_fourcc.h modifier docs.
	 * char_per_block/block_w/block_h express the CCS byte-per-area ratio
	 * described in the comment above.
	 */
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_YUYV, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_YVYU, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_UYVY, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_VYUY, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_XYUV8888, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_NV12, .num_planes = 4,
	  .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P010, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P012, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P016, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
};
1249
1250 /*
1251  * Same as gen12_ccs_formats[] above, but with additional surface used
1252  * to pass Clear Color information in plane 2 with 64 bits of data.
1253  */
static const struct drm_format_info gen12_ccs_cc_formats[] = {
	/* plane 2 carries the 64-bit clear color: char_per_block = 0, no pixel data */
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
};
1268
1269 static const struct drm_format_info *
1270 lookup_format_info(const struct drm_format_info formats[],
1271                    int num_formats, u32 format)
1272 {
1273         int i;
1274
1275         for (i = 0; i < num_formats; i++) {
1276                 if (formats[i].format == format)
1277                         return &formats[i];
1278         }
1279
1280         return NULL;
1281 }
1282
1283 static const struct drm_format_info *
1284 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
1285 {
1286         switch (cmd->modifier[0]) {
1287         case I915_FORMAT_MOD_Y_TILED_CCS:
1288         case I915_FORMAT_MOD_Yf_TILED_CCS:
1289                 return lookup_format_info(skl_ccs_formats,
1290                                           ARRAY_SIZE(skl_ccs_formats),
1291                                           cmd->pixel_format);
1292         case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
1293         case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
1294                 return lookup_format_info(gen12_ccs_formats,
1295                                           ARRAY_SIZE(gen12_ccs_formats),
1296                                           cmd->pixel_format);
1297         case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
1298                 return lookup_format_info(gen12_ccs_cc_formats,
1299                                           ARRAY_SIZE(gen12_ccs_cc_formats),
1300                                           cmd->pixel_format);
1301         default:
1302                 return NULL;
1303         }
1304 }
1305
1306 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
1307                               u32 pixel_format, u64 modifier)
1308 {
1309         struct intel_crtc *crtc;
1310         struct intel_plane *plane;
1311
1312         if (!HAS_DISPLAY(dev_priv))
1313                 return 0;
1314
1315         /*
1316          * We assume the primary plane for pipe A has
1317          * the highest stride limits of them all,
1318          * if in case pipe A is disabled, use the first pipe from pipe_mask.
1319          */
1320         crtc = intel_get_first_crtc(dev_priv);
1321         if (!crtc)
1322                 return 0;
1323
1324         plane = to_intel_plane(crtc->base.primary);
1325
1326         return plane->max_stride(plane, pixel_format, modifier,
1327                                  DRM_MODE_ROTATE_0);
1328 }
1329
/*
 * Wrap the firmware/BIOS-programmed scanout buffer (preallocated in
 * stolen memory) in a GEM object and pin it at its current GGTT offset,
 * so we can take over the boot framebuffer without a visible glitch.
 *
 * Returns NULL on any failure (the caller then allocates a fresh fb).
 */
static struct i915_vma *
initial_plane_vma(struct drm_i915_private *i915,
		  struct intel_initial_plane_config *plane_config)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 base, size;

	if (plane_config->size == 0)
		return NULL;

	/* Expand [base, base + size) to GTT minimum alignment boundaries. */
	base = round_down(plane_config->base,
			  I915_GTT_MIN_ALIGNMENT);
	size = round_up(plane_config->base + plane_config->size,
			I915_GTT_MIN_ALIGNMENT);
	size -= base;

	/*
	 * If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features.
	 */
	if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
	    size * 2 > i915->stolen_usable_size)
		return NULL;

	obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
	if (IS_ERR(obj))
		return NULL;

	/*
	 * Mark it WT ahead of time to avoid changing the
	 * cache_level during fbdev initialization. The
	 * unbind there would get stuck waiting for rcu.
	 */
	i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ?
					    I915_CACHE_WT : I915_CACHE_NONE);

	/* Carry the BIOS-programmed tiling/stride over to the object. */
	switch (plane_config->tiling) {
	case I915_TILING_NONE:
		break;
	case I915_TILING_X:
	case I915_TILING_Y:
		obj->tiling_and_stride =
			plane_config->fb->base.pitches[0] |
			plane_config->tiling;
		break;
	default:
		MISSING_CASE(plane_config->tiling);
		goto err_obj;
	}

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma))
		goto err_obj;

	/* Must be pinned exactly where the BIOS left it scanning out. */
	if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
		goto err_obj;

	/* Tiled scanout requires a map-and-fenceable vma. */
	if (i915_gem_object_is_tiled(obj) &&
	    !i915_vma_is_map_and_fenceable(vma))
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return NULL;
}
1399
/*
 * Build an intel_framebuffer around the BIOS-programmed plane described
 * by @plane_config. Only linear/X/Y-tiled modifiers can be taken over.
 * On success, plane_config->vma holds the pinned vma; returns false on
 * any failure.
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	struct i915_vma *vma;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
	case I915_FORMAT_MOD_Y_TILED:
		break;
	default:
		drm_dbg(&dev_priv->drm,
			"Unsupported modifier for initial FB: 0x%llx\n",
			fb->modifier);
		return false;
	}

	vma = initial_plane_vma(dev_priv, plane_config);
	if (!vma)
		return false;

	/* Re-describe the BIOS fb as a mode_fb_cmd2 for framebuffer init. */
	mode_cmd.pixel_format = fb->format->format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier;
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(to_intel_framebuffer(fb),
				   vma->obj, &mode_cmd)) {
		drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
		goto err_vma;
	}

	plane_config->vma = vma;
	return true;

err_vma:
	i915_vma_put(vma);
	return false;
}
1446
1447 static void
1448 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
1449                         struct intel_plane_state *plane_state,
1450                         bool visible)
1451 {
1452         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1453
1454         plane_state->uapi.visible = visible;
1455
1456         if (visible)
1457                 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
1458         else
1459                 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
1460 }
1461
1462 static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
1463 {
1464         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1465         struct drm_plane *plane;
1466
1467         /*
1468          * Active_planes aliases if multiple "primary" or cursor planes
1469          * have been used on the same (or wrong) pipe. plane_mask uses
1470          * unique ids, hence we can use that to reconstruct active_planes.
1471          */
1472         crtc_state->enabled_planes = 0;
1473         crtc_state->active_planes = 0;
1474
1475         drm_for_each_plane_mask(plane, &dev_priv->drm,
1476                                 crtc_state->uapi.plane_mask) {
1477                 crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
1478                 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
1479         }
1480 }
1481
/*
 * Disable @plane on @crtc outside of a full atomic commit (used during
 * initial hw state takeover/sanitization). Brings the software state
 * (visibility, plane bitmasks, data rate, min cdclk, IPS) in line, and
 * handles the self-refresh and FIFO-underrun quirks around disabling a
 * plane. The statement order below is load-bearing — see the inline
 * comments.
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
					 struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	intel_set_plane_visible(crtc_state, plane_state, false);
	fixup_plane_bitmasks(crtc_state);
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	/* IPS depends on the primary plane; drop it along with the plane. */
	if (plane->id == PLANE_PRIMARY)
		hsw_disable_ips(crtc_state);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_disable_plane(plane, crtc_state);
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}
1527
1528 static bool
1529 intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
1530                               const struct intel_initial_plane_config *plane_config,
1531                               struct drm_framebuffer **fb,
1532                               struct i915_vma **vma)
1533 {
1534         struct intel_crtc *crtc;
1535
1536         for_each_intel_crtc(&i915->drm, crtc) {
1537                 struct intel_crtc_state *crtc_state =
1538                         to_intel_crtc_state(crtc->base.state);
1539                 struct intel_plane *plane =
1540                         to_intel_plane(crtc->base.primary);
1541                 struct intel_plane_state *plane_state =
1542                         to_intel_plane_state(plane->base.state);
1543
1544                 if (!crtc_state->uapi.active)
1545                         continue;
1546
1547                 if (!plane_state->ggtt_vma)
1548                         continue;
1549
1550                 if (intel_plane_ggtt_offset(plane_state) == plane_config->base) {
1551                         *fb = plane_state->hw.fb;
1552                         *vma = plane_state->ggtt_vma;
1553                         return true;
1554                 }
1555         }
1556
1557         return false;
1558 }
1559
/*
 * Take over the BIOS-programmed framebuffer described by @plane_config for
 * the primary plane of @crtc, so the boot image stays on screen.  Falls
 * back to sharing another crtc's fb/vma, and failing that disables the
 * primary plane (and the linked bigjoiner primary) entirely.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane *plane =
		to_intel_plane(crtc->base.primary);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);
	struct drm_framebuffer *fb;
	struct i915_vma *vma;

	/*
	 * TODO:
	 *   Disable planes if get_initial_plane_config() failed.
	 *   Make sure things work if the surface base is not page aligned.
	 */
	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(crtc, plane_config)) {
		fb = &plane_config->fb->base;
		vma = plane_config->vma;
		goto valid_fb;
	}

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	if (intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb, &vma))
		goto valid_fb;

	/*
	 * We've failed to reconstruct the BIOS FB.  Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up.  The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	intel_plane_disable_noatomic(crtc, plane);
	if (crtc_state->bigjoiner) {
		/* Also disable the primary plane of the linked bigjoiner pipe. */
		struct intel_crtc *slave =
			crtc_state->bigjoiner_linked_crtc;
		intel_plane_disable_noatomic(slave, to_intel_plane(slave->base.primary));
	}

	return;

valid_fb:
	plane_state->uapi.rotation = plane_config->rotation;
	intel_fb_fill_view(to_intel_framebuffer(fb),
			   plane_state->uapi.rotation, &plane_state->view);

	/* Pin the vma and take a reference for the plane state. */
	__i915_vma_pin(vma);
	plane_state->ggtt_vma = i915_vma_get(vma);
	if (intel_plane_uses_fence(plane_state) &&
	    i915_vma_pin_fence(vma) == 0 && vma->fence)
		plane_state->flags |= PLANE_HAS_FENCE;

	/* Show the whole fb, unscaled, from the top-left corner (16.16 src). */
	plane_state->uapi.src_x = 0;
	plane_state->uapi.src_y = 0;
	plane_state->uapi.src_w = fb->width << 16;
	plane_state->uapi.src_h = fb->height << 16;

	plane_state->uapi.crtc_x = 0;
	plane_state->uapi.crtc_y = 0;
	plane_state->uapi.crtc_w = fb->width;
	plane_state->uapi.crtc_h = fb->height;

	/* Keep the BIOS swizzle setting if the BIOS fb was tiled. */
	if (plane_config->tiling)
		dev_priv->preserve_bios_swizzle = true;

	plane_state->uapi.fb = fb;
	drm_framebuffer_get(fb);

	plane_state->uapi.crtc = &crtc->base;
	intel_plane_copy_uapi_to_hw_state(plane_state, plane_state, crtc);

	/* Flag the plane's frontbuffer bit as busy in the fb's tracking bits. */
	atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits);
}
1644
1645 unsigned int
1646 intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
1647 {
1648         int x = 0, y = 0;
1649
1650         intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
1651                                           plane_state->view.color_plane[0].offset, 0);
1652
1653         return y;
1654 }
1655
1656 static int
1657 __intel_display_resume(struct drm_device *dev,
1658                        struct drm_atomic_state *state,
1659                        struct drm_modeset_acquire_ctx *ctx)
1660 {
1661         struct drm_crtc_state *crtc_state;
1662         struct drm_crtc *crtc;
1663         int i, ret;
1664
1665         intel_modeset_setup_hw_state(dev, ctx);
1666         intel_vga_redisable(to_i915(dev));
1667
1668         if (!state)
1669                 return 0;
1670
1671         /*
1672          * We've duplicated the state, pointers to the old state are invalid.
1673          *
1674          * Don't attempt to use the old state until we commit the duplicated state.
1675          */
1676         for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1677                 /*
1678                  * Force recalculation even if we restore
1679                  * current state. With fast modeset this may not result
1680                  * in a modeset when the state is compatible.
1681                  */
1682                 crtc_state->mode_changed = true;
1683         }
1684
1685         /* ignore any reset values/BIOS leftovers in the WM registers */
1686         if (!HAS_GMCH(to_i915(dev)))
1687                 to_intel_atomic_state(state)->skip_intermediate_wm = true;
1688
1689         ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
1690
1691         drm_WARN_ON(dev, ret == -EDEADLK);
1692         return ret;
1693 }
1694
1695 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
1696 {
1697         return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
1698                 intel_has_gpu_reset(&dev_priv->gt));
1699 }
1700
/*
 * Quiesce the display before a GPU reset that would clobber it: flag the
 * reset to modeset paths, take all modeset locks, duplicate the current
 * atomic state for later restore and disable all crtcs.  Paired with
 * intel_display_finish_reset().
 */
void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* reset doesn't touch the display */
	if (!dev_priv->params.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
	smp_mb__after_atomic();
	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Modeset potentially stuck, unbreaking through wedging\n");
		intel_gt_set_wedged(&dev_priv->gt);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	/* Retry until every modeset lock has been acquired. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
			ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
			ret);
		drm_atomic_state_put(state);
		return;
	}

	/* Stash the duplicated state for intel_display_finish_reset(). */
	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
1763
/*
 * Undo intel_display_prepare_reset(): re-initialize the display hardware
 * if the reset clobbered it, restore the saved atomic state, then drop
 * the modeset locks and clear the reset flag taken before the reset.
 */
void intel_display_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
		return;

	/* Claim the saved state; if there is none, only the locks are held. */
	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev_priv);
		intel_init_clock_gating(dev_priv);
		intel_hpd_init(dev_priv);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);

		intel_hpd_poll_disable(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}
1815
1816 static bool underrun_recovery_supported(const struct intel_crtc_state *crtc_state)
1817 {
1818         if (crtc_state->pch_pfit.enabled &&
1819             (crtc_state->pipe_src_w > drm_rect_width(&crtc_state->pch_pfit.dst) ||
1820              crtc_state->pipe_src_h > drm_rect_height(&crtc_state->pch_pfit.dst) ||
1821              crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420))
1822                 return false;
1823
1824         if (crtc_state->dsc.compression_enable)
1825                 return false;
1826
1827         if (crtc_state->has_psr2)
1828                 return false;
1829
1830         if (crtc_state->splitter.enable)
1831                 return false;
1832
1833         return true;
1834 }
1835
1836 static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
1837 {
1838         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1839         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1840         enum pipe pipe = crtc->pipe;
1841         u32 tmp;
1842
1843         tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
1844
1845         /*
1846          * Display WA #1153: icl
1847          * enable hardware to bypass the alpha math
1848          * and rounding for per-pixel values 00 and 0xff
1849          */
1850         tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
1851         /*
1852          * Display WA # 1605353570: icl
1853          * Set the pixel rounding bit to 1 for allowing
1854          * passthrough of Frame buffer pixels unmodified
1855          * across pipe
1856          */
1857         tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
1858
1859         if (IS_DG2(dev_priv)) {
1860                 /*
1861                  * Underrun recovery must always be disabled on DG2.  However
1862                  * the chicken bit meaning is inverted compared to other
1863                  * platforms.
1864                  */
1865                 tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
1866         } else if (DISPLAY_VER(dev_priv) >= 13) {
1867                 if (underrun_recovery_supported(crtc_state))
1868                         tmp &= ~UNDERRUN_RECOVERY_DISABLE_ADLP;
1869                 else
1870                         tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;
1871         }
1872
1873         intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
1874 }
1875
/*
 * Check whether any crtc still has an atomic commit whose cleanup has not
 * finished.  If one is found, wait a vblank on that crtc to give the
 * cleanup a chance to complete and return true; otherwise return false.
 */
bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
	struct drm_crtc *crtc;
	bool cleanup_done;

	drm_for_each_crtc(crtc, &dev_priv->drm) {
		struct drm_crtc_commit *commit;
		/* commit_lock protects the crtc's commit_list. */
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		/* An empty commit list counts as "cleanup done". */
		cleanup_done = commit ?
			try_wait_for_completion(&commit->cleanup_done) : true;
		spin_unlock(&crtc->commit_lock);

		if (cleanup_done)
			continue;

		drm_crtc_wait_one_vblank(crtc);

		return true;
	}

	return false;
}
1900
1901 void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
1902 {
1903         u32 temp;
1904
1905         intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
1906
1907         mutex_lock(&dev_priv->sb_lock);
1908
1909         temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
1910         temp |= SBI_SSCCTL_DISABLE;
1911         intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
1912
1913         mutex_unlock(&dev_priv->sb_lock);
1914 }
1915
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* Gate the clock and disable the modulator before reprogramming. */
	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in kHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		/* Split the total divisor into integer (divsel) and
		 * phase-increment (phaseinc) parts; divsel is encoded
		 * with a -2 bias. */
		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		    ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
		    ~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	drm_dbg_kms(&dev_priv->drm,
		    "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
		    clock, auxdiv, divsel, phasedir, phaseinc);

	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	/* Ungate the pixel clock now that the modulator is running. */
	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
1991
/*
 * Read back the currently programmed iCLKIP frequency in kHz; the inverse
 * of lpt_program_iclkip().  Returns 0 when the pixel clock is gated or
 * the SSC modulator is disabled.
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	/* Extract the divider fields set by lpt_program_iclkip(). */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	/* Undo the "-2 bias + phase increment" encoding of the divisor. */
	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
2028
/*
 * Copy the currently programmed CPU transcoder h/v timing registers of
 * @crtc_state into the corresponding PCH transcoder @pch_transcoder
 * registers, so both transcoders run with identical timings.
 */
static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
					   enum pipe pch_transcoder)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	/* Horizontal timings. */
	intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
		       intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
		       intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
		       intel_de_read(dev_priv, HSYNC(cpu_transcoder)));

	/* Vertical timings. */
	intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
		       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
		       intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
		       intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		       intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
}
2052
2053 /*
2054  * Finds the encoder associated with the given CRTC. This can only be
2055  * used when we know that the CRTC isn't feeding multiple encoders!
2056  */
2057 struct intel_encoder *
2058 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
2059                            const struct intel_crtc_state *crtc_state)
2060 {
2061         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2062         const struct drm_connector_state *connector_state;
2063         const struct drm_connector *connector;
2064         struct intel_encoder *encoder = NULL;
2065         int num_encoders = 0;
2066         int i;
2067
2068         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
2069                 if (connector_state->crtc != &crtc->base)
2070                         continue;
2071
2072                 encoder = to_intel_encoder(connector_state->best_encoder);
2073                 num_encoders++;
2074         }
2075
2076         drm_WARN(encoder->base.dev, num_encoders != 1,
2077                  "%d encoders for pipe %c\n",
2078                  num_encoders, pipe_name(crtc->pipe));
2079
2080         return encoder;
2081 }
2082
2083 /*
2084  * Enable PCH resources required for PCH ports:
2085  *   - PCH PLLs
2086  *   - FDI training & RX/TX
2087  *   - update transcoder timings
2088  *   - DP transcoding bits
2089  *   - transcoder
2090  */
static void ilk_pch_enable(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 temp;

	/* The PCH transcoder must still be off at this point. */
	assert_pch_transcoder_disabled(dev_priv, pipe);

	/* For PCH output, training FDI link */
	intel_fdi_link_train(crtc, crtc_state);

	/* We need to program the right clock selection before writing the pixel
	 * mutliplier into the DPLL. */
	if (HAS_PCH_CPT(dev_priv)) {
		u32 sel;

		/* Route DPLL A or B to this transcoder depending on which
		 * shared DPLL the crtc state was assigned. */
		temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (crtc_state->shared_dpll ==
		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
			temp |= sel;
		else
			temp &= ~sel;
		intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	intel_enable_shared_dpll(crtc_state);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ilk_pch_transcoder_set_timings(crtc_state, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev_priv) &&
	    intel_crtc_has_dp_encoder(crtc_state)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc_state->hw.adjusted_mode;
		/* Copy the bpc field out of PIPECONF (bits 7:5). */
		u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		i915_reg_t reg = TRANS_DP_CTL(pipe);
		enum port port;

		temp = intel_de_read(dev_priv, reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= TRANS_DP_OUTPUT_ENABLE;
		temp |= bpc << 9; /* same format but at 11:9 */

		/* Propagate the mode's sync polarities to the transcoder. */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		/* Select the (single) DP port feeding this transcoder. */
		port = intel_get_crtc_new_encoder(state, crtc_state)->port;
		drm_WARN_ON(dev, port < PORT_B || port > PORT_D);
		temp |= TRANS_DP_PORT_SEL(port);

		intel_de_write(dev_priv, reg, temp);
	}

	ilk_enable_pch_transcoder(crtc_state);
}
2166
/*
 * Enable the LPT PCH transcoder for @crtc_state: program iCLKIP to the
 * crtc clock, copy over the CPU transcoder timings, then enable the
 * transcoder.  Note the PCH transcoder is addressed as PIPE_A here.
 */
void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, PIPE_A);

	lpt_program_iclkip(crtc_state);

	/* Set transcoder timing. */
	ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
2182
/*
 * Post-modeset sanity check for CPT: verify that the pipe is actually
 * running by polling the scanline counter (PIPEDSL) for movement.
 * Retries the 5 ms poll once before reporting the pipe as stuck.
 */
static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = intel_de_read(dev_priv, dslreg);
	udelay(500);
	if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
		if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
			drm_err(&dev_priv->drm,
				"mode set failed: pipe %c stuck\n",
				pipe_name(pipe));
	}
}
2198
/*
 * Enable and configure the PCH panel fitter for @crtc_state, programming
 * the destination window position and size.  No-op when the state does
 * not use the fitter.
 */
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
	enum pipe pipe = crtc->pipe;
	int width = drm_rect_width(dst);
	int height = drm_rect_height(dst);
	int x = dst->x1;
	int y = dst->y1;

	if (!crtc_state->pch_pfit.enabled)
		return;

	/* Force use of hard-coded filter coefficients
	 * as some pre-programmed values are broken,
	 * e.g. x201.
	 */
	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
			       PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
	else
		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
			       PF_FILTER_MED_3x3);
	/* Window position and size are packed as x/width in the high
	 * 16 bits, y/height in the low 16 bits. */
	intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
	intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
}
2226
/*
 * Enable IPS for @crtc_state, if the state asks for it.  On BDW the
 * enable goes through the pcode mailbox; on HSW it is done directly
 * via the IPS_CTL register.
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */
	drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

	if (IS_BROADWELL(dev_priv)) {
		drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
							 IPS_ENABLE | IPS_PCODE_CONTROL));
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
			drm_err(&dev_priv->drm,
				"Timed out waiting for IPS enable\n");
	}
}
2263
/*
 * Disable IPS for @crtc_state, if it was enabled.  Mirrors
 * hsw_enable_ips(): pcode mailbox on BDW, IPS_CTL register on HSW.
 * Ends with a vblank wait so the caller may safely disable planes.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	if (IS_BROADWELL(dev_priv)) {
		drm_WARN_ON(dev,
			    sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		/*
		 * Wait for PCODE to finish disabling IPS. The BSpec specified
		 * 42ms timeout value leads to occasional timeouts so use 100ms
		 * instead.
		 */
		if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
			drm_err(&dev_priv->drm,
				"Timed out waiting for IPS disable\n");
	} else {
		intel_de_write(dev_priv, IPS_CTL, 0);
		intel_de_posting_read(dev_priv, IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}
2292
2293 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
2294 {
2295         if (crtc->overlay)
2296                 (void) intel_overlay_switch_off(crtc->overlay);
2297
2298         /* Let userspace switch the overlay on again. In most cases userspace
2299          * has to recompute where to put it anyway.
2300          */
2301 }
2302
2303 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
2304                                        const struct intel_crtc_state *new_crtc_state)
2305 {
2306         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
2307         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2308
2309         if (!old_crtc_state->ips_enabled)
2310                 return false;
2311
2312         if (intel_crtc_needs_modeset(new_crtc_state))
2313                 return true;
2314
2315         /*
2316          * Workaround : Do not read or write the pipe palette/gamma data while
2317          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
2318          *
2319          * Disable IPS before we program the LUT.
2320          */
2321         if (IS_HASWELL(dev_priv) &&
2322             (new_crtc_state->uapi.color_mgmt_changed ||
2323              new_crtc_state->update_pipe) &&
2324             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
2325                 return true;
2326
2327         return !new_crtc_state->ips_enabled;
2328 }
2329
2330 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
2331                                        const struct intel_crtc_state *new_crtc_state)
2332 {
2333         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
2334         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2335
2336         if (!new_crtc_state->ips_enabled)
2337                 return false;
2338
2339         if (intel_crtc_needs_modeset(new_crtc_state))
2340                 return true;
2341
2342         /*
2343          * Workaround : Do not read or write the pipe palette/gamma data while
2344          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
2345          *
2346          * Re-enable IPS after the LUT has been programmed.
2347          */
2348         if (IS_HASWELL(dev_priv) &&
2349             (new_crtc_state->uapi.color_mgmt_changed ||
2350              new_crtc_state->update_pipe) &&
2351             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
2352                 return true;
2353
2354         /*
2355          * We can't read out IPS on broadwell, assume the worst and
2356          * forcibly enable IPS on the first fastset.
2357          */
2358         if (new_crtc_state->update_pipe && old_crtc_state->inherited)
2359                 return true;
2360
2361         return !old_crtc_state->ips_enabled;
2362 }
2363
2364 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
2365 {
2366         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2367
2368         if (!crtc_state->nv12_planes)
2369                 return false;
2370
2371         /* WA Display #0827: Gen9:all */
2372         if (DISPLAY_VER(dev_priv) == 9)
2373                 return true;
2374
2375         return false;
2376 }
2377
2378 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
2379 {
2380         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2381
2382         /* Wa_2006604312:icl,ehl */
2383         if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
2384                 return true;
2385
2386         return false;
2387 }
2388
2389 static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
2390 {
2391         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2392
2393         /* Wa_1604331009:icl,jsl,ehl */
2394         if (is_hdr_mode(crtc_state) &&
2395             crtc_state->active_planes & BIT(PLANE_CURSOR) &&
2396             DISPLAY_VER(dev_priv) == 11)
2397                 return true;
2398
2399         return false;
2400 }
2401
2402 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
2403                             const struct intel_crtc_state *new_crtc_state)
2404 {
2405         return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
2406                 new_crtc_state->active_planes;
2407 }
2408
2409 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
2410                              const struct intel_crtc_state *new_crtc_state)
2411 {
2412         return old_crtc_state->active_planes &&
2413                 (!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
2414 }
2415
/*
 * intel_post_plane_update - per-CRTC tail work after the plane update
 * @state: the atomic commit in progress
 * @crtc: the CRTC that was just updated
 *
 * Runs the steps that must happen after the plane registers were written:
 * frontbuffer flip notification, post-update watermarks, IPS re-enable,
 * FBC/DRRS bookkeeping, and teardown of workarounds that the new state no
 * longer needs.
 */
static void intel_post_plane_update(struct intel_atomic_state *state,
                                    struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        const struct intel_crtc_state *old_crtc_state =
                intel_atomic_get_old_crtc_state(state, crtc);
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        enum pipe pipe = crtc->pipe;

        intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

        if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
                intel_update_watermarks(dev_priv);

        /* IPS may only come back on after the plane/LUT update is done. */
        if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
                hsw_enable_ips(new_crtc_state);

        intel_fbc_post_update(state, crtc);
        intel_drrs_page_flip(state, crtc);

        /* Display WA #0827: tear down once no NV12 planes remain. */
        if (needs_nv12_wa(old_crtc_state) &&
            !needs_nv12_wa(new_crtc_state))
                skl_wa_827(dev_priv, pipe, false);

        /* Wa_2006604312:icl,ehl - scaler clock gating WA teardown. */
        if (needs_scalerclk_wa(old_crtc_state) &&
            !needs_scalerclk_wa(new_crtc_state))
                icl_wa_scalerclkgating(dev_priv, pipe, false);

        /* Wa_1604331009:icl,jsl,ehl - cursor clock gating WA teardown. */
        if (needs_cursorclk_wa(old_crtc_state) &&
            !needs_cursorclk_wa(new_crtc_state))
                icl_wa_cursorclkgating(dev_priv, pipe, false);

}
2450
2451 static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
2452                                         struct intel_crtc *crtc)
2453 {
2454         const struct intel_crtc_state *crtc_state =
2455                 intel_atomic_get_new_crtc_state(state, crtc);
2456         u8 update_planes = crtc_state->update_planes;
2457         const struct intel_plane_state *plane_state;
2458         struct intel_plane *plane;
2459         int i;
2460
2461         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
2462                 if (plane->enable_flip_done &&
2463                     plane->pipe == crtc->pipe &&
2464                     update_planes & BIT(plane->id))
2465                         plane->enable_flip_done(plane);
2466         }
2467 }
2468
2469 static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
2470                                          struct intel_crtc *crtc)
2471 {
2472         const struct intel_crtc_state *crtc_state =
2473                 intel_atomic_get_new_crtc_state(state, crtc);
2474         u8 update_planes = crtc_state->update_planes;
2475         const struct intel_plane_state *plane_state;
2476         struct intel_plane *plane;
2477         int i;
2478
2479         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
2480                 if (plane->disable_flip_done &&
2481                     plane->pipe == crtc->pipe &&
2482                     update_planes & BIT(plane->id))
2483                         plane->disable_flip_done(plane);
2484         }
2485 }
2486
2487 static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
2488                                              struct intel_crtc *crtc)
2489 {
2490         struct drm_i915_private *i915 = to_i915(state->base.dev);
2491         const struct intel_crtc_state *old_crtc_state =
2492                 intel_atomic_get_old_crtc_state(state, crtc);
2493         const struct intel_crtc_state *new_crtc_state =
2494                 intel_atomic_get_new_crtc_state(state, crtc);
2495         u8 update_planes = new_crtc_state->update_planes;
2496         const struct intel_plane_state *old_plane_state;
2497         struct intel_plane *plane;
2498         bool need_vbl_wait = false;
2499         int i;
2500
2501         for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
2502                 if (plane->need_async_flip_disable_wa &&
2503                     plane->pipe == crtc->pipe &&
2504                     update_planes & BIT(plane->id)) {
2505                         /*
2506                          * Apart from the async flip bit we want to
2507                          * preserve the old state for the plane.
2508                          */
2509                         plane->async_flip(plane, old_crtc_state,
2510                                           old_plane_state, false);
2511                         need_vbl_wait = true;
2512                 }
2513         }
2514
2515         if (need_vbl_wait)
2516                 intel_wait_for_vblank(i915, crtc->pipe);
2517 }
2518
/*
 * intel_pre_plane_update - per-CRTC preparation before the plane update
 * @state: the atomic commit in progress
 * @crtc: the CRTC about to be updated
 *
 * Runs everything that must happen before the plane registers are written:
 * IPS disable, FBC pre-update, arming of display workarounds needed by the
 * new state, self-refresh/watermark sequencing (with the vblank waits the
 * hardware requires between the individual steps), underrun-reporting
 * adjustments and the async flip disable workaround. The order of these
 * steps is significant.
 */
static void intel_pre_plane_update(struct intel_atomic_state *state,
                                   struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        const struct intel_crtc_state *old_crtc_state =
                intel_atomic_get_old_crtc_state(state, crtc);
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        enum pipe pipe = crtc->pipe;

        /* IPS must be off before the LUT/plane update (see the helper). */
        if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
                hsw_disable_ips(old_crtc_state);

        /* FBC may ask for a vblank of settling time before the update. */
        if (intel_fbc_pre_update(state, crtc))
                intel_wait_for_vblank(dev_priv, pipe);

        /* Display WA 827 */
        if (!needs_nv12_wa(old_crtc_state) &&
            needs_nv12_wa(new_crtc_state))
                skl_wa_827(dev_priv, pipe, true);

        /* Wa_2006604312:icl,ehl */
        if (!needs_scalerclk_wa(old_crtc_state) &&
            needs_scalerclk_wa(new_crtc_state))
                icl_wa_scalerclkgating(dev_priv, pipe, true);

        /* Wa_1604331009:icl,jsl,ehl */
        if (!needs_cursorclk_wa(old_crtc_state) &&
            needs_cursorclk_wa(new_crtc_state))
                icl_wa_cursorclkgating(dev_priv, pipe, true);

        /*
         * Vblank time updates from the shadow to live plane control register
         * are blocked if the memory self-refresh mode is active at that
         * moment. So to make sure the plane gets truly disabled, disable
         * first the self-refresh mode. The self-refresh enable bit in turn
         * will be checked/applied by the HW only at the next frame start
         * event which is after the vblank start event, so we need to have a
         * wait-for-vblank between disabling the plane and the pipe.
         */
        if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
            new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
                intel_wait_for_vblank(dev_priv, pipe);

        /*
         * IVB workaround: must disable low power watermarks for at least
         * one frame before enabling scaling.  LP watermarks can be re-enabled
         * when scaling is disabled.
         *
         * WaCxSRDisabledForSpriteScaling:ivb
         */
        if (old_crtc_state->hw.active &&
            new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
                intel_wait_for_vblank(dev_priv, pipe);

        /*
         * If we're doing a modeset we don't need to do any
         * pre-vblank watermark programming here.
         */
        if (!intel_crtc_needs_modeset(new_crtc_state)) {
                /*
                 * For platforms that support atomic watermarks, program the
                 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
                 * will be the intermediate values that are safe for both pre- and
                 * post- vblank; when vblank happens, the 'active' values will be set
                 * to the final 'target' values and we'll do this again to get the
                 * optimal watermarks.  For gen9+ platforms, the values we program here
                 * will be the final target values which will get automatically latched
                 * at vblank time; no further programming will be necessary.
                 *
                 * If a platform hasn't been transitioned to atomic watermarks yet,
                 * we'll continue to update watermarks the old way, if flags tell
                 * us to.
                 */
                if (!intel_initial_watermarks(state, crtc))
                        if (new_crtc_state->update_wm_pre)
                                intel_update_watermarks(dev_priv);
        }

        /*
         * Gen2 reports pipe underruns whenever all planes are disabled.
         * So disable underrun reporting before all the planes get disabled.
         *
         * We do this after .initial_watermarks() so that we have a
         * chance of catching underruns with the intermediate watermarks
         * vs. the old plane configuration.
         */
        if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

        /*
         * WA for platforms where async address update enable bit
         * is double buffered and only latched at start of vblank.
         */
        if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
                intel_crtc_async_flip_disable_wa(state, crtc);
}
2616
2617 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
2618                                       struct intel_crtc *crtc)
2619 {
2620         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2621         const struct intel_crtc_state *new_crtc_state =
2622                 intel_atomic_get_new_crtc_state(state, crtc);
2623         unsigned int update_mask = new_crtc_state->update_planes;
2624         const struct intel_plane_state *old_plane_state;
2625         struct intel_plane *plane;
2626         unsigned fb_bits = 0;
2627         int i;
2628
2629         intel_crtc_dpms_overlay_disable(crtc);
2630
2631         for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
2632                 if (crtc->pipe != plane->pipe ||
2633                     !(update_mask & BIT(plane->id)))
2634                         continue;
2635
2636                 intel_disable_plane(plane, new_crtc_state);
2637
2638                 if (old_plane_state->uapi.visible)
2639                         fb_bits |= plane->frontbuffer_bit;
2640         }
2641
2642         intel_frontbuffer_flip(dev_priv, fb_bits);
2643 }
2644
2645 /*
2646  * intel_connector_primary_encoder - get the primary encoder for a connector
2647  * @connector: connector for which to return the encoder
2648  *
2649  * Returns the primary encoder for a connector. There is a 1:1 mapping from
2650  * all connectors to their encoder, except for DP-MST connectors which have
2651  * both a virtual and a primary encoder. These DP-MST primary encoders can be
2652  * pointed to by as many DP-MST connectors as there are pipes.
2653  */
2654 static struct intel_encoder *
2655 intel_connector_primary_encoder(struct intel_connector *connector)
2656 {
2657         struct intel_encoder *encoder;
2658
2659         if (connector->mst_port)
2660                 return &dp_to_dig_port(connector->mst_port)->base;
2661
2662         encoder = intel_attached_encoder(connector);
2663         drm_WARN_ON(connector->base.dev, !encoder);
2664
2665         return encoder;
2666 }
2667
2668 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
2669 {
2670         struct drm_connector_state *new_conn_state;
2671         struct drm_connector *connector;
2672         int i;
2673
2674         for_each_new_connector_in_state(&state->base, connector, new_conn_state,
2675                                         i) {
2676                 struct intel_connector *intel_connector;
2677                 struct intel_encoder *encoder;
2678                 struct intel_crtc *crtc;
2679
2680                 if (!intel_connector_needs_modeset(state, connector))
2681                         continue;
2682
2683                 intel_connector = to_intel_connector(connector);
2684                 encoder = intel_connector_primary_encoder(intel_connector);
2685                 if (!encoder->update_prepare)
2686                         continue;
2687
2688                 crtc = new_conn_state->crtc ?
2689                         to_intel_crtc(new_conn_state->crtc) : NULL;
2690                 encoder->update_prepare(state, encoder, crtc);
2691         }
2692 }
2693
2694 static void intel_encoders_update_complete(struct intel_atomic_state *state)
2695 {
2696         struct drm_connector_state *new_conn_state;
2697         struct drm_connector *connector;
2698         int i;
2699
2700         for_each_new_connector_in_state(&state->base, connector, new_conn_state,
2701                                         i) {
2702                 struct intel_connector *intel_connector;
2703                 struct intel_encoder *encoder;
2704                 struct intel_crtc *crtc;
2705
2706                 if (!intel_connector_needs_modeset(state, connector))
2707                         continue;
2708
2709                 intel_connector = to_intel_connector(connector);
2710                 encoder = intel_connector_primary_encoder(intel_connector);
2711                 if (!encoder->update_complete)
2712                         continue;
2713
2714                 crtc = new_conn_state->crtc ?
2715                         to_intel_crtc(new_conn_state->crtc) : NULL;
2716                 encoder->update_complete(state, encoder, crtc);
2717         }
2718 }
2719
2720 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
2721                                           struct intel_crtc *crtc)
2722 {
2723         const struct intel_crtc_state *crtc_state =
2724                 intel_atomic_get_new_crtc_state(state, crtc);
2725         const struct drm_connector_state *conn_state;
2726         struct drm_connector *conn;
2727         int i;
2728
2729         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
2730                 struct intel_encoder *encoder =
2731                         to_intel_encoder(conn_state->best_encoder);
2732
2733                 if (conn_state->crtc != &crtc->base)
2734                         continue;
2735
2736                 if (encoder->pre_pll_enable)
2737                         encoder->pre_pll_enable(state, encoder,
2738                                                 crtc_state, conn_state);
2739         }
2740 }
2741
2742 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
2743                                       struct intel_crtc *crtc)
2744 {
2745         const struct intel_crtc_state *crtc_state =
2746                 intel_atomic_get_new_crtc_state(state, crtc);
2747         const struct drm_connector_state *conn_state;
2748         struct drm_connector *conn;
2749         int i;
2750
2751         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
2752                 struct intel_encoder *encoder =
2753                         to_intel_encoder(conn_state->best_encoder);
2754
2755                 if (conn_state->crtc != &crtc->base)
2756                         continue;
2757
2758                 if (encoder->pre_enable)
2759                         encoder->pre_enable(state, encoder,
2760                                             crtc_state, conn_state);
2761         }
2762 }
2763
2764 static void intel_encoders_enable(struct intel_atomic_state *state,
2765                                   struct intel_crtc *crtc)
2766 {
2767         const struct intel_crtc_state *crtc_state =
2768                 intel_atomic_get_new_crtc_state(state, crtc);
2769         const struct drm_connector_state *conn_state;
2770         struct drm_connector *conn;
2771         int i;
2772
2773         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
2774                 struct intel_encoder *encoder =
2775                         to_intel_encoder(conn_state->best_encoder);
2776
2777                 if (conn_state->crtc != &crtc->base)
2778                         continue;
2779
2780                 if (encoder->enable)
2781                         encoder->enable(state, encoder,
2782                                         crtc_state, conn_state);
2783                 intel_opregion_notify_encoder(encoder, true);
2784         }
2785 }
2786
2787 static void intel_encoders_pre_disable(struct intel_atomic_state *state,
2788                                        struct intel_crtc *crtc)
2789 {
2790         const struct intel_crtc_state *old_crtc_state =
2791                 intel_atomic_get_old_crtc_state(state, crtc);
2792         const struct drm_connector_state *old_conn_state;
2793         struct drm_connector *conn;
2794         int i;
2795
2796         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
2797                 struct intel_encoder *encoder =
2798                         to_intel_encoder(old_conn_state->best_encoder);
2799
2800                 if (old_conn_state->crtc != &crtc->base)
2801                         continue;
2802
2803                 if (encoder->pre_disable)
2804                         encoder->pre_disable(state, encoder, old_crtc_state,
2805                                              old_conn_state);
2806         }
2807 }
2808
2809 static void intel_encoders_disable(struct intel_atomic_state *state,
2810                                    struct intel_crtc *crtc)
2811 {
2812         const struct intel_crtc_state *old_crtc_state =
2813                 intel_atomic_get_old_crtc_state(state, crtc);
2814         const struct drm_connector_state *old_conn_state;
2815         struct drm_connector *conn;
2816         int i;
2817
2818         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
2819                 struct intel_encoder *encoder =
2820                         to_intel_encoder(old_conn_state->best_encoder);
2821
2822                 if (old_conn_state->crtc != &crtc->base)
2823                         continue;
2824
2825                 intel_opregion_notify_encoder(encoder, false);
2826                 if (encoder->disable)
2827                         encoder->disable(state, encoder,
2828                                          old_crtc_state, old_conn_state);
2829         }
2830 }
2831
2832 static void intel_encoders_post_disable(struct intel_atomic_state *state,
2833                                         struct intel_crtc *crtc)
2834 {
2835         const struct intel_crtc_state *old_crtc_state =
2836                 intel_atomic_get_old_crtc_state(state, crtc);
2837         const struct drm_connector_state *old_conn_state;
2838         struct drm_connector *conn;
2839         int i;
2840
2841         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
2842                 struct intel_encoder *encoder =
2843                         to_intel_encoder(old_conn_state->best_encoder);
2844
2845                 if (old_conn_state->crtc != &crtc->base)
2846                         continue;
2847
2848                 if (encoder->post_disable)
2849                         encoder->post_disable(state, encoder,
2850                                               old_crtc_state, old_conn_state);
2851         }
2852 }
2853
2854 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
2855                                             struct intel_crtc *crtc)
2856 {
2857         const struct intel_crtc_state *old_crtc_state =
2858                 intel_atomic_get_old_crtc_state(state, crtc);
2859         const struct drm_connector_state *old_conn_state;
2860         struct drm_connector *conn;
2861         int i;
2862
2863         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
2864                 struct intel_encoder *encoder =
2865                         to_intel_encoder(old_conn_state->best_encoder);
2866
2867                 if (old_conn_state->crtc != &crtc->base)
2868                         continue;
2869
2870                 if (encoder->post_pll_disable)
2871                         encoder->post_pll_disable(state, encoder,
2872                                                   old_crtc_state, old_conn_state);
2873         }
2874 }
2875
2876 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
2877                                        struct intel_crtc *crtc)
2878 {
2879         const struct intel_crtc_state *crtc_state =
2880                 intel_atomic_get_new_crtc_state(state, crtc);
2881         const struct drm_connector_state *conn_state;
2882         struct drm_connector *conn;
2883         int i;
2884
2885         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
2886                 struct intel_encoder *encoder =
2887                         to_intel_encoder(conn_state->best_encoder);
2888
2889                 if (conn_state->crtc != &crtc->base)
2890                         continue;
2891
2892                 if (encoder->update_pipe)
2893                         encoder->update_pipe(state, encoder,
2894                                              crtc_state, conn_state);
2895         }
2896 }
2897
2898 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
2899 {
2900         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2901         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
2902
2903         plane->disable_plane(plane, crtc_state);
2904 }
2905
/*
 * ilk_crtc_enable - full mode set enable sequence for ILK-class pipes
 * @state: the atomic commit in progress
 * @crtc: the CRTC to bring up
 *
 * Programs transcoder timings, pipe source size, M/N values, PIPECONF,
 * panel fitter and LUTs, then enables the transcoder and (if present) the
 * PCH side, followed by the encoders. The step ordering follows the
 * hardware's required modeset sequence and must not be changed.
 */
static void ilk_crtc_enable(struct intel_atomic_state *state,
                            struct intel_crtc *crtc)
{
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* Enabling an already-active CRTC would be a driver bug. */
        if (drm_WARN_ON(&dev_priv->drm, crtc->active))
                return;

        /*
         * Sometimes spurious CPU pipe underruns happen during FDI
         * training, at least with VGA+HDMI cloning. Suppress them.
         *
         * On ILK we get an occasional spurious CPU pipe underruns
         * between eDP port A enable and vdd enable. Also PCH port
         * enable seems to result in the occasional CPU pipe underrun.
         *
         * Spurious PCH underruns also occur during PCH enabling.
         */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

        if (intel_crtc_has_dp_encoder(new_crtc_state))
                intel_dp_set_m_n(new_crtc_state, M1_N1);

        intel_set_transcoder_timings(new_crtc_state);
        intel_set_pipe_src_size(new_crtc_state);

        if (new_crtc_state->has_pch_encoder)
                intel_cpu_transcoder_set_m_n(new_crtc_state,
                                             &new_crtc_state->fdi_m_n, NULL);

        ilk_set_pipeconf(new_crtc_state);

        crtc->active = true;

        intel_encoders_pre_enable(state, crtc);

        if (new_crtc_state->has_pch_encoder) {
                /* Note: FDI PLL enabling _must_ be done before we enable the
                 * cpu pipes, hence this is separate from all the other fdi/pch
                 * enabling. */
                ilk_fdi_pll_enable(new_crtc_state);
        } else {
                assert_fdi_tx_disabled(dev_priv, pipe);
                assert_fdi_rx_disabled(dev_priv, pipe);
        }

        ilk_pfit_enable(new_crtc_state);

        /*
         * On ILK+ LUT must be loaded before the pipe is running but with
         * clocks enabled
         */
        intel_color_load_luts(new_crtc_state);
        intel_color_commit(new_crtc_state);
        /* update DSPCNTR to configure gamma for pipe bottom color */
        intel_disable_primary_plane(new_crtc_state);

        intel_initial_watermarks(state, crtc);
        intel_enable_transcoder(new_crtc_state);

        if (new_crtc_state->has_pch_encoder)
                ilk_pch_enable(state, new_crtc_state);

        intel_crtc_vblank_on(new_crtc_state);

        intel_encoders_enable(state, crtc);

        if (HAS_PCH_CPT(dev_priv))
                cpt_verify_modeset(dev_priv, pipe);

        /*
         * Must wait for vblank to avoid spurious PCH FIFO underruns.
         * And a second vblank wait is needed at least on ILK with
         * some interlaced HDMI modes. Let's do the double wait always
         * in case there are more corner cases we don't know about.
         */
        if (new_crtc_state->has_pch_encoder) {
                intel_wait_for_vblank(dev_priv, pipe);
                intel_wait_for_vblank(dev_priv, pipe);
        }
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
2993
2994 /* IPS only exists on ULT machines and is tied to pipe A. */
2995 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
2996 {
2997         return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
2998 }
2999
3000 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
3001                                             enum pipe pipe, bool apply)
3002 {
3003         u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
3004         u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
3005
3006         if (apply)
3007                 val |= mask;
3008         else
3009                 val &= ~mask;
3010
3011         intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
3012 }
3013
/*
 * icl_pipe_mbus_enable - program per-pipe MBUS DBOX credits
 * @crtc: the CRTC whose pipe to program
 * @joined_mbus: whether the MBUS is operating in the joined configuration
 *
 * Writes the A/B/BW credit allocation for the pipe's MBUS data box; the
 * values differ between ADL-P (Wa_22010947358), gen12+ and gen11.
 */
static void icl_pipe_mbus_enable(struct intel_crtc *crtc, bool joined_mbus)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        u32 val;

        /* Wa_22010947358:adl-p */
        if (IS_ALDERLAKE_P(dev_priv))
                val = joined_mbus ? MBUS_DBOX_A_CREDIT(6) : MBUS_DBOX_A_CREDIT(4);
        else
                val = MBUS_DBOX_A_CREDIT(2);

        /* B/BW credits depend on display generation. */
        if (DISPLAY_VER(dev_priv) >= 12) {
                val |= MBUS_DBOX_BW_CREDIT(2);
                val |= MBUS_DBOX_B_CREDIT(12);
        } else {
                val |= MBUS_DBOX_BW_CREDIT(1);
                val |= MBUS_DBOX_B_CREDIT(8);
        }

        intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
}
3036
3037 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
3038 {
3039         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3040         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3041
3042         intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
3043                        HSW_LINETIME(crtc_state->linetime) |
3044                        HSW_IPS_LINETIME(crtc_state->ips_linetime));
3045 }
3046
3047 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
3048 {
3049         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3050         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3051         i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
3052         u32 val;
3053
3054         val = intel_de_read(dev_priv, reg);
3055         val &= ~HSW_FRAME_START_DELAY_MASK;
3056         val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
3057         intel_de_write(dev_priv, reg, val);
3058 }
3059
/*
 * Pre-enable work for a bigjoiner pipe pair. Called once per pipe; the
 * slave pipe additionally drives the master's pre-PLL/PLL/pre-enable steps
 * (see below), so ordering across the pair is handled here.
 */
static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
					 const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *master = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(master->base.dev);
	struct intel_crtc_state *master_crtc_state;
	struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	struct intel_encoder *encoder = NULL;
	int i;

	/* For a slave pipe, the real master is the linked crtc. */
	if (crtc_state->bigjoiner_slave)
		master = crtc_state->bigjoiner_linked_crtc;

	master_crtc_state = intel_atomic_get_new_crtc_state(state, master);

	/* Find the encoder attached to the master crtc in this state. */
	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		if (conn_state->crtc != &master->base)
			continue;

		encoder = to_intel_encoder(conn_state->best_encoder);
		break;
	}

	if (!crtc_state->bigjoiner_slave) {
		/* need to enable VDSC, which we skipped in pre-enable */
		intel_dsc_enable(encoder, crtc_state);
	} else {
		/*
		 * Enable sequence steps 1-7 on bigjoiner master
		 */
		intel_encoders_pre_pll_enable(state, master);
		if (master_crtc_state->shared_dpll)
			intel_enable_shared_dpll(master_crtc_state);
		intel_encoders_pre_enable(state, master);

		/* and DSC on slave */
		intel_dsc_enable(NULL, crtc_state);
	}

	/* Display version 13+ also supports an uncompressed joiner mode. */
	if (DISPLAY_VER(dev_priv) >= 13)
		intel_uncompressed_joiner_enable(crtc_state);
}
3103
/*
 * Full modeset enable sequence for HSW+ pipes. The statement order follows
 * the hardware enable sequence and must not be reshuffled casually.
 */
static void hsw_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	bool psl_clkgate_wa;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	if (!new_crtc_state->bigjoiner) {
		intel_encoders_pre_pll_enable(state, crtc);

		if (new_crtc_state->shared_dpll)
			intel_enable_shared_dpll(new_crtc_state);

		intel_encoders_pre_enable(state, crtc);
	} else {
		/* Bigjoiner has its own cross-pipe pre-enable ordering. */
		icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
	}

	intel_set_pipe_src_size(new_crtc_state);
	if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
		bdw_set_pipemisc(new_crtc_state);

	/* Transcoder timings are programmed once, by the master pipe only. */
	if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder)) {
		intel_set_transcoder_timings(new_crtc_state);

		if (cpu_transcoder != TRANSCODER_EDP)
			intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
				       new_crtc_state->pixel_multiplier - 1);

		if (new_crtc_state->has_pch_encoder)
			intel_cpu_transcoder_set_m_n(new_crtc_state,
						     &new_crtc_state->fdi_m_n, NULL);

		hsw_set_frame_start_delay(new_crtc_state);

		hsw_set_transconf(new_crtc_state);
	}

	crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk */
	psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
		new_crtc_state->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (DISPLAY_VER(dev_priv) >= 9)
		skl_pfit_enable(new_crtc_state);
	else
		ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
	if (DISPLAY_VER(dev_priv) < 9)
		intel_disable_primary_plane(new_crtc_state);

	hsw_set_linetime_wm(new_crtc_state);

	if (DISPLAY_VER(dev_priv) >= 11)
		icl_set_pipe_chicken(new_crtc_state);

	intel_initial_watermarks(state, crtc);

	if (DISPLAY_VER(dev_priv) >= 11) {
		const struct intel_dbuf_state *dbuf_state =
				intel_atomic_get_new_dbuf_state(state);

		icl_pipe_mbus_enable(crtc, dbuf_state->joined_mbus);
	}

	if (new_crtc_state->bigjoiner_slave)
		intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	/* WA cleanup: wait a vblank before re-enabling scaler clock gating. */
	if (psl_clkgate_wa) {
		intel_wait_for_vblank(dev_priv, pipe);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		/* The double vblank wait is intentional, not a copy-paste bug. */
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
	}
}
3203
/* Disable the ILK-style panel fitter for this crtc, if it was in use. */
void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* To avoid upsetting the power well on haswell only disable the pfit if
	 * it's in use. The hw state code will make sure we get this right. */
	if (!old_crtc_state->pch_pfit.enabled)
		return;

	/* Clear control first, then the window position/size registers. */
	intel_de_write(dev_priv, PF_CTL(pipe), 0);
	intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
	intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
}
3219
/*
 * Full modeset disable sequence for ILK-class (PCH) pipes; tears down the
 * CPU pipe, the optional FDI link and the PCH transcoder in order.
 */
static void ilk_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_transcoder(old_crtc_state);

	ilk_pfit_disable(old_crtc_state);

	if (old_crtc_state->has_pch_encoder)
		ilk_fdi_disable(crtc);

	intel_encoders_post_disable(state, crtc);

	/* PCH side teardown only applies when a PCH encoder was in use. */
	if (old_crtc_state->has_pch_encoder) {
		ilk_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev_priv)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = intel_de_read(dev_priv, reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			intel_de_write(dev_priv, reg, temp);

			/* disable DPLL_SEL */
			temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
		}

		ilk_fdi_pll_disable(crtc);
	}

	/* Pipe is fully off now; re-enable underrun reporting. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
3276
/*
 * HSW+ crtc disable: only the encoder disable/post-disable hooks are
 * driven from here; the rest is handled by the encoder/ddi code.
 */
static void hsw_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	/*
	 * FIXME collapse everything to one hook.
	 * Need care with mst->ddi interactions.
	 */
	intel_encoders_disable(state, crtc);
	intel_encoders_post_disable(state, crtc);
}
3287
/* Program the GMCH panel fitter for this crtc, if the state enables it. */
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* Nothing to do when the state doesn't use the panel fitter. */
	if (!crtc_state->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
	assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);

	/* Ratios must be written before the control/enable register. */
	intel_de_write(dev_priv, PFIT_PGM_RATIOS,
		       crtc_state->gmch_pfit.pgm_ratios);
	intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
}
3312
3313 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
3314 {
3315         if (phy == PHY_NONE)
3316                 return false;
3317         else if (IS_DG2(dev_priv))
3318                 /*
3319                  * DG2 outputs labelled as "combo PHY" in the bspec use
3320                  * SNPS PHYs with completely different programming,
3321                  * hence we always return false here.
3322                  */
3323                 return false;
3324         else if (IS_ALDERLAKE_S(dev_priv))
3325                 return phy <= PHY_E;
3326         else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
3327                 return phy <= PHY_D;
3328         else if (IS_JSL_EHL(dev_priv))
3329                 return phy <= PHY_C;
3330         else if (DISPLAY_VER(dev_priv) >= 11)
3331                 return phy <= PHY_B;
3332         else
3333                 return false;
3334 }
3335
3336 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
3337 {
3338         if (IS_DG2(dev_priv))
3339                 /* DG2's "TC1" output uses a SNPS PHY */
3340                 return false;
3341         else if (IS_ALDERLAKE_P(dev_priv))
3342                 return phy >= PHY_F && phy <= PHY_I;
3343         else if (IS_TIGERLAKE(dev_priv))
3344                 return phy >= PHY_D && phy <= PHY_I;
3345         else if (IS_ICELAKE(dev_priv))
3346                 return phy >= PHY_C && phy <= PHY_F;
3347         else
3348                 return false;
3349 }
3350
3351 bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
3352 {
3353         if (phy == PHY_NONE)
3354                 return false;
3355         else if (IS_DG2(dev_priv))
3356                 /*
3357                  * All four "combo" ports and the TC1 port (PHY E) use
3358                  * Synopsis PHYs.
3359                  */
3360                 return phy <= PHY_E;
3361
3362         return false;
3363 }
3364
/*
 * Map a DDI port to its PHY. The platform checks are ordered from newest
 * to oldest because the port->phy mapping differs per platform.
 */
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
{
	if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
		return PHY_D + port - PORT_D_XELPD;
	else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1)
		return PHY_F + port - PORT_TC1;
	else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
		return PHY_B + port - PORT_TC1;
	else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
		return PHY_C + port - PORT_TC1;
	else if (IS_JSL_EHL(i915) && port == PORT_D)
		return PHY_A;

	/* Default 1:1 mapping starting at PHY A. */
	return PHY_A + port - PORT_A;
}
3380
3381 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
3382 {
3383         if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
3384                 return TC_PORT_NONE;
3385
3386         if (DISPLAY_VER(dev_priv) >= 12)
3387                 return TC_PORT_1 + port - PORT_TC1;
3388         else
3389                 return TC_PORT_1 + port - PORT_C;
3390 }
3391
/* Map a DDI port to the power domain covering its lanes. */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	case PORT_F:
		return POWER_DOMAIN_PORT_DDI_F_LANES;
	case PORT_G:
		return POWER_DOMAIN_PORT_DDI_G_LANES;
	case PORT_H:
		return POWER_DOMAIN_PORT_DDI_H_LANES;
	case PORT_I:
		return POWER_DOMAIN_PORT_DDI_I_LANES;
	default:
		/* Unexpected port: warn and fall back to the catch-all domain. */
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}
3418
/*
 * Map a digital port's AUX channel to a power domain, selecting the TBT
 * variant when the port is in Thunderbolt alt mode.
 */
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
	if (intel_tc_port_in_tbt_alt_mode(dig_port)) {
		switch (dig_port->aux_ch) {
		case AUX_CH_C:
			return POWER_DOMAIN_AUX_C_TBT;
		case AUX_CH_D:
			return POWER_DOMAIN_AUX_D_TBT;
		case AUX_CH_E:
			return POWER_DOMAIN_AUX_E_TBT;
		case AUX_CH_F:
			return POWER_DOMAIN_AUX_F_TBT;
		case AUX_CH_G:
			return POWER_DOMAIN_AUX_G_TBT;
		case AUX_CH_H:
			return POWER_DOMAIN_AUX_H_TBT;
		case AUX_CH_I:
			return POWER_DOMAIN_AUX_I_TBT;
		default:
			/* Unexpected AUX channel: warn and pick a TBT fallback. */
			MISSING_CASE(dig_port->aux_ch);
			return POWER_DOMAIN_AUX_C_TBT;
		}
	}

	/* Non-TBT ports use the legacy AUX power domains. */
	return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
}
3446
3447 /*
3448  * Converts aux_ch to power_domain without caring about TBT ports for that use
3449  * intel_aux_power_domain()
3450  */
3451 enum intel_display_power_domain
3452 intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
3453 {
3454         switch (aux_ch) {
3455         case AUX_CH_A:
3456                 return POWER_DOMAIN_AUX_A;
3457         case AUX_CH_B:
3458                 return POWER_DOMAIN_AUX_B;
3459         case AUX_CH_C:
3460                 return POWER_DOMAIN_AUX_C;
3461         case AUX_CH_D:
3462                 return POWER_DOMAIN_AUX_D;
3463         case AUX_CH_E:
3464                 return POWER_DOMAIN_AUX_E;
3465         case AUX_CH_F:
3466                 return POWER_DOMAIN_AUX_F;
3467         case AUX_CH_G:
3468                 return POWER_DOMAIN_AUX_G;
3469         case AUX_CH_H:
3470                 return POWER_DOMAIN_AUX_H;
3471         case AUX_CH_I:
3472                 return POWER_DOMAIN_AUX_I;
3473         default:
3474                 MISSING_CASE(aux_ch);
3475                 return POWER_DOMAIN_AUX_A;
3476         }
3477 }
3478
3479 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
3480 {
3481         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3482         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3483         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3484         struct drm_encoder *encoder;
3485         enum pipe pipe = crtc->pipe;
3486         u64 mask;
3487
3488         if (!crtc_state->hw.active)
3489                 return 0;
3490
3491         mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
3492         mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(cpu_transcoder));
3493         if (crtc_state->pch_pfit.enabled ||
3494             crtc_state->pch_pfit.force_thru)
3495                 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
3496
3497         drm_for_each_encoder_mask(encoder, &dev_priv->drm,
3498                                   crtc_state->uapi.encoder_mask) {
3499                 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3500
3501                 mask |= BIT_ULL(intel_encoder->power_domain);
3502         }
3503
3504         if (HAS_DDI(dev_priv) && crtc_state->has_audio)
3505                 mask |= BIT_ULL(POWER_DOMAIN_AUDIO_MMIO);
3506
3507         if (crtc_state->shared_dpll)
3508                 mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
3509
3510         if (crtc_state->dsc.compression_enable)
3511                 mask |= BIT_ULL(intel_dsc_power_domain(crtc, cpu_transcoder));
3512
3513         return mask;
3514 }
3515
3516 static u64
3517 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
3518 {
3519         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3520         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3521         enum intel_display_power_domain domain;
3522         u64 domains, new_domains, old_domains;
3523
3524         domains = get_crtc_power_domains(crtc_state);
3525
3526         new_domains = domains & ~crtc->enabled_power_domains.mask;
3527         old_domains = crtc->enabled_power_domains.mask & ~domains;
3528
3529         for_each_power_domain(domain, new_domains)
3530                 intel_display_power_get_in_set(dev_priv,
3531                                                &crtc->enabled_power_domains,
3532                                                domain);
3533
3534         return old_domains;
3535 }
3536
3537 static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
3538                                            u64 domains)
3539 {
3540         intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
3541                                             &crtc->enabled_power_domains,
3542                                             domains);
3543 }
3544
/*
 * Modeset enable sequence for VLV/CHV pipes; the ordering follows the
 * hardware enable sequence and must be preserved.
 */
static void valleyview_crtc_enable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	/* CHV pipe B: program legacy blending and a zeroed canvas. */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(state, crtc);

	/* CHV and VLV have different PLL programming. */
	if (IS_CHERRYVIEW(dev_priv))
		chv_enable_pll(new_crtc_state);
	else
		vlv_enable_pll(new_crtc_state);

	intel_encoders_pre_enable(state, crtc);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	intel_initial_watermarks(state, crtc);
	intel_enable_transcoder(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}
3596
/* Modeset enable sequence for pre-VLV gmch pipes. */
static void i9xx_crtc_enable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	/* Gen2 has no underrun reporting support. */
	if (DISPLAY_VER(dev_priv) != 2)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(state, crtc);

	i9xx_enable_pll(new_crtc_state);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	/* Fall back to a global watermark update if no per-crtc hook exists. */
	if (!intel_initial_watermarks(state, crtc))
		intel_update_watermarks(dev_priv);
	intel_enable_transcoder(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	/* prevents spurious underruns */
	if (DISPLAY_VER(dev_priv) == 2)
		intel_wait_for_vblank(dev_priv, pipe);
}
3644
/* Disable the GMCH panel fitter if the old state had it enabled. */
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!old_crtc_state->gmch_pfit.control)
		return;

	/* The pfit may only be touched while the transcoder is disabled. */
	assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder);

	drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
		    intel_de_read(dev_priv, PFIT_CONTROL));
	intel_de_write(dev_priv, PFIT_CONTROL, 0);
}
3659
/* Modeset disable sequence for gmch (pre-ILK, VLV/CHV) pipes. */
static void i9xx_crtc_disable(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (DISPLAY_VER(dev_priv) == 2)
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_transcoder(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(state, crtc);

	/* DSI keeps its PLL running; everything else shuts it down here. */
	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(state, crtc);

	/* Gen2 has no underrun reporting support. */
	if (DISPLAY_VER(dev_priv) != 2)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->wm_disp->initial_watermarks)
		intel_update_watermarks(dev_priv);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
3706
/*
 * Disable a crtc outside of a normal atomic commit (e.g. during hw state
 * sanitization at load time), then scrub all SW state that tracked it:
 * crtc state, encoder links, cdclk/dbuf/bw bookkeeping and power domains.
 * Caller holds all the relevant modeset locks via @ctx.
 */
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_bw_state *bw_state =
		to_intel_bw_state(dev_priv->bw_obj.state);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane *plane;
	struct drm_atomic_state *state;
	struct intel_crtc_state *temp_crtc_state;
	enum pipe pipe = crtc->pipe;
	int ret;

	if (!crtc_state->hw.active)
		return;

	/* Turn off all visible planes before disabling the pipe itself. */
	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->uapi.visible)
			intel_plane_disable_noatomic(crtc, plane);
	}

	/* Build a throwaway atomic state just to drive crtc_disable(). */
	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (!state) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to disable [CRTC:%d:%s], out of memory",
			    crtc->base.base.id, crtc->base.name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
	ret = drm_atomic_add_affected_connectors(state, &crtc->base);

	drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);

	dev_priv->display->crtc_disable(to_intel_atomic_state(state), crtc);

	drm_atomic_state_put(state);

	drm_dbg_kms(&dev_priv->drm,
		    "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		    crtc->base.base.id, crtc->base.name);

	crtc->active = false;
	crtc->base.enabled = false;

	/* Clear out the uapi and hw crtc state to match the disabled hw. */
	drm_WARN_ON(&dev_priv->drm,
		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
	crtc_state->uapi.active = false;
	crtc_state->uapi.connector_mask = 0;
	crtc_state->uapi.encoder_mask = 0;
	intel_crtc_free_hw_state(crtc_state);
	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));

	/* Detach every encoder that pointed at this crtc. */
	for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(crtc);
	intel_update_watermarks(dev_priv);
	intel_disable_shared_dpll(crtc_state);

	intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);

	/* Reset per-pipe cdclk/dbuf/bandwidth bookkeeping. */
	cdclk_state->min_cdclk[pipe] = 0;
	cdclk_state->min_voltage_level[pipe] = 0;
	cdclk_state->active_pipes &= ~BIT(pipe);

	dbuf_state->active_pipes &= ~BIT(pipe);

	bw_state->data_rate[pipe] = 0;
	bw_state->num_active_planes[pipe] = 0;
}
3790
3791 /*
3792  * turn all crtc's off, but do not adjust state
3793  * This has to be paired with a call to intel_modeset_setup_hw_state.
3794  */
3795 int intel_display_suspend(struct drm_device *dev)
3796 {
3797         struct drm_i915_private *dev_priv = to_i915(dev);
3798         struct drm_atomic_state *state;
3799         int ret;
3800
3801         if (!HAS_DISPLAY(dev_priv))
3802                 return 0;
3803
3804         state = drm_atomic_helper_suspend(dev);
3805         ret = PTR_ERR_OR_ZERO(state);
3806         if (ret)
3807                 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
3808                         ret);
3809         else
3810                 dev_priv->modeset_restore_state = state;
3811         return ret;
3812 }
3813
/* Tear down the base encoder, then free the containing intel_encoder. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(to_intel_encoder(encoder));
}
3821
/* Cross check the actual hw state with our own modeset state tracking (and it's
 * internal consistency). */
static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.base.id, connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = intel_attached_encoder(connector);

		/* An enabled connector must be tracked against a crtc. */
		I915_STATE_WARN(!crtc_state,
			 "connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->hw.active,
				"connector is active, but attached crtc isn't\n");

		/* MST connectors are checked by their own machinery. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		/* Connector is off in hw: the tracked state must agree. */
		I915_STATE_WARN(crtc_state && crtc_state->hw.active,
				"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
3860
3861 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
3862 {
3863         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3864         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3865
3866         /* IPS only exists on ULT machines and is tied to pipe A. */
3867         if (!hsw_crtc_supports_ips(crtc))
3868                 return false;
3869
3870         if (!dev_priv->params.enable_ips)
3871                 return false;
3872
3873         if (crtc_state->pipe_bpp > 24)
3874                 return false;
3875
3876         /*
3877          * We compare against max which means we must take
3878          * the increased cdclk requirement into account when
3879          * calculating the new cdclk.
3880          *
3881          * Should measure whether using a lower cdclk w/o IPS
3882          */
3883         if (IS_BROADWELL(dev_priv) &&
3884             crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
3885                 return false;
3886
3887         return true;
3888 }
3889
/*
 * Decide whether IPS should be enabled for this crtc state and record the
 * result in crtc_state->ips_enabled.
 *
 * Returns 0 on success, or a negative error code if the atomic cdclk state
 * couldn't be obtained (Broadwell-only path).
 */
static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->uapi.crtc->dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);

	/* Assume disabled until all checks below pass. */
	crtc_state->ips_enabled = false;

	if (!hsw_crtc_state_ips_capable(crtc_state))
		return 0;

	/*
	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
	 * enabled and disabled dynamically based on package C states,
	 * user space can't make reliable use of the CRCs, so let's just
	 * completely disable it.
	 */
	if (crtc_state->crc_enabled)
		return 0;

	/* IPS should be fine as long as at least one plane is enabled. */
	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
		return 0;

	if (IS_BROADWELL(dev_priv)) {
		const struct intel_cdclk_state *cdclk_state;

		cdclk_state = intel_atomic_get_cdclk_state(state);
		if (IS_ERR(cdclk_state))
			return PTR_ERR(cdclk_state);

		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
		if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
			return 0;
	}

	crtc_state->ips_enabled = true;

	return 0;
}
3931
3932 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
3933 {
3934         const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3935
3936         /* GDG double wide on either pipe, otherwise pipe A only */
3937         return DISPLAY_VER(dev_priv) < 4 &&
3938                 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
3939 }
3940
3941 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
3942 {
3943         u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
3944         struct drm_rect src;
3945
3946         /*
3947          * We only use IF-ID interlacing. If we ever use
3948          * PF-ID we'll need to adjust the pixel_rate here.
3949          */
3950
3951         if (!crtc_state->pch_pfit.enabled)
3952                 return pixel_rate;
3953
3954         drm_rect_init(&src, 0, 0,
3955                       crtc_state->pipe_src_w << 16,
3956                       crtc_state->pipe_src_h << 16);
3957
3958         return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst,
3959                                    pixel_rate);
3960 }
3961
3962 static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
3963                                          const struct drm_display_mode *timings)
3964 {
3965         mode->hdisplay = timings->crtc_hdisplay;
3966         mode->htotal = timings->crtc_htotal;
3967         mode->hsync_start = timings->crtc_hsync_start;
3968         mode->hsync_end = timings->crtc_hsync_end;
3969
3970         mode->vdisplay = timings->crtc_vdisplay;
3971         mode->vtotal = timings->crtc_vtotal;
3972         mode->vsync_start = timings->crtc_vsync_start;
3973         mode->vsync_end = timings->crtc_vsync_end;
3974
3975         mode->flags = timings->flags;
3976         mode->type = DRM_MODE_TYPE_DRIVER;
3977
3978         mode->clock = timings->crtc_clock;
3979
3980         drm_mode_set_name(mode);
3981 }
3982
3983 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
3984 {
3985         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3986
3987         if (HAS_GMCH(dev_priv))
3988                 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
3989                 crtc_state->pixel_rate =
3990                         crtc_state->hw.pipe_mode.crtc_clock;
3991         else
3992                 crtc_state->pixel_rate =
3993                         ilk_pipe_pixel_rate(crtc_state);
3994 }
3995
/*
 * Derive the software-only parts of the crtc state (hw.pipe_mode, hw.mode,
 * pixel_rate) from the hw.adjusted_mode that was read out of the hardware.
 * Accounts for bigjoiner (pipe runs at half the transcoder width) and the
 * eDP MSO splitter (transcoder runs segment timings, pipe the full mode).
 */
static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
{
	struct drm_display_mode *mode = &crtc_state->hw.mode;
	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

	drm_mode_copy(pipe_mode, adjusted_mode);

	if (crtc_state->bigjoiner) {
		/*
		 * transcoder is programmed to the full mode,
		 * but pipe timings are half of the transcoder mode
		 */
		pipe_mode->crtc_hdisplay /= 2;
		pipe_mode->crtc_hblank_start /= 2;
		pipe_mode->crtc_hblank_end /= 2;
		pipe_mode->crtc_hsync_start /= 2;
		pipe_mode->crtc_hsync_end /= 2;
		pipe_mode->crtc_htotal /= 2;
		pipe_mode->crtc_clock /= 2;
	}

	if (crtc_state->splitter.enable) {
		int n = crtc_state->splitter.link_count;
		int overlap = crtc_state->splitter.pixel_overlap;

		/*
		 * eDP MSO uses segment timings from EDID for transcoder
		 * timings, but full mode for everything else.
		 *
		 * h_full = (h_segment - pixel_overlap) * link_count
		 */
		pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
		pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
		pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
		pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
		pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
		pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
		pipe_mode->crtc_clock *= n;

		/* With MSO the adjusted mode is derived from the widened pipe mode. */
		intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
		intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);
	} else {
		intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
		intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode);
	}

	intel_crtc_compute_pixel_rate(crtc_state);

	/* The user-visible mode advertises the full source size;
	 * bigjoiner is a bool, so the shift doubles the width when set. */
	drm_mode_copy(mode, adjusted_mode);
	mode->hdisplay = crtc_state->pipe_src_w << crtc_state->bigjoiner;
	mode->vdisplay = crtc_state->pipe_src_h;
}
4049
/*
 * Read out the crtc state via the encoder's get_config() hook, then derive
 * the software-only state (pipe_mode, user mode, pixel rate) from it.
 */
static void intel_encoder_get_config(struct intel_encoder *encoder,
				     struct intel_crtc_state *crtc_state)
{
	encoder->get_config(encoder, crtc_state);

	intel_crtc_readout_derived_state(crtc_state);
}
4057
/*
 * Compute and validate the pipe-level parts of the crtc state:
 * builds hw.pipe_mode from hw.adjusted_mode (halved for bigjoiner, widened
 * for the eDP MSO splitter), validates the dot clock and pipe source width
 * constraints, computes the pixel rate, and runs the FDI computation for
 * PCH encoders.
 *
 * Returns 0 on success, -EINVAL if the mode can't be supported, or the
 * FDI computation's error code.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_display_mode *pipe_mode = &pipe_config->hw.pipe_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	drm_mode_copy(pipe_mode, &pipe_config->hw.adjusted_mode);

	/* Adjust pipe_mode for bigjoiner, with half the horizontal mode */
	if (pipe_config->bigjoiner) {
		pipe_mode->crtc_clock /= 2;
		pipe_mode->crtc_hdisplay /= 2;
		pipe_mode->crtc_hblank_start /= 2;
		pipe_mode->crtc_hblank_end /= 2;
		pipe_mode->crtc_hsync_start /= 2;
		pipe_mode->crtc_hsync_end /= 2;
		pipe_mode->crtc_htotal /= 2;
		pipe_config->pipe_src_w /= 2;
	}

	if (pipe_config->splitter.enable) {
		int n = pipe_config->splitter.link_count;
		int overlap = pipe_config->splitter.pixel_overlap;

		/* eDP MSO: h_full = (h_segment - pixel_overlap) * link_count */
		pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
		pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
		pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
		pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
		pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
		pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
		pipe_mode->crtc_clock *= n;
	}

	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);

	if (DISPLAY_VER(dev_priv) < 4) {
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    pipe_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (pipe_mode->crtc_clock > clock_limit) {
		drm_dbg_kms(&dev_priv->drm,
			    "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			    pipe_mode->crtc_clock, clock_limit,
			    yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
	    pipe_mode->crtc_hsync_start == pipe_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	if (pipe_config->has_pch_encoder)
		return ilk_fdi_compute_config(crtc, pipe_config);

	return 0;
}
4151
4152 static void
4153 intel_reduce_m_n_ratio(u32 *num, u32 *den)
4154 {
4155         while (*num > DATA_LINK_M_N_MASK ||
4156                *den > DATA_LINK_M_N_MASK) {
4157                 *num >>= 1;
4158                 *den >>= 1;
4159         }
4160 }
4161
4162 static void compute_m_n(unsigned int m, unsigned int n,
4163                         u32 *ret_m, u32 *ret_n,
4164                         bool constant_n)
4165 {
4166         /*
4167          * Several DP dongles in particular seem to be fussy about
4168          * too large link M/N values. Give N value as 0x8000 that
4169          * should be acceptable by specific devices. 0x8000 is the
4170          * specified fixed N value for asynchronous clock mode,
4171          * which the devices expect also in synchronous clock mode.
4172          */
4173         if (constant_n)
4174                 *ret_n = DP_LINK_CONSTANT_N_VALUE;
4175         else
4176                 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
4177
4178         *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
4179         intel_reduce_m_n_ratio(ret_m, ret_n);
4180 }
4181
/*
 * Compute the data (gmch) and link M/N divider pairs for a link.
 *
 * @bits_per_pixel: pipe bpp
 * @nlanes: number of link lanes
 * @pixel_clock: dot clock in kHz
 * @link_clock: link clock in kHz
 * @m_n: filled with the computed TU size and M/N pairs
 * @constant_n: use the fixed 0x8000 N value (see compute_m_n())
 * @fec_enable: account for the DP FEC bandwidth overhead on the data clock
 */
void
intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
		       int pixel_clock, int link_clock,
		       struct intel_link_m_n *m_n,
		       bool constant_n, bool fec_enable)
{
	u32 data_clock = bits_per_pixel * pixel_clock;

	if (fec_enable)
		data_clock = intel_dp_mode_to_fec_clock(data_clock);

	m_n->tu = 64;
	/* data M/N: payload bandwidth vs. total link bandwidth (8b per symbol). */
	compute_m_n(data_clock,
		    link_clock * nlanes * 8,
		    &m_n->gmch_m, &m_n->gmch_n,
		    constant_n);

	/* link M/N: dot clock vs. link clock. */
	compute_m_n(pixel_clock, link_clock,
		    &m_n->link_m, &m_n->link_n,
		    constant_n);
}
4203
/*
 * Reconcile the VBT's lvds_use_ssc setting with what the BIOS actually
 * programmed into PCH_DREF_CONTROL, preferring the BIOS's choice.
 */
static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
{
	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker.  Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		bool bios_lvds_use_ssc = intel_de_read(dev_priv,
						       PCH_DREF_CONTROL) &
			DREF_SSC1_ENABLE;

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			drm_dbg_kms(&dev_priv->drm,
				    "SSC %s by BIOS, overriding VBT which says %s\n",
				    enableddisabled(bios_lvds_use_ssc),
				    enableddisabled(dev_priv->vbt.lvds_use_ssc));
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}
}
4226
/* Program the PCH transcoder's data/link M1/N1 divider registers for this pipe. */
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* TU size shares the DATA_M1 register with gmch_m. */
	intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
		       TU_SIZE(m_n->tu) | m_n->gmch_m);
	intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
4240
4241 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
4242                                  enum transcoder transcoder)
4243 {
4244         if (IS_HASWELL(dev_priv))
4245                 return transcoder == TRANSCODER_EDP;
4246
4247         /*
4248          * Strictly speaking some registers are available before
4249          * gen7, but we only support DRRS on gen7+
4250          */
4251         return DISPLAY_VER(dev_priv) == 7 || IS_CHERRYVIEW(dev_priv);
4252 }
4253
/*
 * Program the CPU transcoder's M/N divider registers. On gen5+ the
 * per-transcoder registers are used (plus M2/N2 when DRRS applies);
 * on older platforms the per-pipe G4X registers are used instead.
 */
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (DISPLAY_VER(dev_priv) >= 5) {
		intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
			       TU_SIZE(m_n->tu) | m_n->gmch_m);
		intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
			       m_n->gmch_n);
		intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
			       m_n->link_m);
		intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
			       m_n->link_n);
		/*
		 *  M2_N2 registers are set only if DRRS is supported
		 * (to make sure the registers are not unnecessarily accessed).
		 */
		if (m2_n2 && crtc_state->has_drrs &&
		    transcoder_has_m2_n2(dev_priv, transcoder)) {
			intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
				       TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
				       m2_n2->gmch_n);
			intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
				       m2_n2->link_m);
			intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
				       m2_n2->link_n);
		}
	} else {
		/* Pre-ILK: per-pipe register layout. */
		intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
			       TU_SIZE(m_n->tu) | m_n->gmch_m);
		intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
		intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
4295
/*
 * Program the selected DP M/N divider set (M1_N1 or M2_N2) into the
 * transcoder registers. Note that the PCH transcoder path always
 * programs dp_m_n regardless of @m_n.
 */
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
{
	const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	if (m_n == M1_N1) {
		dp_m_n = &crtc_state->dp_m_n;
		dp_m2_n2 = &crtc_state->dp_m2_n2;
	} else if (m_n == M2_N2) {

		/*
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
		 * needs to be programmed into M1_N1.
		 */
		dp_m_n = &crtc_state->dp_m2_n2;
	} else {
		drm_err(&i915->drm, "Unsupported divider value\n");
		return;
	}

	if (crtc_state->has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
}
4321
/*
 * Program the transcoder's H/V timing registers from hw.adjusted_mode.
 * The hardware registers hold "value - 1", and interlaced modes need
 * the vtotal/vblank_end adjusted plus a VSYNCSHIFT.
 */
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		/* Keep the shift within one scanline. */
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/* VSYNCSHIFT only exists on gen4+. */
	if (DISPLAY_VER(dev_priv) > 3)
		intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
			       vsyncshift);

	/* Each register packs "start - 1" in the low and "end - 1" in the high 16 bits. */
	intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
	intel_de_write(dev_priv, HBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
	intel_de_write(dev_priv, HSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));

	intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
	intel_de_write(dev_priv, VBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
	intel_de_write(dev_priv, VSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		intel_de_write(dev_priv, VTOTAL(pipe),
			       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));

}
4379
/* Program PIPESRC with the pipe source size, packed as (w-1)<<16 | (h-1). */
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	intel_de_write(dev_priv, PIPESRC(pipe),
		       ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
}
4392
4393 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
4394 {
4395         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4396         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
4397
4398         if (DISPLAY_VER(dev_priv) == 2)
4399                 return false;
4400
4401         if (DISPLAY_VER(dev_priv) >= 9 ||
4402             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
4403                 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
4404         else
4405                 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
4406 }
4407
/*
 * Read the transcoder's H/V timing registers back into hw.adjusted_mode.
 * The registers hold "value - 1" (start in the low, end in the high 16
 * bits), hence the +1 decoding. DSI transcoders have no HBLANK/VBLANK
 * registers, so those fields are skipped for them.
 */
static void intel_get_transcoder_timings(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	u32 tmp;

	tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;

	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_hblank_start =
							(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_hblank_end =
						((tmp >> 16) & 0xffff) + 1;
	}
	tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;

	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_vblank_start =
							(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end =
						((tmp >> 16) & 0xffff) + 1;
	}
	tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	/* Undo the interlace adjustments made when the timings were programmed
	 * (see intel_set_transcoder_timings()). */
	if (intel_pipe_is_interlaced(pipe_config)) {
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
	}
}
4452
/* Read PIPESRC back into pipe_src_w/h; the register packs (w-1)<<16 | (h-1). */
static void intel_get_pipe_src_size(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
}
4464
/*
 * Build and write the PIPECONF register value for pre-ILK (and VLV/CHV)
 * pipes: double wide, bpc/dither, interlace mode, color range, gamma
 * mode and framestart delay.
 */
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 pipeconf;

	pipeconf = 0;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;

	if (crtc_state->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (crtc_state->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/* Pre-gen4 and SDVO outputs use the field-indication variant. */
		if (DISPLAY_VER(dev_priv) < 4 ||
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else {
		pipeconf |= PIPECONF_PROGRESSIVE;
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	     crtc_state->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	pipeconf |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);

	intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
	intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
}
4525
4526 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
4527 {
4528         if (IS_I830(dev_priv))
4529                 return false;
4530
4531         return DISPLAY_VER(dev_priv) >= 4 ||
4532                 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
4533 }
4534
4535 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
4536 {
4537         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4538         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4539         u32 tmp;
4540
4541         if (!i9xx_has_pfit(dev_priv))
4542                 return;
4543
4544         tmp = intel_de_read(dev_priv, PFIT_CONTROL);
4545         if (!(tmp & PFIT_ENABLE))
4546                 return;
4547
4548         /* Check whether the pfit is attached to our pipe. */
4549         if (DISPLAY_VER(dev_priv) < 4) {
4550                 if (crtc->pipe != PIPE_B)
4551                         return;
4552         } else {
4553                 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
4554                         return;
4555         }
4556
4557         crtc_state->gmch_pfit.control = tmp;
4558         crtc_state->gmch_pfit.pgm_ratios =
4559                 intel_de_read(dev_priv, PFIT_PGM_RATIOS);
4560 }
4561
4562 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
4563                                struct intel_crtc_state *pipe_config)
4564 {
4565         struct drm_device *dev = crtc->base.dev;
4566         struct drm_i915_private *dev_priv = to_i915(dev);
4567         enum pipe pipe = crtc->pipe;
4568         struct dpll clock;
4569         u32 mdiv;
4570         int refclk = 100000;
4571
4572         /* In case of DSI, DPLL will not be used */
4573         if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
4574                 return;
4575
4576         vlv_dpio_get(dev_priv);
4577         mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
4578         vlv_dpio_put(dev_priv);
4579
4580         clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
4581         clock.m2 = mdiv & DPIO_M2DIV_MASK;
4582         clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
4583         clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
4584         clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
4585
4586         pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
4587 }
4588
/*
 * Compute the CHV port clock from the DPLL dividers read out over DPIO.
 * DSI pipes don't use the DPLL, in which case nothing is read out.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
                               struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000; /* DPLL reference in kHz */

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* Grab all the divider dwords in one DPIO critical section */
	vlv_dpio_get(dev_priv);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	vlv_dpio_put(dev_priv);

	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	/* m2 integer part lives in DW0; fractional part (if enabled) in DW2 */
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
4622
4623 static enum intel_output_format
4624 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
4625 {
4626         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4627         u32 tmp;
4628
4629         tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
4630
4631         if (tmp & PIPEMISC_YUV420_ENABLE) {
4632                 /* We support 4:2:0 in full blend mode only */
4633                 drm_WARN_ON(&dev_priv->drm,
4634                             (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
4635
4636                 return INTEL_OUTPUT_FORMAT_YCBCR420;
4637         } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
4638                 return INTEL_OUTPUT_FORMAT_YCBCR444;
4639         } else {
4640                 return INTEL_OUTPUT_FORMAT_RGB;
4641         }
4642 }
4643
4644 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
4645 {
4646         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4647         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
4648         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4649         enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
4650         u32 tmp;
4651
4652         tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
4653
4654         if (tmp & DISPPLANE_GAMMA_ENABLE)
4655                 crtc_state->gamma_enable = true;
4656
4657         if (!HAS_GMCH(dev_priv) &&
4658             tmp & DISPPLANE_PIPE_CSC_ENABLE)
4659                 crtc_state->csc_enable = true;
4660 }
4661
/*
 * Read out the full hardware pipe state for gmch (i9xx-style) platforms
 * into @pipe_config. Returns true if the pipe is enabled and the state
 * was read out, false if the pipe is off or its power domain is down.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	/* Pipe registers are only accessible while the power domain is up */
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	/* On gmch platforms the transcoder maps 1:1 to the pipe */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* Only g4x and later carry the bpc bits in PIPECONF */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
		PIPECONF_GAMMA_MODE_SHIFT;

	if (IS_CHERRYVIEW(dev_priv))
		pipe_config->cgm_mode = intel_de_read(dev_priv,
						      CGM_PIPE_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	if (DISPLAY_VER(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_transcoder_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(pipe_config);

	/* Pixel multiplier readout; the register layout varies by gen */
	if (DISPLAY_VER(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
							DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
							       FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
							       FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	/* Derive the port clock from the platform-specific DPLL state */
	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
4783
/*
 * Initialize the PCH display reference clock (PCH_DREF_CONTROL) on
 * IBX/CPT class PCHs: compute the desired final register value from the
 * attached LVDS/eDP outputs and current DPLL SSC usage, then walk the
 * hardware towards it one clock source at a time, settling ~200us
 * between steps.
 */
static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			/* Only port A eDP is driven off the CPU */
			if (encoder->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	/* An external CK505 clock chip (per VBT) rules out SSC on IBX */
	if (HAS_PCH_IBX(dev_priv)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
		u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	drm_dbg_kms(&dev_priv->drm,
		    "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		    has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else if (using_ssc_source) {
		/* No panel, but keep SSC alive for the DPLL still using it */
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	/* Nothing to do if the hardware already matches the wanted state */
	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output  */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				drm_dbg_kms(&dev_priv->drm,
					    "Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);
	} else {
		drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);

		/* SSC must stay on while any DPLL still references it */
		if (!using_ssc_source) {
			drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
			intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
			udelay(200);
		}
	}

	/* The incremental updates above must have converged on 'final' */
	BUG_ON(val != final);
}
4952
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	u32 reg, tmp;

	/* Sanitize parameter combinations the hardware can't do */
	if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
		     "FDI requires downspread\n"))
		with_spread = true;
	if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
		     with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	/* All SBI (sideband) accesses must hold sb_lock */
	mutex_lock(&dev_priv->sb_lock);

	/* Un-disable the SSC block, but keep the clock path alted (PATHALT) */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		/* Release PATHALT so the spread clock propagates */
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi)
			lpt_fdi_program_mphy(dev_priv);
	}

	/* Buffer-enable config register differs between LP and non-LP PCH */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
4996
/* Sequence to disable CLKOUT_DP */
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	u32 reg, tmp;

	/* All SBI (sideband) accesses must hold sb_lock */
	mutex_lock(&dev_priv->sb_lock);

	/* Buffer-enable config register differs between LP and non-LP PCH */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		/* The clock path must be alted (PATHALT) before disabling */
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
5022
/*
 * Map a clock bend step count (-50..50 in increments of 5) to an index
 * into the sscdivintphase[] table below.
 */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/* SBI_SSCDIVINTPHASE values for each supported bend step (see BEND_IDX) */
static const u16 sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};
5048
5049 /*
5050  * Bend CLKOUT_DP
5051  * steps -50 to 50 inclusive, in steps of 5
5052  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
5053  * change in clock period = -(steps / 10) * 5.787 ps
5054  */
5055 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
5056 {
5057         u32 tmp;
5058         int idx = BEND_IDX(steps);
5059
5060         if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
5061                 return;
5062
5063         if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
5064                 return;
5065
5066         mutex_lock(&dev_priv->sb_lock);
5067
5068         if (steps % 10 != 0)
5069                 tmp = 0xAAAAAAAB;
5070         else
5071                 tmp = 0x00000000;
5072         intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
5073
5074         tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
5075         tmp &= 0xffff0000;
5076         tmp |= sscdivintphase[idx];
5077         intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
5078
5079         mutex_unlock(&dev_priv->sb_lock);
5080 }
5081
5082 #undef BEND_IDX
5083
5084 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
5085 {
5086         u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
5087         u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
5088
5089         if ((ctl & SPLL_PLL_ENABLE) == 0)
5090                 return false;
5091
5092         if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
5093             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
5094                 return true;
5095
5096         if (IS_BROADWELL(dev_priv) &&
5097             (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
5098                 return true;
5099
5100         return false;
5101 }
5102
5103 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
5104                                enum intel_dpll_id id)
5105 {
5106         u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
5107         u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
5108
5109         if ((ctl & WRPLL_PLL_ENABLE) == 0)
5110                 return false;
5111
5112         if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
5113                 return true;
5114
5115         if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
5116             (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
5117             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
5118                 return true;
5119
5120         return false;
5121 }
5122
/*
 * Initialize the LPT PCH reference clock (CLKOUT_DP). If any PLL is
 * still using the PCH SSC reference (e.g. as programmed by the BIOS)
 * it is left untouched; otherwise CLKOUT_DP is enabled (with spread
 * and FDI I/O configured when an analog/FDI output exists) or disabled.
 */
static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool has_fdi = false;

	/* Analog (CRT) outputs are the only FDI users here */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			has_fdi = true;
			break;
		default:
			break;
		}
	}

	/*
	 * The BIOS may have decided to use the PCH SSC
	 * reference so we must not disable it until the
	 * relevant PLLs have stopped relying on it. We'll
	 * just leave the PCH SSC reference enabled in case
	 * any active PLL is using it. It will get disabled
	 * after runtime suspend if we don't have FDI.
	 *
	 * TODO: Move the whole reference clock handling
	 * to the modeset sequence proper so that we can
	 * actually enable/disable/reconfigure these things
	 * safely. To do that we need to introduce a real
	 * clock hierarchy. That would also allow us to do
	 * clock bending finally.
	 */
	dev_priv->pch_ssc_use = 0;

	if (spll_uses_pch_ssc(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
		dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
	}

	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
		drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
	}

	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
		drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
	}

	/* Leave the PCH SSC reference alone while anything still uses it */
	if (dev_priv->pch_ssc_use)
		return;

	if (has_fdi) {
		/* FDI needs CLKOUT_DP with spread and no bend */
		lpt_bend_clkout_dp(dev_priv, 0);
		lpt_enable_clkout_dp(dev_priv, true, true);
	} else {
		lpt_disable_clkout_dp(dev_priv);
	}
}
5180
5181 /*
5182  * Initialize reference clocks when the driver loads
5183  */
5184 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
5185 {
5186         if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
5187                 ilk_init_pch_refclk(dev_priv);
5188         else if (HAS_PCH_LPT(dev_priv))
5189                 lpt_init_pch_refclk(dev_priv);
5190 }
5191
/*
 * Program PIPECONF for ILK-style (PCH) platforms: bpc, dithering,
 * scan mode, color range, gamma mode and frame start delay.
 */
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 val;

	val = 0;

	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (crtc_state->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	/*
	 * This would end up with an odd purple hue over
	 * the entire display. Make sure we don't do it.
	 */
	drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
		    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);

	if (crtc_state->limited_color_range &&
	    !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
		val |= PIPECONF_COLOR_RANGE_SELECT;

	/* Non-RGB output implies the BT.709 YCbCr colorspace here */
	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;

	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	/* Hardware counts the delay from 0, software from 1 */
	val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);

	intel_de_write(dev_priv, PIPECONF(pipe), val);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));
}
5248
5249 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
5250 {
5251         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5252         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5253         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5254         u32 val = 0;
5255
5256         if (IS_HASWELL(dev_priv) && crtc_state->dither)
5257                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
5258
5259         if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
5260                 val |= PIPECONF_INTERLACED_ILK;
5261         else
5262                 val |= PIPECONF_PROGRESSIVE;
5263
5264         if (IS_HASWELL(dev_priv) &&
5265             crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
5266                 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
5267
5268         intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
5269         intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
5270 }
5271
/*
 * Program PIPEMISC for BDW+: bpc, dithering, output colorspace
 * (including 4:2:0 full blend mode), HDR precision, pixel rounding,
 * and (on ADL-P) the underrun bubble counter based on scaler usage.
 */
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;

	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 val = 0;
	int i;

	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPEMISC_6_BPC;
		break;
	case 24:
		val |= PIPEMISC_8_BPC;
		break;
	case 30:
		val |= PIPEMISC_10_BPC;
		break;
	case 36:
		/* Port output 12BPC defined for ADLP+ */
		if (DISPLAY_VER(dev_priv) > 12)
			val |= PIPEMISC_12_BPC_ADLP;
		break;
	default:
		MISSING_CASE(crtc_state->pipe_bpp);
		break;
	}

	if (crtc_state->dither)
		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

	/* Both YCbCr formats set the YUV colorspace bit */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;

	/* 4:2:0 is only supported in full blend mode */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		val |= PIPEMISC_YUV420_ENABLE |
			PIPEMISC_YUV420_MODE_FULL_BLEND;

	if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state))
		val |= PIPEMISC_HDR_MODE_PRECISION;

	if (DISPLAY_VER(dev_priv) >= 12)
		val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;

	if (IS_ALDERLAKE_P(dev_priv)) {
		bool scaler_in_use = false;

		/* Any scaler in use enables the underrun bubble counter */
		for (i = 0; i < crtc->num_scalers; i++) {
			if (!scaler_state->scalers[i].in_use)
				continue;

			scaler_in_use = true;
			break;
		}

		intel_de_rmw(dev_priv, PIPE_MISC2(crtc->pipe),
			     PIPE_MISC2_UNDERRUN_BUBBLE_COUNTER_MASK,
			     scaler_in_use ? PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN :
			     PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS);
	}

	intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
}
5338
5339 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
5340 {
5341         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5342         u32 tmp;
5343
5344         tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
5345
5346         switch (tmp & PIPEMISC_BPC_MASK) {
5347         case PIPEMISC_6_BPC:
5348                 return 18;
5349         case PIPEMISC_8_BPC:
5350                 return 24;
5351         case PIPEMISC_10_BPC:
5352                 return 30;
5353         /*
5354          * PORT OUTPUT 12 BPC defined for ADLP+.
5355          *
5356          * TODO:
5357          * For previous platforms with DSI interface, bits 5:7
5358          * are used for storing pipe_bpp irrespective of dithering.
5359          * Since the value of 12 BPC is not defined for these bits
5360          * on older platforms, need to find a workaround for 12 BPC
5361          * MIPI DSI HW readout.
5362          */
5363         case PIPEMISC_12_BPC_ADLP:
5364                 if (DISPLAY_VER(dev_priv) > 12)
5365                         return 36;
5366                 fallthrough;
5367         default:
5368                 MISSING_CASE(tmp);
5369                 return 0;
5370         }
5371 }
5372
/*
 * Compute the minimum number of FDI lanes needed to carry
 * @target_clock (kHz) at @bpp bits per pixel over a link running at
 * @link_bw, padding the required bandwidth by 5% to cover spread
 * spectrum clocking (max center spread is 2.5%; 5% for safety).
 */
int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	unsigned int bps = (unsigned int)target_clock * bpp * 21 / 20;
	unsigned int lane_bw = (unsigned int)link_bw * 8;

	/* round up: a partially used lane still has to be enabled */
	return (bps + lane_bw - 1) / lane_bw;
}
5383
/*
 * Read out the PCH transcoder link M1/N1 and data M1/N1 values for
 * @crtc into @m_n. The TU size is packed into the upper bits of the
 * data M register, so that register is read a second time to extract
 * it; hardware stores TU size minus one, hence the + 1.
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
	m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
	m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
5399
/*
 * Read out the CPU transcoder link/data M/N values into @m_n, and
 * optionally the second set into @m2_n2 (only on transcoders that
 * have M2/N2 registers and when @m2_n2 is non-NULL). Display ver 5+
 * uses per-transcoder registers; older platforms use the per-pipe
 * G4X registers. TU size is packed into the data M register and is
 * stored by hardware as size minus one, hence the + 1.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (DISPLAY_VER(dev_priv) >= 5) {
		m_n->link_m = intel_de_read(dev_priv,
					    PIPE_LINK_M1(transcoder));
		m_n->link_n = intel_de_read(dev_priv,
					    PIPE_LINK_N1(transcoder));
		m_n->gmch_m = intel_de_read(dev_priv,
					    PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = intel_de_read(dev_priv,
					    PIPE_DATA_N1(transcoder));
		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;

		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
			m2_n2->link_m = intel_de_read(dev_priv,
						      PIPE_LINK_M2(transcoder));
			m2_n2->link_n = intel_de_read(dev_priv,
							     PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = intel_de_read(dev_priv,
							     PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n = intel_de_read(dev_priv,
							     PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
		m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
5444
5445 void intel_dp_get_m_n(struct intel_crtc *crtc,
5446                       struct intel_crtc_state *pipe_config)
5447 {
5448         if (pipe_config->has_pch_encoder)
5449                 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
5450         else
5451                 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
5452                                              &pipe_config->dp_m_n,
5453                                              &pipe_config->dp_m2_n2);
5454 }
5455
/* Read the FDI M/N values of the CPU transcoder into @pipe_config->fdi_m_n. */
static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
                                   struct intel_crtc_state *pipe_config)
{
        intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
                                     &pipe_config->fdi_m_n, NULL);
}
5462
5463 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
5464                                   u32 pos, u32 size)
5465 {
5466         drm_rect_init(&crtc_state->pch_pfit.dst,
5467                       pos >> 16, pos & 0xffff,
5468                       size >> 16, size & 0xffff);
5469 }
5470
/*
 * Read out the SKL+ pipe scaler ("panel fitter") state for the crtc.
 * Scans the pipe's scalers for one that is enabled and bound to the
 * pipe itself (not to a plane), records its window into
 * pch_pfit, marks it in use, and updates scaler_id/scaler_users
 * accordingly. scaler_id stays -1 when no pipe scaler is active.
 */
static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
	int id = -1;
	int i;

	/* find scaler attached to this pipe */
	for (i = 0; i < crtc->num_scalers; i++) {
		u32 ctl, pos, size;

		/* skip scalers that are disabled or assigned to a plane */
		ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
		if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
			continue;

		id = i;
		crtc_state->pch_pfit.enabled = true;

		pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
		size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));

		ilk_get_pfit_pos_size(crtc_state, pos, size);

		scaler_state->scalers[i].in_use = true;
		break;
	}

	scaler_state->scaler_id = id;
	if (id >= 0)
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	else
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
}
5505
/*
 * Read out the ILK-style panel fitter state for the crtc: enable bit
 * and window position/size. Does nothing if the fitter is disabled.
 */
static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 ctl, pos, size;

	ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
	if ((ctl & PF_ENABLE) == 0)
		return;

	crtc_state->pch_pfit.enabled = true;

	pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
	size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));

	ilk_get_pfit_pos_size(crtc_state, pos, size);

	/*
	 * We currently do not free assignments of panel fitters on
	 * ivb/hsw (since we don't use the higher upscaling modes which
	 * differentiates them) so just WARN about this case for now.
	 */
	drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 7 &&
		    (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
}
5531
5532 static bool ilk_get_pipe_config(struct intel_crtc *crtc,
5533                                 struct intel_crtc_state *pipe_config)
5534 {
5535         struct drm_device *dev = crtc->base.dev;
5536         struct drm_i915_private *dev_priv = to_i915(dev);
5537         enum intel_display_power_domain power_domain;
5538         intel_wakeref_t wakeref;
5539         u32 tmp;
5540         bool ret;
5541
5542         power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
5543         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
5544         if (!wakeref)
5545                 return false;
5546
5547         pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
5548         pipe_config->shared_dpll = NULL;
5549
5550         ret = false;
5551         tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
5552         if (!(tmp & PIPECONF_ENABLE))
5553                 goto out;
5554
5555         switch (tmp & PIPECONF_BPC_MASK) {
5556         case PIPECONF_6BPC:
5557                 pipe_config->pipe_bpp = 18;
5558                 break;
5559         case PIPECONF_8BPC:
5560                 pipe_config->pipe_bpp = 24;
5561                 break;
5562         case PIPECONF_10BPC:
5563                 pipe_config->pipe_bpp = 30;
5564                 break;
5565         case PIPECONF_12BPC:
5566                 pipe_config->pipe_bpp = 36;
5567                 break;
5568         default:
5569                 break;
5570         }
5571
5572         if (tmp & PIPECONF_COLOR_RANGE_SELECT)
5573                 pipe_config->limited_color_range = true;
5574
5575         switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
5576         case PIPECONF_OUTPUT_COLORSPACE_YUV601:
5577         case PIPECONF_OUTPUT_COLORSPACE_YUV709:
5578                 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
5579                 break;
5580         default:
5581                 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
5582                 break;
5583         }
5584
5585         pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
5586                 PIPECONF_GAMMA_MODE_SHIFT;
5587
5588         pipe_config->csc_mode = intel_de_read(dev_priv,
5589                                               PIPE_CSC_MODE(crtc->pipe));
5590
5591         i9xx_get_pipe_color_config(pipe_config);
5592         intel_color_get_config(pipe_config);
5593
5594         if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
5595                 struct intel_shared_dpll *pll;
5596                 enum intel_dpll_id pll_id;
5597                 bool pll_active;
5598
5599                 pipe_config->has_pch_encoder = true;
5600
5601                 tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe));
5602                 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
5603                                           FDI_DP_PORT_WIDTH_SHIFT) + 1;
5604
5605                 ilk_get_fdi_m_n_config(crtc, pipe_config);
5606
5607                 if (HAS_PCH_IBX(dev_priv)) {
5608                         /*
5609                          * The pipe->pch transcoder and pch transcoder->pll
5610                          * mapping is fixed.
5611                          */
5612                         pll_id = (enum intel_dpll_id) crtc->pipe;
5613                 } else {
5614                         tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
5615                         if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
5616                                 pll_id = DPLL_ID_PCH_PLL_B;
5617                         else
5618                                 pll_id= DPLL_ID_PCH_PLL_A;
5619                 }
5620
5621                 pipe_config->shared_dpll =
5622                         intel_get_shared_dpll_by_id(dev_priv, pll_id);
5623                 pll = pipe_config->shared_dpll;
5624
5625                 pll_active = intel_dpll_get_hw_state(dev_priv, pll,
5626                                                      &pipe_config->dpll_hw_state);
5627                 drm_WARN_ON(dev, !pll_active);
5628
5629                 tmp = pipe_config->dpll_hw_state.dpll;
5630                 pipe_config->pixel_multiplier =
5631                         ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
5632                          >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
5633
5634                 ilk_pch_clock_get(crtc, pipe_config);
5635         } else {
5636                 pipe_config->pixel_multiplier = 1;
5637         }
5638
5639         intel_get_transcoder_timings(crtc, pipe_config);
5640         intel_get_pipe_src_size(crtc, pipe_config);
5641
5642         ilk_get_pfit_config(pipe_config);
5643
5644         ret = true;
5645
5646 out:
5647         intel_display_power_put(dev_priv, power_domain, wakeref);
5648
5649         return ret;
5650 }
5651
5652 static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
5653                                            enum transcoder cpu_transcoder)
5654 {
5655         enum intel_display_power_domain power_domain;
5656         intel_wakeref_t wakeref;
5657         u32 tmp = 0;
5658
5659         power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
5660
5661         with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
5662                 tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
5663
5664         return tmp & TRANS_DDI_FUNC_ENABLE;
5665 }
5666
5667 static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
5668 {
5669         u8 panel_transcoder_mask = BIT(TRANSCODER_EDP);
5670
5671         if (DISPLAY_VER(i915) >= 11)
5672                 panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
5673
5674         return panel_transcoder_mask;
5675 }
5676
/*
 * Return a bitmask of the transcoders that are enabled and driving
 * this crtc's pipe: panel (eDP/DSI) transcoders whose EDP input
 * select points at the pipe, plus the pipe's own transcoder if its
 * DDI function is enabled.
 */
static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv);
	enum transcoder cpu_transcoder;
	u8 enabled_transcoders = 0;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder,
				       panel_transcoder_mask) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;
		enum pipe trans_pipe;
		u32 tmp = 0;

		/* tmp stays 0 (disabled) if the power domain is down */
		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
			tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));

		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			continue;

		/* decode which pipe this panel transcoder feeds */
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			drm_WARN(dev, 1,
				 "unknown pipe linked to transcoder %s\n",
				 transcoder_name(cpu_transcoder));
			fallthrough;
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_pipe = PIPE_C;
			break;
		case TRANS_DDI_EDP_INPUT_D_ONOFF:
			trans_pipe = PIPE_D;
			break;
		}

		if (trans_pipe == crtc->pipe)
			enabled_transcoders |= BIT(cpu_transcoder);
	}

	/* the pipe's own transcoder (pipe -> transcoder mapping is 1:1) */
	cpu_transcoder = (enum transcoder) crtc->pipe;
	if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
		enabled_transcoders |= BIT(cpu_transcoder);

	return enabled_transcoders;
}
5734
5735 static bool has_edp_transcoders(u8 enabled_transcoders)
5736 {
5737         return enabled_transcoders & BIT(TRANSCODER_EDP);
5738 }
5739
5740 static bool has_dsi_transcoders(u8 enabled_transcoders)
5741 {
5742         return enabled_transcoders & (BIT(TRANSCODER_DSI_0) |
5743                                       BIT(TRANSCODER_DSI_1));
5744 }
5745
5746 static bool has_pipe_transcoders(u8 enabled_transcoders)
5747 {
5748         return enabled_transcoders & ~(BIT(TRANSCODER_EDP) |
5749                                        BIT(TRANSCODER_DSI_0) |
5750                                        BIT(TRANSCODER_DSI_1));
5751 }
5752
5753 static void assert_enabled_transcoders(struct drm_i915_private *i915,
5754                                        u8 enabled_transcoders)
5755 {
5756         /* Only one type of transcoder please */
5757         drm_WARN_ON(&i915->drm,
5758                     has_edp_transcoders(enabled_transcoders) +
5759                     has_dsi_transcoders(enabled_transcoders) +
5760                     has_pipe_transcoders(enabled_transcoders) > 1);
5761
5762         /* Only DSI transcoders can be ganged */
5763         drm_WARN_ON(&i915->drm,
5764                     !has_dsi_transcoders(enabled_transcoders) &&
5765                     !is_power_of_2(enabled_transcoders));
5766 }
5767
/*
 * Determine which transcoder drives the crtc's pipe, record it in
 * pipe_config->cpu_transcoder, grab its power domain into
 * @power_domain_set, and report whether the pipe is enabled. Also
 * flags force_thru when a panel transcoder's EDP input is in A_ONOFF
 * mode. Returns false when no transcoder is enabled or the power
 * domain cannot be acquired.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long enabled_transcoders;
	u32 tmp;

	enabled_transcoders = hsw_enabled_transcoders(crtc);
	if (!enabled_transcoders)
		return false;

	assert_enabled_transcoders(dev_priv, enabled_transcoders);

	/*
	 * With the exception of DSI we should only ever have
	 * a single enabled transcoder. With DSI let's just
	 * pick the first one.
	 */
	pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1;

	if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
						       POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
		return false;

	if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

		if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF)
			pipe_config->pch_pfit.force_thru = true;
	}

	tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
5805
/*
 * Check whether one of the BXT/GLK DSI transcoders (ports A and C)
 * is enabled and wired to this crtc's pipe; if so, record it in
 * pipe_config->cpu_transcoder and grab its power domain into
 * @power_domain_set. Returns true when a DSI transcoder was found.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder;
	enum port port;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
							       POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
			continue;

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* is this DSI transcoder feeding our pipe? */
		tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
5851
/*
 * Determine which DDI port the transcoder drives, and read out the
 * PCH encoder/FDI state when the pipe turns out to be wired through
 * the (single) PCH transcoder on HSW/BDW.
 */
static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	enum port port;
	u32 tmp;

	if (transcoder_is_dsi(cpu_transcoder)) {
		/* DSI transcoder -> port mapping is fixed */
		port = (cpu_transcoder == TRANSCODER_DSI_A) ?
						PORT_A : PORT_B;
	} else {
		tmp = intel_de_read(dev_priv,
				    TRANS_DDI_FUNC_CTL(cpu_transcoder));
		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			return;
		if (DISPLAY_VER(dev_priv) >= 12)
			port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
		else
			port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
	}

	/*
	 * Haswell has only one FDI/PCH transcoder (transcoder A), which
	 * is connected to DDI E. So just check whether this pipe is wired
	 * to DDI E and whether the PCH transcoder is on.
	 */
	if (DISPLAY_VER(dev_priv) < 9 &&
	    (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ilk_get_fdi_m_n_config(crtc, pipe_config);
	}
}
5890
/*
 * Read out the full hardware state of a HSW+ (DDI) pipe into
 * @pipe_config: transcoder, DSC/joiner, port, timings, output format,
 * gamma/CSC, linetime, panel fitter, IPS and pixel multiplier.
 * Returns true when the pipe is active. All power domain references
 * taken during readout are collected in a local set and released on
 * the way out.
 */
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_display_power_domain_set power_domain_set = { };
	bool active;
	u32 tmp;

	if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
						       POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);

	/* on BXT/GLK the pipe may be driven by a DSI transcoder instead */
	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
		drm_WARN_ON(&dev_priv->drm, active);
		active = true;
	}

	intel_dsc_get_config(pipe_config);
	if (DISPLAY_VER(dev_priv) >= 13 && !pipe_config->dsc.compression_enable)
		intel_uncompressed_joiner_get_config(pipe_config);

	if (!active) {
		/* bigjoiner slave doesn't enable transcoder */
		if (!pipe_config->bigjoiner_slave)
			goto out;

		active = true;
		pipe_config->pixel_multiplier = 1;

		/* we cannot read out most state, so don't bother.. */
		pipe_config->quirks |= PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE;
	} else if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
	    DISPLAY_VER(dev_priv) >= 11) {
		hsw_get_ddi_port_state(crtc, pipe_config);
		intel_get_transcoder_timings(crtc, pipe_config);
	}

	if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
		intel_vrr_get_config(crtc, pipe_config);

	intel_get_pipe_src_size(crtc, pipe_config);

	/* output format lives in PIPECONF on HSW, PIPEMISC on BDW+ */
	if (IS_HASWELL(dev_priv)) {
		u32 tmp = intel_de_read(dev_priv,
					PIPECONF(pipe_config->cpu_transcoder));

		if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		else
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	} else {
		pipe_config->output_format =
			bdw_get_pipemisc_output_format(crtc);
	}

	pipe_config->gamma_mode = intel_de_read(dev_priv,
						GAMMA_MODE(crtc->pipe));

	pipe_config->csc_mode = intel_de_read(dev_priv,
					      PIPE_CSC_MODE(crtc->pipe));

	if (DISPLAY_VER(dev_priv) >= 9) {
		tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));

		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
			pipe_config->gamma_enable = true;

		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
			pipe_config->csc_enable = true;
	} else {
		i9xx_get_pipe_color_config(pipe_config);
	}

	intel_color_get_config(pipe_config);

	tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
	pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		pipe_config->ips_linetime =
			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);

	/* panel fitter state needs its own power domain */
	if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
						      POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
		if (DISPLAY_VER(dev_priv) >= 9)
			skl_get_pfit_config(pipe_config);
		else
			ilk_get_pfit_config(pipe_config);
	}

	if (hsw_crtc_supports_ips(crtc)) {
		if (IS_HASWELL(dev_priv))
			pipe_config->ips_enabled = intel_de_read(dev_priv,
								 IPS_CTL) & IPS_ENABLE;
		else {
			/*
			 * We cannot readout IPS state on broadwell, set to
			 * true so we can set it to a defined state on first
			 * commit.
			 */
			pipe_config->ips_enabled = true;
		}
	}

	if (pipe_config->bigjoiner_slave) {
		/* Cannot be read out as a slave, set to 0. */
		pipe_config->pixel_multiplier = 0;
	} else if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			intel_de_read(dev_priv,
				      PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

out:
	intel_display_power_put_all_in_set(dev_priv, &power_domain_set);

	return active;
}
6016
6017 static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
6018 {
6019         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6020         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
6021
6022         if (!i915->display->get_pipe_config(crtc, crtc_state))
6023                 return false;
6024
6025         crtc_state->hw.active = true;
6026
6027         intel_crtc_readout_derived_state(crtc_state);
6028
6029         return true;
6030 }
6031
/* VESA 640x480x72Hz mode to set on the pipe during load detection */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
6037
6038 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
6039                                         struct drm_crtc *crtc)
6040 {
6041         struct drm_plane *plane;
6042         struct drm_plane_state *plane_state;
6043         int ret, i;
6044
6045         ret = drm_atomic_add_affected_planes(state, crtc);
6046         if (ret)
6047                 return ret;
6048
6049         for_each_new_plane_in_state(state, plane, plane_state, i) {
6050                 if (plane_state->crtc != crtc)
6051                         continue;
6052
6053                 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
6054                 if (ret)
6055                         return ret;
6056
6057                 drm_atomic_set_fb_for_plane(plane_state, NULL);
6058         }
6059
6060         return 0;
6061 }
6062
/*
 * intel_get_load_detect_pipe - light up a pipe for load-based detection
 * @connector: connector to drive
 * @old: storage for the state needed to undo the forced modeset later
 * @ctx: modeset acquire context (caller holds connection_mutex)
 *
 * Find (or reuse) a CRTC for @connector and commit a fixed 640x480 mode
 * on it with all planes disabled, so the encoder can perform load
 * detection. A duplicate of the pre-existing state is stashed in
 * @old->restore_state for intel_release_load_detect_pipe().
 *
 * Returns 1 (true) on success, 0 (false) on failure, or -EDEADLK when
 * the caller must back off and retry the locking sequence.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder =
		intel_attached_encoder(to_intel_connector(connector));
	struct intel_crtc *possible_crtc;
	struct intel_crtc *crtc = NULL;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		    connector->base.id, connector->name,
		    encoder->base.base.id, encoder->base.name);

	old->restore_state = NULL;

	drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = to_intel_crtc(connector->state->crtc);

		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_intel_crtc(dev, possible_crtc) {
		if (!(encoder->base.possible_crtcs &
		      drm_crtc_mask(&possible_crtc->base)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->base.mutex, ctx);
		if (ret)
			goto fail;

		/* already enabled crtcs are skipped; drop their lock again */
		if (possible_crtc->base.state->enable) {
			drm_modeset_unlock(&possible_crtc->base.mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "no pipe available for load-detect\n");
		ret = -ENODEV;
		goto fail;
	}

found:
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	/* Build the state that routes @connector to @crtc with the fixed mode. */
	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, &crtc->base);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->uapi.active = true;

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
					   &load_detect_mode);
	if (ret)
		goto fail;

	ret = intel_modeset_disable_planes(state, &crtc->base);
	if (ret)
		goto fail;

	/* Duplicate the current state so it can be restored afterwards. */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, &crtc->base));
	if (!ret)
		ret = drm_atomic_add_affected_planes(restore_state, &crtc->base);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to create a copy of old state to restore: %i\n",
			    ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to set mode on load-detect pipe\n");
		goto fail;
	}

	/* success: hand the restore state to the caller */
	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
	return true;

fail:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	/* -EDEADLK means the caller must drop all locks and retry */
	if (ret == -EDEADLK)
		return ret;

	return false;
}
6217
6218 void intel_release_load_detect_pipe(struct drm_connector *connector,
6219                                     struct intel_load_detect_pipe *old,
6220                                     struct drm_modeset_acquire_ctx *ctx)
6221 {
6222         struct intel_encoder *intel_encoder =
6223                 intel_attached_encoder(to_intel_connector(connector));
6224         struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
6225         struct drm_encoder *encoder = &intel_encoder->base;
6226         struct drm_atomic_state *state = old->restore_state;
6227         int ret;
6228
6229         drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6230                     connector->base.id, connector->name,
6231                     encoder->base.id, encoder->name);
6232
6233         if (!state)
6234                 return;
6235
6236         ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
6237         if (ret)
6238                 drm_dbg_kms(&i915->drm,
6239                             "Couldn't release load detect pipe: %i\n", ret);
6240         drm_atomic_state_put(state);
6241 }
6242
6243 static int i9xx_pll_refclk(struct drm_device *dev,
6244                            const struct intel_crtc_state *pipe_config)
6245 {
6246         struct drm_i915_private *dev_priv = to_i915(dev);
6247         u32 dpll = pipe_config->dpll_hw_state.dpll;
6248
6249         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
6250                 return dev_priv->vbt.lvds_ssc_freq;
6251         else if (HAS_PCH_SPLIT(dev_priv))
6252                 return 120000;
6253         else if (DISPLAY_VER(dev_priv) != 2)
6254                 return 96000;
6255         else
6256                 return 48000;
6257 }
6258
/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* DPLL selects which of the two FP divisor registers is in use. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		/* Pineview has its own N/M2 field layout; N is stored one-hot. */
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (DISPLAY_VER(dev_priv) != 2) {
		/* gen3+: P1 is encoded as a one-hot bitfield in the DPLL. */
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* P2 depends on the DPLL output mode (DAC serial vs LVDS). */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			drm_dbg_kms(&dev_priv->drm,
				    "Unknown DPLL mode %08x in programmed "
				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* gen2 */
		enum pipe lvds_pipe;

		if (IS_I85X(dev_priv) &&
		    intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
		    lvds_pipe == crtc->pipe) {
			u32 lvds = intel_de_read(dev_priv, LVDS);

			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			/* second LVDS channel powered up selects the /7 post divider */
			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
6351
6352 int intel_dotclock_calculate(int link_freq,
6353                              const struct intel_link_m_n *m_n)
6354 {
6355         /*
6356          * The calculation for the data clock is:
6357          * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
6358          * But we want to avoid losing precison if possible, so:
6359          * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
6360          *
6361          * and the link clock is simpler:
6362          * link_clock = (m * link_clock) / n
6363          */
6364
6365         if (!m_n->link_n)
6366                 return 0;
6367
6368         return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
6369 }
6370
6371 static void ilk_pch_clock_get(struct intel_crtc *crtc,
6372                               struct intel_crtc_state *pipe_config)
6373 {
6374         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6375
6376         /* read out port_clock from the DPLL */
6377         i9xx_crtc_clock_get(crtc, pipe_config);
6378
6379         /*
6380          * In case there is an active pipe without active ports,
6381          * we may need some idea for the dotclock anyway.
6382          * Calculate one based on the FDI configuration.
6383          */
6384         pipe_config->hw.adjusted_mode.crtc_clock =
6385                 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
6386                                          &pipe_config->fdi_m_n);
6387 }
6388
6389 /* Returns the currently programmed mode of the given encoder. */
6390 struct drm_display_mode *
6391 intel_encoder_current_mode(struct intel_encoder *encoder)
6392 {
6393         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
6394         struct intel_crtc_state *crtc_state;
6395         struct drm_display_mode *mode;
6396         struct intel_crtc *crtc;
6397         enum pipe pipe;
6398
6399         if (!encoder->get_hw_state(encoder, &pipe))
6400                 return NULL;
6401
6402         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
6403
6404         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
6405         if (!mode)
6406                 return NULL;
6407
6408         crtc_state = intel_crtc_state_alloc(crtc);
6409         if (!crtc_state) {
6410                 kfree(mode);
6411                 return NULL;
6412         }
6413
6414         if (!intel_crtc_get_pipe_config(crtc_state)) {
6415                 kfree(crtc_state);
6416                 kfree(mode);
6417                 return NULL;
6418         }
6419
6420         intel_encoder_get_config(encoder, crtc_state);
6421
6422         intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
6423
6424         kfree(crtc_state);
6425
6426         return mode;
6427 }
6428
6429 /**
6430  * intel_wm_need_update - Check whether watermarks need updating
6431  * @cur: current plane state
6432  * @new: new plane state
6433  *
6434  * Check current plane state versus the new one to determine whether
6435  * watermarks need to be recalculated.
6436  *
6437  * Returns true or false.
6438  */
6439 static bool intel_wm_need_update(const struct intel_plane_state *cur,
6440                                  struct intel_plane_state *new)
6441 {
6442         /* Update watermarks on tiling or size changes. */
6443         if (new->uapi.visible != cur->uapi.visible)
6444                 return true;
6445
6446         if (!cur->hw.fb || !new->hw.fb)
6447                 return false;
6448
6449         if (cur->hw.fb->modifier != new->hw.fb->modifier ||
6450             cur->hw.rotation != new->hw.rotation ||
6451             drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
6452             drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
6453             drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
6454             drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
6455                 return true;
6456
6457         return false;
6458 }
6459
6460 static bool needs_scaling(const struct intel_plane_state *state)
6461 {
6462         int src_w = drm_rect_width(&state->uapi.src) >> 16;
6463         int src_h = drm_rect_height(&state->uapi.src) >> 16;
6464         int dst_w = drm_rect_width(&state->uapi.dst);
6465         int dst_h = drm_rect_height(&state->uapi.dst);
6466
6467         return (src_w != dst_w || src_h != dst_h);
6468 }
6469
/*
 * intel_plane_atomic_calc_changes - compute derived crtc flags for a plane change
 * @old_crtc_state: old crtc state
 * @new_crtc_state: new crtc state (flags updated in place)
 * @old_plane_state: old plane state
 * @new_plane_state: new plane state
 *
 * Based on the plane's visibility transition (turn on / turn off /
 * stays visible), set the crtc-level flags controlling watermark
 * updates, cxsr disabling, frontbuffer tracking bits and LP watermark
 * handling for the upcoming commit.
 *
 * Returns 0 on success, or a negative error code from scaler setup.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *new_crtc_state,
				    const struct intel_plane_state *old_plane_state,
				    struct intel_plane_state *new_plane_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	bool mode_changed = intel_crtc_needs_modeset(new_crtc_state);
	bool was_crtc_enabled = old_crtc_state->hw.active;
	bool is_crtc_enabled = new_crtc_state->hw.active;
	bool turn_off, turn_on, visible, was_visible;
	int ret;

	/* skl+ non-cursor planes may need a pipe scaler (re)assigned */
	if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(new_crtc_state, new_plane_state);
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->uapi.visible;
	visible = new_plane_state->uapi.visible;

	if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		intel_plane_set_invisible(new_crtc_state, new_plane_state);
		visible = false;
	}

	/* plane stays invisible: nothing more to derive */
	if (!was_visible && !visible)
		return 0;

	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	drm_dbg_atomic(&dev_priv->drm,
		       "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
		       crtc->base.base.id, crtc->base.name,
		       plane->base.base.id, plane->base.name,
		       was_visible, visible,
		       turn_off, turn_on, mode_changed);

	if (turn_on) {
		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
			new_crtc_state->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			new_crtc_state->disable_cxsr = true;
	} else if (turn_off) {
		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
			new_crtc_state->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			new_crtc_state->disable_cxsr = true;
	} else if (intel_wm_need_update(old_plane_state, new_plane_state)) {
		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			new_crtc_state->update_wm_pre = true;
			new_crtc_state->update_wm_post = true;
		}
	}

	if (visible || was_visible)
		new_crtc_state->fb_bits |= plane->frontbuffer_bit;

	/*
	 * ILK/SNB DVSACNTR/Sprite Enable
	 * IVB SPR_CTL/Sprite Enable
	 * "When in Self Refresh Big FIFO mode, a write to enable the
	 *  plane will be internally buffered and delayed while Big FIFO
	 *  mode is exiting."
	 *
	 * Which means that enabling the sprite can take an extra frame
	 * when we start in big FIFO mode (LP1+). Thus we need to drop
	 * down to LP0 and wait for vblank in order to make sure the
	 * sprite gets enabled on the next vblank after the register write.
	 * Doing otherwise would risk enabling the sprite one frame after
	 * we've already signalled flip completion. We can resume LP1+
	 * once the sprite has been enabled.
	 *
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 * IVB SPR_SCALE/Scaling Enable
	 * "Low Power watermarks must be disabled for at least one
	 *  frame before enabling sprite scaling, and kept disabled
	 *  until sprite scaling is disabled."
	 *
	 * ILK/SNB DVSASCALE/Scaling Enable
	 * "When in Self Refresh Big FIFO mode, scaling enable will be
	 *  masked off while Big FIFO mode is exiting."
	 *
	 * Despite the w/a only being listed for IVB we assume that
	 * the ILK/SNB note has similar ramifications, hence we apply
	 * the w/a on all three platforms.
	 *
	 * With experimental results seems this is needed also for primary
	 * plane, not only sprite plane.
	 */
	if (plane->id != PLANE_CURSOR &&
	    (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) ||
	     IS_IVYBRIDGE(dev_priv)) &&
	    (turn_on || (!needs_scaling(old_plane_state) &&
			 needs_scaling(new_plane_state))))
		new_crtc_state->disable_lp_wm = true;

	return 0;
}
6591
6592 static bool encoders_cloneable(const struct intel_encoder *a,
6593                                const struct intel_encoder *b)
6594 {
6595         /* masks could be asymmetric, so check both ways */
6596         return a == b || (a->cloneable & (1 << b->type) &&
6597                           b->cloneable & (1 << a->type));
6598 }
6599
6600 static bool check_single_encoder_cloning(struct intel_atomic_state *state,
6601                                          struct intel_crtc *crtc,
6602                                          struct intel_encoder *encoder)
6603 {
6604         struct intel_encoder *source_encoder;
6605         struct drm_connector *connector;
6606         struct drm_connector_state *connector_state;
6607         int i;
6608
6609         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
6610                 if (connector_state->crtc != &crtc->base)
6611                         continue;
6612
6613                 source_encoder =
6614                         to_intel_encoder(connector_state->best_encoder);
6615                 if (!encoders_cloneable(encoder, source_encoder))
6616                         return false;
6617         }
6618
6619         return true;
6620 }
6621
6622 static int icl_add_linked_planes(struct intel_atomic_state *state)
6623 {
6624         struct intel_plane *plane, *linked;
6625         struct intel_plane_state *plane_state, *linked_plane_state;
6626         int i;
6627
6628         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
6629                 linked = plane_state->planar_linked_plane;
6630
6631                 if (!linked)
6632                         continue;
6633
6634                 linked_plane_state = intel_atomic_get_plane_state(state, linked);
6635                 if (IS_ERR(linked_plane_state))
6636                         return PTR_ERR(linked_plane_state);
6637
6638                 drm_WARN_ON(state->base.dev,
6639                             linked_plane_state->planar_linked_plane != plane);
6640                 drm_WARN_ON(state->base.dev,
6641                             linked_plane_state->planar_slave == plane_state->planar_slave);
6642         }
6643
6644         return 0;
6645 }
6646
/*
 * On icl+ a planar (NV12-class) UV plane needs a second, Y-capable
 * plane slaved to it. Tear down all stale plane links for this crtc,
 * then pick a free Y plane for every plane in crtc_state->nv12_planes
 * and copy the relevant hw parameters to the slave. Returns 0, or
 * -EINVAL when not enough free Y planes are available.
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;
	int i;

	if (DISPLAY_VER(dev_priv) < 11)
		return 0;

	/*
	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
			continue;

		plane_state->planar_linked_plane = NULL;
		if (plane_state->planar_slave && !plane_state->uapi.visible) {
			crtc_state->enabled_planes &= ~BIT(plane->id);
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);
		}

		plane_state->planar_slave = false;
	}

	/* no planar planes on this crtc -> nothing to link */
	if (!crtc_state->nv12_planes)
		return 0;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;

		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))
			continue;

		/* find a free Y-capable plane on this crtc */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(dev_priv, linked->id))
				continue;

			if (crtc_state->active_planes & BIT(linked->id))
				continue;

			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);

			break;
		}

		if (!linked_state) {
			drm_dbg_kms(&dev_priv->drm,
				    "Need %d free Y planes for planar YUV\n",
				    hweight8(crtc_state->nv12_planes));

			return -EINVAL;
		}

		/* establish the mutual link and activate the slave */
		plane_state->planar_linked_plane = linked;

		linked_state->planar_slave = true;
		linked_state->planar_linked_plane = plane;
		crtc_state->enabled_planes |= BIT(linked->id);
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
			    linked->base.name, plane->base.name);

		/* Copy parameters to slave plane */
		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
		linked_state->color_ctl = plane_state->color_ctl;
		linked_state->view = plane_state->view;

		intel_plane_copy_hw_state(linked_state, plane_state);
		linked_state->uapi.src = plane_state->uapi.src;
		linked_state->uapi.dst = plane_state->uapi.dst;

		/* HDR planes additionally record the chroma-upsampler pairing */
		if (icl_is_hdr_plane(dev_priv, plane->id)) {
			if (linked->id == PLANE_SPRITE5)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
			else if (linked->id == PLANE_SPRITE4)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
			else if (linked->id == PLANE_SPRITE3)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL;
			else if (linked->id == PLANE_SPRITE2)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL;
			else
				MISSING_CASE(linked->id);
		}
	}

	return 0;
}
6744
6745 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
6746 {
6747         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
6748         struct intel_atomic_state *state =
6749                 to_intel_atomic_state(new_crtc_state->uapi.state);
6750         const struct intel_crtc_state *old_crtc_state =
6751                 intel_atomic_get_old_crtc_state(state, crtc);
6752
6753         return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
6754 }
6755
6756 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
6757 {
6758         const struct drm_display_mode *pipe_mode =
6759                 &crtc_state->hw.pipe_mode;
6760         int linetime_wm;
6761
6762         if (!crtc_state->hw.enable)
6763                 return 0;
6764
6765         linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
6766                                         pipe_mode->crtc_clock);
6767
6768         return min(linetime_wm, 0x1ff);
6769 }
6770
6771 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
6772                                const struct intel_cdclk_state *cdclk_state)
6773 {
6774         const struct drm_display_mode *pipe_mode =
6775                 &crtc_state->hw.pipe_mode;
6776         int linetime_wm;
6777
6778         if (!crtc_state->hw.enable)
6779                 return 0;
6780
6781         linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
6782                                         cdclk_state->logical.cdclk);
6783
6784         return min(linetime_wm, 0x1ff);
6785 }
6786
6787 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
6788 {
6789         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6790         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6791         const struct drm_display_mode *pipe_mode =
6792                 &crtc_state->hw.pipe_mode;
6793         int linetime_wm;
6794
6795         if (!crtc_state->hw.enable)
6796                 return 0;
6797
6798         linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
6799                                    crtc_state->pixel_rate);
6800
6801         /* Display WA #1135: BXT:ALL GLK:ALL */
6802         if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
6803             dev_priv->ipc_enabled)
6804                 linetime_wm /= 2;
6805
6806         return min(linetime_wm, 0x1ff);
6807 }
6808
/*
 * Compute crtc_state->linetime (and, when the crtc supports IPS,
 * crtc_state->ips_linetime) for the new crtc state.
 * Returns 0 on success or a negative error code.
 */
static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_cdclk_state *cdclk_state;

	if (DISPLAY_VER(dev_priv) >= 9)
		crtc_state->linetime = skl_linetime_wm(crtc_state);
	else
		crtc_state->linetime = hsw_linetime_wm(crtc_state);

	/* ips_linetime is only meaningful where IPS can be used */
	if (!hsw_crtc_supports_ips(crtc))
		return 0;

	/* ips_linetime depends on the logical cdclk */
	cdclk_state = intel_atomic_get_cdclk_state(state);
	if (IS_ERR(cdclk_state))
		return PTR_ERR(cdclk_state);

	crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
						       cdclk_state);

	return 0;
}
6834
/*
 * Per-crtc atomic check: validates/computes clocks, color management,
 * watermarks, scalers, IPS and PSR2 selective fetch state for the new
 * crtc state. Returns 0 on success or a negative error code.
 */
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool mode_changed = intel_crtc_needs_modeset(crtc_state);
	int ret;

	/* pre-gen5 (except g4x): update watermarks after the pipe goes inactive */
	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
	    mode_changed && !crtc_state->hw.active)
		crtc_state->update_wm_post = true;

	/*
	 * Compute a new clock config on a full modeset. Skipped for
	 * bigjoiner slaves; a shared DPLL must not already be attached
	 * at this point.
	 */
	if (mode_changed && crtc_state->hw.enable &&
	    dev_priv->dpll_funcs &&
	    !crtc_state->bigjoiner_slave &&
	    !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
		ret = dev_priv->dpll_funcs->crtc_compute_clock(crtc_state);
		if (ret)
			return ret;
	}

	/*
	 * May need to update pipe gamma enable bits
	 * when C8 planes are getting enabled/disabled.
	 */
	if (c8_planes_changed(crtc_state))
		crtc_state->uapi.color_mgmt_changed = true;

	if (mode_changed || crtc_state->update_pipe ||
	    crtc_state->uapi.color_mgmt_changed) {
		ret = intel_color_check(crtc_state);
		if (ret)
			return ret;
	}

	ret = intel_compute_pipe_wm(state, crtc);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Target pipe watermarks are invalid\n");
		return ret;
	}

	/*
	 * Calculate 'intermediate' watermarks that satisfy both the
	 * old state and the new state.  We can program these
	 * immediately.
	 */
	ret = intel_compute_intermediate_wm(state, crtc);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "No valid intermediate pipe watermarks are possible\n");
		return ret;
	}

	/* gen9+: (re)compute and assign pipe/plane scalers */
	if (DISPLAY_VER(dev_priv) >= 9) {
		if (mode_changed || crtc_state->update_pipe) {
			ret = skl_update_scaler_crtc(crtc_state);
			if (ret)
				return ret;
		}

		ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
		if (ret)
			return ret;
	}

	if (HAS_IPS(dev_priv)) {
		ret = hsw_compute_ips_config(crtc_state);
		if (ret)
			return ret;
	}

	if (DISPLAY_VER(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		ret = hsw_compute_linetime_wm(state, crtc);
		if (ret)
			return ret;

	}

	ret = intel_psr2_sel_fetch_update(state, crtc);
	if (ret)
		return ret;

	return 0;
}
6922
/*
 * Bring every connector's atomic state back in sync with the current
 * encoder/crtc routing, fixing up the connector reference counts to
 * match the new crtc binding.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state = connector->base.state;
		struct intel_encoder *encoder =
			to_intel_encoder(connector->base.encoder);

		/* drop the reference held for the previous crtc binding */
		if (conn_state->crtc)
			drm_connector_put(&connector->base);

		if (encoder) {
			struct intel_crtc *crtc =
				to_intel_crtc(encoder->base.crtc);
			const struct intel_crtc_state *crtc_state =
				to_intel_crtc_state(crtc->base.state);

			conn_state->best_encoder = &encoder->base;
			conn_state->crtc = &crtc->base;
			/* pipe_bpp of 0 falls back to 24 (8 bpc) */
			conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3;

			/* take a reference for the new crtc binding */
			drm_connector_get(&connector->base);
		} else {
			conn_state->best_encoder = NULL;
			conn_state->crtc = NULL;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
6955
6956 static int
6957 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
6958                       struct intel_crtc_state *pipe_config)
6959 {
6960         struct drm_connector *connector = conn_state->connector;
6961         struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
6962         const struct drm_display_info *info = &connector->display_info;
6963         int bpp;
6964
6965         switch (conn_state->max_bpc) {
6966         case 6 ... 7:
6967                 bpp = 6 * 3;
6968                 break;
6969         case 8 ... 9:
6970                 bpp = 8 * 3;
6971                 break;
6972         case 10 ... 11:
6973                 bpp = 10 * 3;
6974                 break;
6975         case 12 ... 16:
6976                 bpp = 12 * 3;
6977                 break;
6978         default:
6979                 MISSING_CASE(conn_state->max_bpc);
6980                 return -EINVAL;
6981         }
6982
6983         if (bpp < pipe_config->pipe_bpp) {
6984                 drm_dbg_kms(&i915->drm,
6985                             "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
6986                             "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
6987                             connector->base.id, connector->name,
6988                             bpp, 3 * info->bpc,
6989                             3 * conn_state->max_requested_bpc,
6990                             pipe_config->pipe_bpp);
6991
6992                 pipe_config->pipe_bpp = bpp;
6993         }
6994
6995         return 0;
6996 }
6997
6998 static int
6999 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
7000                           struct intel_crtc_state *pipe_config)
7001 {
7002         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7003         struct drm_atomic_state *state = pipe_config->uapi.state;
7004         struct drm_connector *connector;
7005         struct drm_connector_state *connector_state;
7006         int bpp, i;
7007
7008         if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
7009             IS_CHERRYVIEW(dev_priv)))
7010                 bpp = 10*3;
7011         else if (DISPLAY_VER(dev_priv) >= 5)
7012                 bpp = 12*3;
7013         else
7014                 bpp = 8*3;
7015
7016         pipe_config->pipe_bpp = bpp;
7017
7018         /* Clamp display bpp to connector max bpp */
7019         for_each_new_connector_in_state(state, connector, connector_state, i) {
7020                 int ret;
7021
7022                 if (connector_state->crtc != &crtc->base)
7023                         continue;
7024
7025                 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
7026                 if (ret)
7027                         return ret;
7028         }
7029
7030         return 0;
7031 }
7032
/* Log the crtc_* (hardware) timings of @mode to the KMS debug log. */
static void intel_dump_crtc_timings(struct drm_i915_private *i915,
				    const struct drm_display_mode *mode)
{
	drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
		    "type: 0x%x flags: 0x%x\n",
		    mode->crtc_clock,
		    mode->crtc_hdisplay, mode->crtc_hsync_start,
		    mode->crtc_hsync_end, mode->crtc_htotal,
		    mode->crtc_vdisplay, mode->crtc_vsync_start,
		    mode->crtc_vsync_end, mode->crtc_vtotal,
		    mode->type, mode->flags);
}
7045
/*
 * Log one link M/N configuration. @id names which set is being dumped
 * (e.g. "fdi", "dp m_n", "dp m2_n2").
 */
static void
intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
		      const char *id, unsigned int lane_count,
		      const struct intel_link_m_n *m_n)
{
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);

	drm_dbg_kms(&i915->drm,
		    "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		    id, lane_count,
		    m_n->gmch_m, m_n->gmch_n,
		    m_n->link_m, m_n->link_n, m_n->tu);
}
7059
7060 static void
7061 intel_dump_infoframe(struct drm_i915_private *dev_priv,
7062                      const union hdmi_infoframe *frame)
7063 {
7064         if (!drm_debug_enabled(DRM_UT_KMS))
7065                 return;
7066
7067         hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
7068 }
7069
7070 static void
7071 intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
7072                       const struct drm_dp_vsc_sdp *vsc)
7073 {
7074         if (!drm_debug_enabled(DRM_UT_KMS))
7075                 return;
7076
7077         drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
7078 }
7079
/* Expands to a designated initializer: [INTEL_OUTPUT_FOO] = "FOO" */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

/* Human-readable names for the INTEL_OUTPUT_* values, indexed by value. */
static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
7098
7099 static void snprintf_output_types(char *buf, size_t len,
7100                                   unsigned int output_types)
7101 {
7102         char *str = buf;
7103         int i;
7104
7105         str[0] = '\0';
7106
7107         for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
7108                 int r;
7109
7110                 if ((output_types & BIT(i)) == 0)
7111                         continue;
7112
7113                 r = snprintf(str, len, "%s%s",
7114                              str != buf ? "," : "", output_type_str[i]);
7115                 if (r >= len)
7116                         break;
7117                 str += r;
7118                 len -= r;
7119
7120                 output_types &= ~BIT(i);
7121         }
7122
7123         WARN_ON_ONCE(output_types != 0);
7124 }
7125
/* Human-readable names for the INTEL_OUTPUT_FORMAT_* values, indexed by value. */
static const char * const output_format_str[] = {
	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
};
7131
7132 static const char *output_formats(enum intel_output_format format)
7133 {
7134         if (format >= ARRAY_SIZE(output_format_str))
7135                 return "invalid";
7136         return output_format_str[format];
7137 }
7138
/* Log a plane's fb, visibility, rotation, scaler and src/dst rectangles. */
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;

	/* short form when no fb is attached */
	if (!fb) {
		drm_dbg_kms(&i915->drm,
			    "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
			    plane->base.base.id, plane->base.name,
			    yesno(plane_state->uapi.visible));
		return;
	}

	drm_dbg_kms(&i915->drm,
		    "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
		    plane->base.base.id, plane->base.name,
		    fb->base.id, fb->width, fb->height, &fb->format->format,
		    fb->modifier, yesno(plane_state->uapi.visible));
	drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
		    plane_state->hw.rotation, plane_state->scaler_id);
	/* src/dst rects are only meaningful for visible planes */
	if (plane_state->uapi.visible)
		drm_dbg_kms(&i915->drm,
			    "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
			    DRM_RECT_FP_ARG(&plane_state->uapi.src),
			    DRM_RECT_ARG(&plane_state->uapi.dst));
}
7166
/*
 * Dump the whole crtc state to the KMS debug log. @context is a short
 * caller-supplied tag logged alongside the crtc. When @state is given,
 * the states of all planes on this crtc's pipe are dumped as well.
 */
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
				   struct intel_atomic_state *state,
				   const char *context)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	char buf[64];
	int i;

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
		    crtc->base.base.id, crtc->base.name,
		    yesno(pipe_config->hw.enable), context);

	/* disabled pipes only get the plane dump below */
	if (!pipe_config->hw.enable)
		goto dump_planes;

	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	drm_dbg_kms(&dev_priv->drm,
		    "active: %s, output_types: %s (0x%x), output format: %s\n",
		    yesno(pipe_config->hw.active),
		    buf, pipe_config->output_types,
		    output_formats(pipe_config->output_format));

	drm_dbg_kms(&dev_priv->drm,
		    "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		    transcoder_name(pipe_config->cpu_transcoder),
		    pipe_config->pipe_bpp, pipe_config->dither);

	drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
		    transcoder_name(pipe_config->mst_master_transcoder));

	drm_dbg_kms(&dev_priv->drm,
		    "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
		    transcoder_name(pipe_config->master_transcoder),
		    pipe_config->sync_mode_slaves_mask);

	drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s\n",
		    pipe_config->bigjoiner_slave ? "slave" :
		    pipe_config->bigjoiner ? "master" : "no");

	drm_dbg_kms(&dev_priv->drm, "splitter: %s, link count %d, overlap %d\n",
		    enableddisabled(pipe_config->splitter.enable),
		    pipe_config->splitter.link_count,
		    pipe_config->splitter.pixel_overlap);

	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
		    pipe_config->has_audio, pipe_config->has_infoframe,
		    pipe_config->infoframes.enable);

	/* dump each infoframe/SDP type whose enable bit is set */
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
		drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
			    pipe_config->infoframes.gcp);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
	/*
	 * NOTE(review): the gamut metadata case dumps .drm again, same as
	 * the DRM infoframe case above — looks intentional (HDR metadata
	 * carried in the gamut metadata packet), but worth confirming.
	 */
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(DP_SDP_VSC))
		intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);

	drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
		    yesno(pipe_config->vrr.enable),
		    pipe_config->vrr.vmin, pipe_config->vrr.vmax,
		    pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
		    pipe_config->vrr.flipline,
		    intel_vrr_vmin_vblank_start(pipe_config),
		    intel_vrr_vmax_vblank_start(pipe_config));

	drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
	drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
	drm_dbg_kms(&dev_priv->drm, "pipe mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.pipe_mode);
	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.pipe_mode);
	drm_dbg_kms(&dev_priv->drm,
		    "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		    pipe_config->port_clock,
		    pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		    pipe_config->pixel_rate);

	drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
		    pipe_config->linetime, pipe_config->ips_linetime);

	if (DISPLAY_VER(dev_priv) >= 9)
		drm_dbg_kms(&dev_priv->drm,
			    "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			    crtc->num_scalers,
			    pipe_config->scaler_state.scaler_users,
			    pipe_config->scaler_state.scaler_id);

	if (HAS_GMCH(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			    pipe_config->gmch_pfit.control,
			    pipe_config->gmch_pfit.pgm_ratios,
			    pipe_config->gmch_pfit.lvds_border_bits);
	else
		drm_dbg_kms(&dev_priv->drm,
			    "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
			    DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
			    enableddisabled(pipe_config->pch_pfit.enabled),
			    yesno(pipe_config->pch_pfit.force_thru));

	drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
		    pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	if (IS_CHERRYVIEW(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			    pipe_config->cgm_mode, pipe_config->gamma_mode,
			    pipe_config->gamma_enable, pipe_config->csc_enable);
	else
		drm_dbg_kms(&dev_priv->drm,
			    "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			    pipe_config->csc_mode, pipe_config->gamma_mode,
			    pipe_config->gamma_enable, pipe_config->csc_enable);

	drm_dbg_kms(&dev_priv->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
		    pipe_config->hw.degamma_lut ?
		    drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
		    pipe_config->hw.gamma_lut ?
		    drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);

dump_planes:
	if (!state)
		return;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe)
			intel_dump_plane_state(plane_state);
	}
}
7332
7333 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
7334 {
7335         struct drm_device *dev = state->base.dev;
7336         struct drm_connector *connector;
7337         struct drm_connector_list_iter conn_iter;
7338         unsigned int used_ports = 0;
7339         unsigned int used_mst_ports = 0;
7340         bool ret = true;
7341
7342         /*
7343          * We're going to peek into connector->state,
7344          * hence connection_mutex must be held.
7345          */
7346         drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
7347
7348         /*
7349          * Walk the connector list instead of the encoder
7350          * list to detect the problem on ddi platforms
7351          * where there's just one encoder per digital port.
7352          */
7353         drm_connector_list_iter_begin(dev, &conn_iter);
7354         drm_for_each_connector_iter(connector, &conn_iter) {
7355                 struct drm_connector_state *connector_state;
7356                 struct intel_encoder *encoder;
7357
7358                 connector_state =
7359                         drm_atomic_get_new_connector_state(&state->base,
7360                                                            connector);
7361                 if (!connector_state)
7362                         connector_state = connector->state;
7363
7364                 if (!connector_state->best_encoder)
7365                         continue;
7366
7367                 encoder = to_intel_encoder(connector_state->best_encoder);
7368
7369                 drm_WARN_ON(dev, !connector_state->crtc);
7370
7371                 switch (encoder->type) {
7372                 case INTEL_OUTPUT_DDI:
7373                         if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
7374                                 break;
7375                         fallthrough;
7376                 case INTEL_OUTPUT_DP:
7377                 case INTEL_OUTPUT_HDMI:
7378                 case INTEL_OUTPUT_EDP:
7379                         /* the same port mustn't appear more than once */
7380                         if (used_ports & BIT(encoder->port))
7381                                 ret = false;
7382
7383                         used_ports |= BIT(encoder->port);
7384                         break;
7385                 case INTEL_OUTPUT_DP_MST:
7386                         used_mst_ports |=
7387                                 1 << encoder->port;
7388                         break;
7389                 default:
7390                         break;
7391                 }
7392         }
7393         drm_connector_list_iter_end(&conn_iter);
7394
7395         /* can't mix MST and SST/HDMI on the same port */
7396         if (used_ports & used_mst_ports)
7397                 return false;
7398
7399         return ret;
7400 }
7401
7402 static void
7403 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
7404                                            struct intel_crtc_state *crtc_state)
7405 {
7406         const struct intel_crtc_state *from_crtc_state = crtc_state;
7407
7408         if (crtc_state->bigjoiner_slave) {
7409                 from_crtc_state = intel_atomic_get_new_crtc_state(state,
7410                                                                   crtc_state->bigjoiner_linked_crtc);
7411
7412                 /* No need to copy state if the master state is unchanged */
7413                 if (!from_crtc_state)
7414                         return;
7415         }
7416
7417         intel_crtc_copy_color_blobs(crtc_state, from_crtc_state);
7418 }
7419
/*
 * Seed the hw crtc state from the uapi (userspace-visible) state at the
 * start of state computation.
 */
static void
intel_crtc_copy_uapi_to_hw_state(struct intel_atomic_state *state,
				 struct intel_crtc_state *crtc_state)
{
	crtc_state->hw.enable = crtc_state->uapi.enable;
	crtc_state->hw.active = crtc_state->uapi.active;
	crtc_state->hw.mode = crtc_state->uapi.mode;
	crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
	crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;

	/* color blobs are handled separately (bigjoiner aware) */
	intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc_state);
}
7432
/*
 * Copy the hw crtc state back into the uapi state. Bigjoiner slaves
 * are skipped.
 */
static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
{
	if (crtc_state->bigjoiner_slave)
		return;

	crtc_state->uapi.enable = crtc_state->hw.enable;
	crtc_state->uapi.active = crtc_state->hw.active;
	/* duplicates hw.mode into the uapi mode blob; should not fail here */
	drm_WARN_ON(crtc_state->uapi.crtc->dev,
		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);

	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
	crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;

	/* copy color blobs to uapi */
	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
				  crtc_state->hw.degamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
				  crtc_state->hw.gamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.ctm,
				  crtc_state->hw.ctm);
}
7454
/*
 * Initialize a bigjoiner slave's crtc state as a copy of its master's
 * state (@from_crtc_state), keeping the slave's own uapi/scaler/dpll/crc
 * members, then fix up the fields that must differ on the slave.
 * Returns 0 on success or -ENOMEM.
 */
static int
copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state,
			  const struct intel_crtc_state *from_crtc_state)
{
	struct intel_crtc_state *saved_state;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL);
	if (!saved_state)
		return -ENOMEM;

	/* keep the slave's own copies of these, not the master's */
	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	saved_state->crc_enabled = crtc_state->crc_enabled;

	/* free old hw members before they are overwritten by the memcpy */
	intel_crtc_free_hw_state(crtc_state);
	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	/* Re-init hw state */
	memset(&crtc_state->hw, 0, sizeof(saved_state->hw));
	crtc_state->hw.enable = from_crtc_state->hw.enable;
	crtc_state->hw.active = from_crtc_state->hw.active;
	crtc_state->hw.pipe_mode = from_crtc_state->hw.pipe_mode;
	crtc_state->hw.adjusted_mode = from_crtc_state->hw.adjusted_mode;

	/* Some fixups */
	crtc_state->uapi.mode_changed = from_crtc_state->uapi.mode_changed;
	crtc_state->uapi.connectors_changed = from_crtc_state->uapi.connectors_changed;
	crtc_state->uapi.active_changed = from_crtc_state->uapi.active_changed;
	crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0;
	crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc);
	crtc_state->bigjoiner_slave = true;
	crtc_state->cpu_transcoder = (enum transcoder)crtc->pipe;
	crtc_state->has_audio = false;

	return 0;
}
7495
/*
 * Reset crtc_state to a freshly allocated state before recomputing it,
 * preserving only the members that must survive: uapi, scaler state,
 * dplls, crc enable, and (on g4x/vlv/chv) the watermarks.
 * Returns 0 on success or -ENOMEM.
 */
static int
intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
				 struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *saved_state;

	saved_state = intel_crtc_state_alloc(crtc);
	if (!saved_state)
		return -ENOMEM;

	/* free the old crtc_state->hw members */
	intel_crtc_free_hw_state(crtc_state);

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;

	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	/* re-seed the hw state from the (preserved) uapi state */
	intel_crtc_copy_uapi_to_hw_state(state, crtc_state);

	return 0;
}
7534
/*
 * Compute the full pipe configuration for a modeset: sanitize sync polarity
 * flags, establish a baseline pipe bpp, determine the pipe source size, and
 * let every encoder attached to this CRTC adjust (or reject) the state via
 * its ->compute_config() hook. The encoder pass plus CRTC fixup is retried
 * at most once if the fixup returns I915_DISPLAY_CONFIG_RETRY.
 *
 * Returns 0 on success, -EDEADLK on lock contention (caller must back off
 * and restart the atomic check), or another negative errno on failure.
 */
static int
intel_modeset_pipe_config(struct intel_atomic_state *state,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret, i;
	bool retry = true;

	/* Default 1:1 pipe to transcoder mapping. */
	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					pipe_config);
	if (ret)
		return ret;

	/* Remember the max bpp before the encoders get a chance to lower it. */
	base_bpp = pipe_config->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->hw.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != crtc)
			continue;

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			drm_dbg_kms(&i915->drm,
				    "rejecting invalid cloning configuration\n");
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != crtc)
			continue;

		ret = encoder->compute_config(encoder, pipe_config,
					      connector_state);
		if (ret < 0) {
			/* -EDEADLK is expected backoff, not worth logging. */
			if (ret != -EDEADLK)
				drm_dbg_kms(&i915->drm,
					    "Encoder config failure: %d\n",
					    ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret == -EDEADLK)
		return ret;
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
		return ret;
	}

	if (ret == I915_DISPLAY_CONFIG_RETRY) {
		if (drm_WARN(&i915->drm, !retry,
			     "loop in pipe configuration computation\n"))
			return -EINVAL;

		drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
		retry = false; /* allow at most one retry */
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when it's not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	drm_dbg_kms(&i915->drm,
		    "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		    base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	return 0;
}
7673
7674 static int
7675 intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
7676 {
7677         struct intel_atomic_state *state =
7678                 to_intel_atomic_state(crtc_state->uapi.state);
7679         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7680         struct drm_connector_state *conn_state;
7681         struct drm_connector *connector;
7682         int i;
7683
7684         for_each_new_connector_in_state(&state->base, connector,
7685                                         conn_state, i) {
7686                 struct intel_encoder *encoder =
7687                         to_intel_encoder(conn_state->best_encoder);
7688                 int ret;
7689
7690                 if (conn_state->crtc != &crtc->base ||
7691                     !encoder->compute_config_late)
7692                         continue;
7693
7694                 ret = encoder->compute_config_late(encoder, crtc_state,
7695                                                    conn_state);
7696                 if (ret)
7697                         return ret;
7698         }
7699
7700         return 0;
7701 }
7702
/*
 * Compare two clocks (in kHz) with some slack: identical clocks always
 * match, a zero clock never matches a non-zero one, and otherwise the
 * clocks match when their difference is below 5% of their sum.
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int sum, delta;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	delta = abs(clock1 - clock2);
	sum = clock1 + clock2;

	/* (delta + sum) * 100 / sum < 105  <=>  delta < 5% of sum */
	return (delta + sum) * 100 / sum < 105;
}
7720
7721 static bool
7722 intel_compare_m_n(unsigned int m, unsigned int n,
7723                   unsigned int m2, unsigned int n2,
7724                   bool exact)
7725 {
7726         if (m == m2 && n == n2)
7727                 return true;
7728
7729         if (exact || !m || !n || !m2 || !n2)
7730                 return false;
7731
7732         BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
7733
7734         if (n > n2) {
7735                 while (n > n2) {
7736                         m2 <<= 1;
7737                         n2 <<= 1;
7738                 }
7739         } else if (n < n2) {
7740                 while (n < n2) {
7741                         m <<= 1;
7742                         n <<= 1;
7743                 }
7744         }
7745
7746         if (n != n2)
7747                 return false;
7748
7749         return intel_fuzzy_clock_check(m, m2);
7750 }
7751
7752 static bool
7753 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
7754                        const struct intel_link_m_n *m2_n2,
7755                        bool exact)
7756 {
7757         return m_n->tu == m2_n2->tu &&
7758                 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
7759                                   m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
7760                 intel_compare_m_n(m_n->link_m, m_n->link_n,
7761                                   m2_n2->link_m, m2_n2->link_n, exact);
7762 }
7763
7764 static bool
7765 intel_compare_infoframe(const union hdmi_infoframe *a,
7766                         const union hdmi_infoframe *b)
7767 {
7768         return memcmp(a, b, sizeof(*a)) == 0;
7769 }
7770
7771 static bool
7772 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
7773                          const struct drm_dp_vsc_sdp *b)
7774 {
7775         return memcmp(a, b, sizeof(*a)) == 0;
7776 }
7777
/*
 * Dump an infoframe mismatch between the expected sw state (@a) and the
 * hw readout (@b). During a fastset check this is only informational and
 * goes to the KMS debug log; during a full state check it is an error.
 */
static void
pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
			       bool fastset, const char *name,
			       const union hdmi_infoframe *a,
			       const union hdmi_infoframe *b)
{
	if (fastset) {
		/* Avoid the full dump when KMS debugging is disabled. */
		if (!drm_debug_enabled(DRM_UT_KMS))
			return;

		drm_dbg_kms(&dev_priv->drm,
			    "fastset mismatch in %s infoframe\n", name);
		drm_dbg_kms(&dev_priv->drm, "expected:\n");
		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
		drm_dbg_kms(&dev_priv->drm, "found:\n");
		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
	} else {
		drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
		drm_err(&dev_priv->drm, "expected:\n");
		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
		drm_err(&dev_priv->drm, "found:\n");
		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
	}
}
7802
/*
 * Dump a DP VSC SDP mismatch between the expected sw state (@a) and the
 * hw readout (@b). During a fastset check this is only informational and
 * goes to the KMS debug log; during a full state check it is an error.
 */
static void
pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
				bool fastset, const char *name,
				const struct drm_dp_vsc_sdp *a,
				const struct drm_dp_vsc_sdp *b)
{
	if (fastset) {
		/* Avoid the full dump when KMS debugging is disabled. */
		if (!drm_debug_enabled(DRM_UT_KMS))
			return;

		drm_dbg_kms(&dev_priv->drm,
			    "fastset mismatch in %s dp sdp\n", name);
		drm_dbg_kms(&dev_priv->drm, "expected:\n");
		drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
		drm_dbg_kms(&dev_priv->drm, "found:\n");
		drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
	} else {
		drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
		drm_err(&dev_priv->drm, "expected:\n");
		drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
		drm_err(&dev_priv->drm, "found:\n");
		drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
	}
}
7827
/*
 * Report a single pipe config field mismatch on @crtc. @format/... spell
 * out the expected vs. found values (printed via the %pV va_format
 * mechanism). A fastset mismatch is only logged at debug level (it just
 * forces a full modeset); otherwise it is an error.
 */
static void __printf(4, 5)
pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
		     const char *name, const char *format, ...)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	if (fastset)
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
			    crtc->base.base.id, crtc->base.name, name, &vaf);
	else
		drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
			crtc->base.base.id, crtc->base.name, name, &vaf);

	va_end(args);
}
7850
7851 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
7852 {
7853         if (dev_priv->params.fastboot != -1)
7854                 return dev_priv->params.fastboot;
7855
7856         /* Enable fastboot by default on Skylake and newer */
7857         if (DISPLAY_VER(dev_priv) >= 9)
7858                 return true;
7859
7860         /* Enable fastboot by default on VLV and CHV */
7861         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
7862                 return true;
7863
7864         /* Disabled by default on all others */
7865         return false;
7866 }
7867
7868 static bool
7869 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
7870                           const struct intel_crtc_state *pipe_config,
7871                           bool fastset)
7872 {
7873         struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
7874         struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
7875         bool ret = true;
7876         u32 bp_gamma = 0;
7877         bool fixup_inherited = fastset &&
7878                 current_config->inherited && !pipe_config->inherited;
7879
7880         if (fixup_inherited && !fastboot_enabled(dev_priv)) {
7881                 drm_dbg_kms(&dev_priv->drm,
7882                             "initial modeset and fastboot not set\n");
7883                 ret = false;
7884         }
7885
7886 #define PIPE_CONF_CHECK_X(name) do { \
7887         if (current_config->name != pipe_config->name) { \
7888                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
7889                                      "(expected 0x%08x, found 0x%08x)", \
7890                                      current_config->name, \
7891                                      pipe_config->name); \
7892                 ret = false; \
7893         } \
7894 } while (0)
7895
7896 #define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \
7897         if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
7898                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
7899                                      "(expected 0x%08x, found 0x%08x)", \
7900                                      current_config->name & (mask), \
7901                                      pipe_config->name & (mask)); \
7902                 ret = false; \
7903         } \
7904 } while (0)
7905
7906 #define PIPE_CONF_CHECK_I(name) do { \
7907         if (current_config->name != pipe_config->name) { \
7908                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
7909                                      "(expected %i, found %i)", \
7910                                      current_config->name, \
7911                                      pipe_config->name); \
7912                 ret = false; \
7913         } \
7914 } while (0)
7915
7916 #define PIPE_CONF_CHECK_BOOL(name) do { \
7917         if (current_config->name != pipe_config->name) { \
7918                 pipe_config_mismatch(fastset, crtc,  __stringify(name), \
7919                                      "(expected %s, found %s)", \
7920                                      yesno(current_config->name), \
7921                                      yesno(pipe_config->name)); \
7922                 ret = false; \
7923         } \
7924 } while (0)
7925
7926 /*
7927  * Checks state where we only read out the enabling, but not the entire
7928  * state itself (like full infoframes or ELD for audio). These states
7929  * require a full modeset on bootup to fix up.
7930  */
7931 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
7932         if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
7933                 PIPE_CONF_CHECK_BOOL(name); \
7934         } else { \
7935                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
7936                                      "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
7937                                      yesno(current_config->name), \
7938                                      yesno(pipe_config->name)); \
7939                 ret = false; \
7940         } \
7941 } while (0)
7942
7943 #define PIPE_CONF_CHECK_P(name) do { \
7944         if (current_config->name != pipe_config->name) { \
7945                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
7946                                      "(expected %p, found %p)", \
7947                                      current_config->name, \
7948                                      pipe_config->name); \
7949                 ret = false; \
7950         } \
7951 } while (0)
7952
7953 #define PIPE_CONF_CHECK_M_N(name) do { \
7954         if (!intel_compare_link_m_n(&current_config->name, \
7955                                     &pipe_config->name,\
7956                                     !fastset)) { \
7957                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
7958                                      "(expected tu %i gmch %i/%i link %i/%i, " \
7959                                      "found tu %i, gmch %i/%i link %i/%i)", \
7960                                      current_config->name.tu, \
7961                                      current_config->name.gmch_m, \
7962                                      current_config->name.gmch_n, \
7963                                      current_config->name.link_m, \
7964                                      current_config->name.link_n, \
7965                                      pipe_config->name.tu, \
7966                                      pipe_config->name.gmch_m, \
7967                                      pipe_config->name.gmch_n, \
7968                                      pipe_config->name.link_m, \
7969                                      pipe_config->name.link_n); \
7970                 ret = false; \
7971         } \
7972 } while (0)
7973
7974 /* This is required for BDW+ where there is only one set of registers for
7975  * switching between high and low RR.
7976  * This macro can be used whenever a comparison has to be made between one
7977  * hw state and multiple sw state variables.
7978  */
7979 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
7980         if (!intel_compare_link_m_n(&current_config->name, \
7981                                     &pipe_config->name, !fastset) && \
7982             !intel_compare_link_m_n(&current_config->alt_name, \
7983                                     &pipe_config->name, !fastset)) { \
7984                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
7985                                      "(expected tu %i gmch %i/%i link %i/%i, " \
7986                                      "or tu %i gmch %i/%i link %i/%i, " \
7987                                      "found tu %i, gmch %i/%i link %i/%i)", \
7988                                      current_config->name.tu, \
7989                                      current_config->name.gmch_m, \
7990                                      current_config->name.gmch_n, \
7991                                      current_config->name.link_m, \
7992                                      current_config->name.link_n, \
7993                                      current_config->alt_name.tu, \
7994                                      current_config->alt_name.gmch_m, \
7995                                      current_config->alt_name.gmch_n, \
7996                                      current_config->alt_name.link_m, \
7997                                      current_config->alt_name.link_n, \
7998                                      pipe_config->name.tu, \
7999                                      pipe_config->name.gmch_m, \
8000                                      pipe_config->name.gmch_n, \
8001                                      pipe_config->name.link_m, \
8002                                      pipe_config->name.link_n); \
8003                 ret = false; \
8004         } \
8005 } while (0)
8006
8007 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
8008         if ((current_config->name ^ pipe_config->name) & (mask)) { \
8009                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8010                                      "(%x) (expected %i, found %i)", \
8011                                      (mask), \
8012                                      current_config->name & (mask), \
8013                                      pipe_config->name & (mask)); \
8014                 ret = false; \
8015         } \
8016 } while (0)
8017
8018 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
8019         if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
8020                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8021                                      "(expected %i, found %i)", \
8022                                      current_config->name, \
8023                                      pipe_config->name); \
8024                 ret = false; \
8025         } \
8026 } while (0)
8027
8028 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
8029         if (!intel_compare_infoframe(&current_config->infoframes.name, \
8030                                      &pipe_config->infoframes.name)) { \
8031                 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
8032                                                &current_config->infoframes.name, \
8033                                                &pipe_config->infoframes.name); \
8034                 ret = false; \
8035         } \
8036 } while (0)
8037
8038 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
8039         if (!current_config->has_psr && !pipe_config->has_psr && \
8040             !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
8041                                       &pipe_config->infoframes.name)) { \
8042                 pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
8043                                                 &current_config->infoframes.name, \
8044                                                 &pipe_config->infoframes.name); \
8045                 ret = false; \
8046         } \
8047 } while (0)
8048
8049 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
8050         if (current_config->name1 != pipe_config->name1) { \
8051                 pipe_config_mismatch(fastset, crtc, __stringify(name1), \
8052                                 "(expected %i, found %i, won't compare lut values)", \
8053                                 current_config->name1, \
8054                                 pipe_config->name1); \
8055                 ret = false;\
8056         } else { \
8057                 if (!intel_color_lut_equal(current_config->name2, \
8058                                         pipe_config->name2, pipe_config->name1, \
8059                                         bit_precision)) { \
8060                         pipe_config_mismatch(fastset, crtc, __stringify(name2), \
8061                                         "hw_state doesn't match sw_state"); \
8062                         ret = false; \
8063                 } \
8064         } \
8065 } while (0)
8066
8067 #define PIPE_CONF_QUIRK(quirk) \
8068         ((current_config->quirks | pipe_config->quirks) & (quirk))
8069
8070         PIPE_CONF_CHECK_I(cpu_transcoder);
8071
8072         PIPE_CONF_CHECK_BOOL(has_pch_encoder);
8073         PIPE_CONF_CHECK_I(fdi_lanes);
8074         PIPE_CONF_CHECK_M_N(fdi_m_n);
8075
8076         PIPE_CONF_CHECK_I(lane_count);
8077         PIPE_CONF_CHECK_X(lane_lat_optim_mask);
8078
8079         if (DISPLAY_VER(dev_priv) < 8) {
8080                 PIPE_CONF_CHECK_M_N(dp_m_n);
8081
8082                 if (current_config->has_drrs)
8083                         PIPE_CONF_CHECK_M_N(dp_m2_n2);
8084         } else
8085                 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
8086
8087         PIPE_CONF_CHECK_X(output_types);
8088
8089         /* FIXME do the readout properly and get rid of this quirk */
8090         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
8091                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
8092                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
8093                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
8094                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
8095                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
8096                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);
8097
8098                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
8099                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
8100                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
8101                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
8102                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
8103                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);
8104
8105                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
8106                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
8107                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
8108                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
8109                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
8110                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
8111
8112                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
8113                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
8114                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
8115                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
8116                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
8117                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
8118
8119                 PIPE_CONF_CHECK_I(pixel_multiplier);
8120
8121                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8122                                       DRM_MODE_FLAG_INTERLACE);
8123
8124                 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
8125                         PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8126                                               DRM_MODE_FLAG_PHSYNC);
8127                         PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8128                                               DRM_MODE_FLAG_NHSYNC);
8129                         PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8130                                               DRM_MODE_FLAG_PVSYNC);
8131                         PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8132                                               DRM_MODE_FLAG_NVSYNC);
8133                 }
8134         }
8135
8136         PIPE_CONF_CHECK_I(output_format);
8137         PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
8138         if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
8139             IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
8140                 PIPE_CONF_CHECK_BOOL(limited_color_range);
8141
8142         PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
8143         PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
8144         PIPE_CONF_CHECK_BOOL(has_infoframe);
8145         /* FIXME do the readout properly and get rid of this quirk */
8146         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
8147                 PIPE_CONF_CHECK_BOOL(fec_enable);
8148
8149         PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
8150
8151         PIPE_CONF_CHECK_X(gmch_pfit.control);
8152         /* pfit ratios are autocomputed by the hw on gen4+ */
8153         if (DISPLAY_VER(dev_priv) < 4)
8154                 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
8155         PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
8156
8157         /*
8158          * Changing the EDP transcoder input mux
8159          * (A_ONOFF vs. A_ON) requires a full modeset.
8160          */
8161         PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
8162
8163         if (!fastset) {
8164                 PIPE_CONF_CHECK_I(pipe_src_w);
8165                 PIPE_CONF_CHECK_I(pipe_src_h);
8166
8167                 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
8168                 if (current_config->pch_pfit.enabled) {
8169                         PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
8170                         PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
8171                         PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
8172                         PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
8173                 }
8174
8175                 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
8176                 /* FIXME do the readout properly and get rid of this quirk */
8177                 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
8178                         PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
8179
8180                 PIPE_CONF_CHECK_X(gamma_mode);
8181                 if (IS_CHERRYVIEW(dev_priv))
8182                         PIPE_CONF_CHECK_X(cgm_mode);
8183                 else
8184                         PIPE_CONF_CHECK_X(csc_mode);
8185                 PIPE_CONF_CHECK_BOOL(gamma_enable);
8186                 PIPE_CONF_CHECK_BOOL(csc_enable);
8187
8188                 PIPE_CONF_CHECK_I(linetime);
8189                 PIPE_CONF_CHECK_I(ips_linetime);
8190
8191                 bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
8192                 if (bp_gamma)
8193                         PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
8194
8195                 if (current_config->active_planes) {
8196                         PIPE_CONF_CHECK_BOOL(has_psr);
8197                         PIPE_CONF_CHECK_BOOL(has_psr2);
8198                         PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
8199                         PIPE_CONF_CHECK_I(dc3co_exitline);
8200                 }
8201         }
8202
8203         PIPE_CONF_CHECK_BOOL(double_wide);
8204
8205         if (dev_priv->dpll.mgr)
8206                 PIPE_CONF_CHECK_P(shared_dpll);
8207
8208         /* FIXME do the readout properly and get rid of this quirk */
8209         if (dev_priv->dpll.mgr && !PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
8210                 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
8211                 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
8212                 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
8213                 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
8214                 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
8215                 PIPE_CONF_CHECK_X(dpll_hw_state.spll);
8216                 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
8217                 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
8218                 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
8219                 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
8220                 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
8221                 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
8222                 PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
8223                 PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
8224                 PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
8225                 PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
8226                 PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
8227                 PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
8228                 PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
8229                 PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
8230                 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
8231                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
8232                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
8233                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
8234                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
8235                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
8236                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
8237                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
8238                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
8239                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
8240                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
8241         }
8242
8243         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
8244                 PIPE_CONF_CHECK_X(dsi_pll.ctrl);
8245                 PIPE_CONF_CHECK_X(dsi_pll.div);
8246
8247                 if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
8248                         PIPE_CONF_CHECK_I(pipe_bpp);
8249
8250                 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
8251                 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
8252                 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
8253
8254                 PIPE_CONF_CHECK_I(min_voltage_level);
8255         }
8256
8257         if (current_config->has_psr || pipe_config->has_psr)
8258                 PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
8259                                             ~intel_hdmi_infoframe_enable(DP_SDP_VSC));
8260         else
8261                 PIPE_CONF_CHECK_X(infoframes.enable);
8262
8263         PIPE_CONF_CHECK_X(infoframes.gcp);
8264         PIPE_CONF_CHECK_INFOFRAME(avi);
8265         PIPE_CONF_CHECK_INFOFRAME(spd);
8266         PIPE_CONF_CHECK_INFOFRAME(hdmi);
8267         PIPE_CONF_CHECK_INFOFRAME(drm);
8268         PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
8269
8270         PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
8271         PIPE_CONF_CHECK_I(master_transcoder);
8272         PIPE_CONF_CHECK_BOOL(bigjoiner);
8273         PIPE_CONF_CHECK_BOOL(bigjoiner_slave);
8274         PIPE_CONF_CHECK_P(bigjoiner_linked_crtc);
8275
8276         PIPE_CONF_CHECK_I(dsc.compression_enable);
8277         PIPE_CONF_CHECK_I(dsc.dsc_split);
8278         PIPE_CONF_CHECK_I(dsc.compressed_bpp);
8279
8280         PIPE_CONF_CHECK_BOOL(splitter.enable);
8281         PIPE_CONF_CHECK_I(splitter.link_count);
8282         PIPE_CONF_CHECK_I(splitter.pixel_overlap);
8283
8284         PIPE_CONF_CHECK_I(mst_master_transcoder);
8285
8286         PIPE_CONF_CHECK_BOOL(vrr.enable);
8287         PIPE_CONF_CHECK_I(vrr.vmin);
8288         PIPE_CONF_CHECK_I(vrr.vmax);
8289         PIPE_CONF_CHECK_I(vrr.flipline);
8290         PIPE_CONF_CHECK_I(vrr.pipeline_full);
8291         PIPE_CONF_CHECK_I(vrr.guardband);
8292
8293 #undef PIPE_CONF_CHECK_X
8294 #undef PIPE_CONF_CHECK_I
8295 #undef PIPE_CONF_CHECK_BOOL
8296 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
8297 #undef PIPE_CONF_CHECK_P
8298 #undef PIPE_CONF_CHECK_FLAGS
8299 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
8300 #undef PIPE_CONF_CHECK_COLOR_LUT
8301 #undef PIPE_CONF_QUIRK
8302
8303         return ret;
8304 }
8305
8306 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
8307                                            const struct intel_crtc_state *pipe_config)
8308 {
8309         if (pipe_config->has_pch_encoder) {
8310                 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
8311                                                             &pipe_config->fdi_m_n);
8312                 int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
8313
8314                 /*
8315                  * FDI already provided one idea for the dotclock.
8316                  * Yell if the encoder disagrees.
8317                  */
8318                 drm_WARN(&dev_priv->drm,
8319                          !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
8320                          "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
8321                          fdi_dotclock, dotclock);
8322         }
8323 }
8324
/*
 * Read the SKL+ watermark and DDB hardware state back for an active crtc
 * and compare it, plane by plane, against the sw state tracked in
 * @new_crtc_state. Mismatches are only reported (via drm_err()), nothing
 * is fixed up here.
 */
static void verify_wm_state(struct intel_crtc *crtc,
                            struct intel_crtc_state *new_crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        /* scratch buffer for the hw readout, heap-allocated below */
        struct skl_hw_state {
                struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
                struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
                struct skl_pipe_wm wm;
        } *hw;
        const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
        int level, max_level = ilk_wm_max_level(dev_priv);
        struct intel_plane *plane;
        u8 hw_enabled_slices;

        /* SKL-style watermarks only; nothing to verify on an inactive crtc */
        if (DISPLAY_VER(dev_priv) < 9 || !new_crtc_state->hw.active)
                return;

        hw = kzalloc(sizeof(*hw), GFP_KERNEL);
        if (!hw)
                return;

        skl_pipe_wm_get_hw_state(crtc, &hw->wm);

        skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);

        hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);

        /* Display ver 11+: cross-check the enabled DBUF slice mask */
        if (DISPLAY_VER(dev_priv) >= 11 &&
            hw_enabled_slices != dev_priv->dbuf.enabled_slices)
                drm_err(&dev_priv->drm,
                        "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
                        dev_priv->dbuf.enabled_slices,
                        hw_enabled_slices);

        for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
                const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
                const struct skl_wm_level *hw_wm_level, *sw_wm_level;

                /* Watermarks */
                for (level = 0; level <= max_level; level++) {
                        hw_wm_level = &hw->wm.planes[plane->id].wm[level];
                        sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);

                        if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
                                continue;

                        drm_err(&dev_priv->drm,
                                "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                plane->base.base.id, plane->base.name, level,
                                sw_wm_level->enable,
                                sw_wm_level->blocks,
                                sw_wm_level->lines,
                                hw_wm_level->enable,
                                hw_wm_level->blocks,
                                hw_wm_level->lines);
                }

                /* Transition watermark */
                hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
                sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);

                if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
                        drm_err(&dev_priv->drm,
                                "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                plane->base.base.id, plane->base.name,
                                sw_wm_level->enable,
                                sw_wm_level->blocks,
                                sw_wm_level->lines,
                                hw_wm_level->enable,
                                hw_wm_level->blocks,
                                hw_wm_level->lines);
                }

                /* SAGV WM0, only where dedicated SAGV wm registers exist */
                hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
                sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;

                if (HAS_HW_SAGV_WM(dev_priv) &&
                    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
                        drm_err(&dev_priv->drm,
                                "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                plane->base.base.id, plane->base.name,
                                sw_wm_level->enable,
                                sw_wm_level->blocks,
                                sw_wm_level->lines,
                                hw_wm_level->enable,
                                hw_wm_level->blocks,
                                hw_wm_level->lines);
                }

                /* SAGV transition watermark */
                hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
                sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;

                if (HAS_HW_SAGV_WM(dev_priv) &&
                    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
                        drm_err(&dev_priv->drm,
                                "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                plane->base.base.id, plane->base.name,
                                sw_wm_level->enable,
                                sw_wm_level->blocks,
                                sw_wm_level->lines,
                                hw_wm_level->enable,
                                hw_wm_level->blocks,
                                hw_wm_level->lines);
                }

                /* DDB */
                hw_ddb_entry = &hw->ddb_y[plane->id];
                sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane->id];

                if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
                        drm_err(&dev_priv->drm,
                                "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
                                plane->base.base.id, plane->base.name,
                                sw_ddb_entry->start, sw_ddb_entry->end,
                                hw_ddb_entry->start, hw_ddb_entry->end);
                }
        }

        kfree(hw);
}
8444
8445 static void
8446 verify_connector_state(struct intel_atomic_state *state,
8447                        struct intel_crtc *crtc)
8448 {
8449         struct drm_connector *connector;
8450         struct drm_connector_state *new_conn_state;
8451         int i;
8452
8453         for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
8454                 struct drm_encoder *encoder = connector->encoder;
8455                 struct intel_crtc_state *crtc_state = NULL;
8456
8457                 if (new_conn_state->crtc != &crtc->base)
8458                         continue;
8459
8460                 if (crtc)
8461                         crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
8462
8463                 intel_connector_verify_state(crtc_state, new_conn_state);
8464
8465                 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
8466                      "connector's atomic encoder doesn't match legacy encoder\n");
8467         }
8468 }
8469
8470 static void
8471 verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
8472 {
8473         struct intel_encoder *encoder;
8474         struct drm_connector *connector;
8475         struct drm_connector_state *old_conn_state, *new_conn_state;
8476         int i;
8477
8478         for_each_intel_encoder(&dev_priv->drm, encoder) {
8479                 bool enabled = false, found = false;
8480                 enum pipe pipe;
8481
8482                 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
8483                             encoder->base.base.id,
8484                             encoder->base.name);
8485
8486                 for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
8487                                                    new_conn_state, i) {
8488                         if (old_conn_state->best_encoder == &encoder->base)
8489                                 found = true;
8490
8491                         if (new_conn_state->best_encoder != &encoder->base)
8492                                 continue;
8493                         found = enabled = true;
8494
8495                         I915_STATE_WARN(new_conn_state->crtc !=
8496                                         encoder->base.crtc,
8497                              "connector's crtc doesn't match encoder crtc\n");
8498                 }
8499
8500                 if (!found)
8501                         continue;
8502
8503                 I915_STATE_WARN(!!encoder->base.crtc != enabled,
8504                      "encoder's enabled state mismatch "
8505                      "(expected %i, found %i)\n",
8506                      !!encoder->base.crtc, enabled);
8507
8508                 if (!encoder->base.crtc) {
8509                         bool active;
8510
8511                         active = encoder->get_hw_state(encoder, &pipe);
8512                         I915_STATE_WARN(active,
8513                              "encoder detached but still enabled on pipe %c.\n",
8514                              pipe_name(pipe));
8515                 }
8516         }
8517 }
8518
/*
 * Read the pipe config back from the hardware and compare it against the
 * sw state in @new_crtc_state, warning on any disagreement.
 *
 * NOTE: @old_crtc_state is not needed anymore at this point of the commit,
 * so its storage is reset here and reused as the scratch buffer
 * (@pipe_config) for the hardware readout.
 */
static void
verify_crtc_state(struct intel_crtc *crtc,
                  struct intel_crtc_state *old_crtc_state,
                  struct intel_crtc_state *new_crtc_state)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *encoder;
        struct intel_crtc_state *pipe_config = old_crtc_state;
        struct drm_atomic_state *state = old_crtc_state->uapi.state;
        struct intel_crtc *master = crtc;

        /* Repurpose old_crtc_state as the readout buffer, preserving only
         * its uapi.state link across the reset.
         */
        __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
        intel_crtc_free_hw_state(old_crtc_state);
        intel_crtc_state_reset(old_crtc_state, crtc);
        old_crtc_state->uapi.state = state;

        drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
                    crtc->base.name);

        pipe_config->hw.enable = new_crtc_state->hw.enable;

        intel_crtc_get_pipe_config(pipe_config);

        /* we keep both pipes enabled on 830 */
        if (IS_I830(dev_priv) && pipe_config->hw.active)
                pipe_config->hw.active = new_crtc_state->hw.active;

        I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
                        "crtc active state doesn't match with hw state "
                        "(expected %i, found %i)\n",
                        new_crtc_state->hw.active, pipe_config->hw.active);

        I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
                        "transitional active state does not match atomic hw state "
                        "(expected %i, found %i)\n",
                        new_crtc_state->hw.active, crtc->active);

        /* For bigjoiner slaves, look up the encoders via the master crtc. */
        if (new_crtc_state->bigjoiner_slave)
                master = new_crtc_state->bigjoiner_linked_crtc;

        for_each_encoder_on_crtc(dev, &master->base, encoder) {
                enum pipe pipe;
                bool active;

                active = encoder->get_hw_state(encoder, &pipe);
                I915_STATE_WARN(active != new_crtc_state->hw.active,
                                "[ENCODER:%i] active %i with crtc active %i\n",
                                encoder->base.base.id, active,
                                new_crtc_state->hw.active);

                I915_STATE_WARN(active && master->pipe != pipe,
                                "Encoder connected to wrong pipe %c\n",
                                pipe_name(pipe));

                if (active)
                        intel_encoder_get_config(encoder, pipe_config);
        }

        /* Full config comparison only for crtcs that should be running. */
        if (!new_crtc_state->hw.active)
                return;

        if (new_crtc_state->bigjoiner_slave)
                /* No PLLs set for slave */
                pipe_config->shared_dpll = NULL;

        intel_pipe_config_sanity_check(dev_priv, pipe_config);

        if (!intel_pipe_config_compare(new_crtc_state,
                                       pipe_config, false)) {
                I915_STATE_WARN(1, "pipe state doesn't match!\n");
                intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
                intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
        }
}
8594
8595 static void
8596 intel_verify_planes(struct intel_atomic_state *state)
8597 {
8598         struct intel_plane *plane;
8599         const struct intel_plane_state *plane_state;
8600         int i;
8601
8602         for_each_new_intel_plane_in_state(state, plane,
8603                                           plane_state, i)
8604                 assert_plane(plane, plane_state->planar_slave ||
8605                              plane_state->uapi.visible);
8606 }
8607
/*
 * Compare the sw tracking of @pll (on, active_mask, pipe_mask, hw_state)
 * against the hardware. With a non-NULL @crtc, also check that the crtc's
 * pipe bit is (or isn't) present in the PLL masks according to
 * @new_crtc_state; with @crtc == NULL only the global masks are checked.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
                         struct intel_shared_dpll *pll,
                         struct intel_crtc *crtc,
                         struct intel_crtc_state *new_crtc_state)
{
        struct intel_dpll_hw_state dpll_hw_state;
        u8 pipe_mask;
        bool active;

        /* zero-fill so the full-struct memcmp() below sees defined bytes */
        memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

        drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);

        active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);

        /* Always-on PLLs have no meaningful sw on/off state to check. */
        if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
                I915_STATE_WARN(!pll->on && pll->active_mask,
                     "pll in active use but not on in sw tracking\n");
                I915_STATE_WARN(pll->on && !pll->active_mask,
                     "pll is on but not used by any active pipe\n");
                I915_STATE_WARN(pll->on != active,
                     "pll on state mismatch (expected %i, found %i)\n",
                     pll->on, active);
        }

        /* No crtc: only the global mask invariants can be checked. */
        if (!crtc) {
                I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
                                "more active pll users than references: 0x%x vs 0x%x\n",
                                pll->active_mask, pll->state.pipe_mask);

                return;
        }

        pipe_mask = BIT(crtc->pipe);

        if (new_crtc_state->hw.active)
                I915_STATE_WARN(!(pll->active_mask & pipe_mask),
                                "pll active mismatch (expected pipe %c in active mask 0x%x)\n",
                                pipe_name(crtc->pipe), pll->active_mask);
        else
                I915_STATE_WARN(pll->active_mask & pipe_mask,
                                "pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
                                pipe_name(crtc->pipe), pll->active_mask);

        I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
                        "pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
                        pipe_mask, pll->state.pipe_mask);

        /* hw_state comparison is only meaningful while the PLL is on */
        I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
                                          &dpll_hw_state,
                                          sizeof(dpll_hw_state)),
                        "pll hw state mismatch\n");
}
8662
/*
 * Verify the shared DPLL bookkeeping for @crtc across a commit: check the
 * PLL referenced by the new state against the hardware, and warn if the
 * crtc is still tracked by a PLL it was switched away from.
 */
static void
verify_shared_dpll_state(struct intel_crtc *crtc,
                         struct intel_crtc_state *old_crtc_state,
                         struct intel_crtc_state *new_crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        if (new_crtc_state->shared_dpll)
                verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);

        /* If the crtc moved to a different (or no) PLL, the old PLL must
         * no longer carry this pipe in either mask.
         */
        if (old_crtc_state->shared_dpll &&
            old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
                u8 pipe_mask = BIT(crtc->pipe);
                struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;

                I915_STATE_WARN(pll->active_mask & pipe_mask,
                                "pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
                                pipe_name(crtc->pipe), pll->active_mask);
                I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
                                "pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
                                pipe_name(crtc->pipe), pll->state.pipe_mask);
        }
}
8686
/*
 * DG2 only: read the MPLLB register state back through the crtc's encoder
 * and compare every field against the sw state in @new_crtc_state,
 * reporting mismatches via pipe_config_mismatch(). Skipped for inactive
 * crtcs and bigjoiner slaves.
 */
static void
verify_mpllb_state(struct intel_atomic_state *state,
                   struct intel_crtc_state *new_crtc_state)
{
        struct drm_i915_private *i915 = to_i915(state->base.dev);
        struct intel_mpllb_state mpllb_hw_state = { 0 };
        struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->mpllb_state;
        struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
        struct intel_encoder *encoder;

        if (!IS_DG2(i915))
                return;

        if (!new_crtc_state->hw.active)
                return;

        if (new_crtc_state->bigjoiner_slave)
                return;

        encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
        intel_mpllb_readout_hw_state(encoder, &mpllb_hw_state);

/* Compare one sw vs. hw MPLLB field, logging a mismatch if they differ. */
#define MPLLB_CHECK(name) do { \
        if (mpllb_sw_state->name != mpllb_hw_state.name) { \
                pipe_config_mismatch(false, crtc, "MPLLB:" __stringify(name), \
                                     "(expected 0x%08x, found 0x%08x)", \
                                     mpllb_sw_state->name, \
                                     mpllb_hw_state.name); \
        } \
} while (0)

        MPLLB_CHECK(mpllb_cp);
        MPLLB_CHECK(mpllb_div);
        MPLLB_CHECK(mpllb_div2);
        MPLLB_CHECK(mpllb_fracn1);
        MPLLB_CHECK(mpllb_fracn2);
        MPLLB_CHECK(mpllb_sscen);
        MPLLB_CHECK(mpllb_sscstep);

        /*
         * ref_control is handled by the hardware/firmware and never
         * programmed by the software, but the proper values are supplied
         * in the bspec for verification purposes.
         */
        MPLLB_CHECK(ref_control);

#undef MPLLB_CHECK
}
8735
8736 static void
8737 intel_modeset_verify_crtc(struct intel_crtc *crtc,
8738                           struct intel_atomic_state *state,
8739                           struct intel_crtc_state *old_crtc_state,
8740                           struct intel_crtc_state *new_crtc_state)
8741 {
8742         if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
8743                 return;
8744
8745         verify_wm_state(crtc, new_crtc_state);
8746         verify_connector_state(state, crtc);
8747         verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
8748         verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
8749         verify_mpllb_state(state, new_crtc_state);
8750 }
8751
8752 static void
8753 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
8754 {
8755         int i;
8756
8757         for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
8758                 verify_single_dpll_state(dev_priv,
8759                                          &dev_priv->dpll.shared_dplls[i],
8760                                          NULL, NULL);
8761 }
8762
/*
 * Verify state not tied to a specific enabled crtc: all encoders, the
 * connector states checked with a NULL crtc, and the shared DPLLs'
 * global bookkeeping.
 */
static void
intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
                              struct intel_atomic_state *state)
{
        verify_encoder_state(dev_priv, state);
        verify_connector_state(state, NULL);
        verify_disabled_dpll_state(dev_priv);
}
8771
8772 int intel_modeset_all_pipes(struct intel_atomic_state *state)
8773 {
8774         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8775         struct intel_crtc *crtc;
8776
8777         /*
8778          * Add all pipes to the state, and force
8779          * a modeset on all the active ones.
8780          */
8781         for_each_intel_crtc(&dev_priv->drm, crtc) {
8782                 struct intel_crtc_state *crtc_state;
8783                 int ret;
8784
8785                 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
8786                 if (IS_ERR(crtc_state))
8787                         return PTR_ERR(crtc_state);
8788
8789                 if (!crtc_state->hw.active ||
8790                     drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
8791                         continue;
8792
8793                 crtc_state->uapi.mode_changed = true;
8794
8795                 ret = drm_atomic_add_affected_connectors(&state->base,
8796                                                          &crtc->base);
8797                 if (ret)
8798                         return ret;
8799
8800                 ret = intel_atomic_add_affected_planes(state, crtc);
8801                 if (ret)
8802                         return ret;
8803
8804                 crtc_state->update_planes |= crtc_state->active_planes;
8805         }
8806
8807         return 0;
8808 }
8809
/*
 * Update the crtc software state the vblank/timestamp code depends on:
 * recompute the drm timestamping constants from the (possibly
 * VRR-adjusted) adjusted mode, copy mode_flags, and derive
 * scanline_offset (see the long comment below for the per-platform
 * rules).
 */
static void
intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        /* local copy so the VRR adjustments don't touch crtc_state */
        struct drm_display_mode adjusted_mode =
                crtc_state->hw.adjusted_mode;

        /* With VRR, stretch vtotal/vblank out to the vmin/vmax limits
         * before computing the timestamping constants.
         */
        if (crtc_state->vrr.enable) {
                adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
                adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
                adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
                crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
        }

        drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);

        crtc->mode_flags = crtc_state->mode_flags;

        /*
         * The scanline counter increments at the leading edge of hsync.
         *
         * On most platforms it starts counting from vtotal-1 on the
         * first active line. That means the scanline counter value is
         * always one less than what we would expect. Ie. just after
         * start of vblank, which also occurs at start of hsync (on the
         * last active line), the scanline counter will read vblank_start-1.
         *
         * On gen2 the scanline counter starts counting from 1 instead
         * of vtotal-1, so we have to subtract one (or rather add vtotal-1
         * to keep the value positive), instead of adding one.
         *
         * On HSW+ the behaviour of the scanline counter depends on the output
         * type. For DP ports it behaves like most other platforms, but on HDMI
         * there's an extra 1 line difference. So we need to add two instead of
         * one to the value.
         *
         * On VLV/CHV DSI the scanline counter would appear to increment
         * approx. 1/3 of a scanline before start of vblank. Unfortunately
         * that means we can't tell whether we're in vblank or not while
         * we're on that particular line. We must still set scanline_offset
         * to 1 so that the vblank timestamps come out correct when we query
         * the scanline counter from within the vblank interrupt handler.
         * However if queried just before the start of vblank we'll get an
         * answer that's slightly in the future.
         */
        if (DISPLAY_VER(dev_priv) == 2) {
                int vtotal;

                vtotal = adjusted_mode.crtc_vtotal;
                if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
                        vtotal /= 2;

                crtc->scanline_offset = vtotal - 1;
        } else if (HAS_DDI(dev_priv) &&
                   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
                crtc->scanline_offset = 2;
        } else {
                crtc->scanline_offset = 1;
        }
}
8871
8872 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
8873 {
8874         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8875         struct intel_crtc_state *new_crtc_state;
8876         struct intel_crtc *crtc;
8877         int i;
8878
8879         if (!dev_priv->dpll_funcs)
8880                 return;
8881
8882         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8883                 if (!intel_crtc_needs_modeset(new_crtc_state))
8884                         continue;
8885
8886                 intel_release_shared_dplls(state, crtc);
8887         }
8888 }
8889
8890 /*
8891  * This implements the workaround described in the "notes" section of the mode
8892  * set sequence documentation. When going from no pipes or single pipe to
8893  * multiple pipes, and planes are enabled after the pipe, we need to wait at
8894  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
8895  */
static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled during the modeset */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->hw.active ||
		    !intel_crtc_needs_modeset(crtc_state))
			continue;

		/*
		 * Remember the first two CRTCs being enabled; if a second
		 * one exists it may have to wait on the first (see below).
		 */
		if (first_crtc_state) {
			other_crtc_state = crtc_state;
			break;
		} else {
			first_crtc_state = crtc_state;
			first_pipe = crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->base.dev, crtc) {
		/*
		 * Pull every CRTC into the state: hsw_workaround_pipe must
		 * be (re)set on all of them, not just the ones already there.
		 */
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->hsw_workaround_pipe = INVALID_PIPE;

		if (!crtc_state->hw.active ||
		    intel_crtc_needs_modeset(crtc_state))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = crtc->pipe;
	}

	/*
	 * Exactly one pipe already enabled: the newly enabled pipe must
	 * wait on it. Otherwise, if this modeset enables two pipes, the
	 * second waits on the first.
	 */
	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
8950
8951 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
8952                            u8 active_pipes)
8953 {
8954         const struct intel_crtc_state *crtc_state;
8955         struct intel_crtc *crtc;
8956         int i;
8957
8958         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
8959                 if (crtc_state->hw.active)
8960                         active_pipes |= BIT(crtc->pipe);
8961                 else
8962                         active_pipes &= ~BIT(crtc->pipe);
8963         }
8964
8965         return active_pipes;
8966 }
8967
8968 static int intel_modeset_checks(struct intel_atomic_state *state)
8969 {
8970         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8971
8972         state->modeset = true;
8973
8974         if (IS_HASWELL(dev_priv))
8975                 return hsw_mode_set_planes_workaround(state);
8976
8977         return 0;
8978 }
8979
8980 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
8981                                      struct intel_crtc_state *new_crtc_state)
8982 {
8983         if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
8984                 return;
8985
8986         new_crtc_state->uapi.mode_changed = false;
8987         new_crtc_state->update_pipe = true;
8988 }
8989
/*
 * Carry state that a fastset deliberately does not recompute over from
 * the old CRTC state into the new one.
 */
static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *new_crtc_state)
{
	/*
	 * If we're not doing the full modeset we want to
	 * keep the current M/N values as they may be
	 * sufficiently different to the computed values
	 * to cause problems.
	 *
	 * FIXME: should really copy more fuzzy state here
	 */
	new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
	new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
	new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
	new_crtc_state->has_drrs = old_crtc_state->has_drrs;
}
9006
9007 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
9008                                           struct intel_crtc *crtc,
9009                                           u8 plane_ids_mask)
9010 {
9011         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9012         struct intel_plane *plane;
9013
9014         for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
9015                 struct intel_plane_state *plane_state;
9016
9017                 if ((plane_ids_mask & BIT(plane->id)) == 0)
9018                         continue;
9019
9020                 plane_state = intel_atomic_get_plane_state(state, plane);
9021                 if (IS_ERR(plane_state))
9022                         return PTR_ERR(plane_state);
9023         }
9024
9025         return 0;
9026 }
9027
9028 int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
9029                                      struct intel_crtc *crtc)
9030 {
9031         const struct intel_crtc_state *old_crtc_state =
9032                 intel_atomic_get_old_crtc_state(state, crtc);
9033         const struct intel_crtc_state *new_crtc_state =
9034                 intel_atomic_get_new_crtc_state(state, crtc);
9035
9036         return intel_crtc_add_planes_to_state(state, crtc,
9037                                               old_crtc_state->enabled_planes |
9038                                               new_crtc_state->enabled_planes);
9039 }
9040
9041 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
9042 {
9043         /* See {hsw,vlv,ivb}_plane_ratio() */
9044         return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
9045                 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
9046                 IS_IVYBRIDGE(dev_priv);
9047 }
9048
9049 static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
9050                                            struct intel_crtc *crtc,
9051                                            struct intel_crtc *other)
9052 {
9053         const struct intel_plane_state *plane_state;
9054         struct intel_plane *plane;
9055         u8 plane_ids = 0;
9056         int i;
9057
9058         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
9059                 if (plane->pipe == crtc->pipe)
9060                         plane_ids |= BIT(plane->id);
9061         }
9062
9063         return intel_crtc_add_planes_to_state(state, other, plane_ids);
9064 }
9065
9066 static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
9067 {
9068         const struct intel_crtc_state *crtc_state;
9069         struct intel_crtc *crtc;
9070         int i;
9071
9072         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9073                 int ret;
9074
9075                 if (!crtc_state->bigjoiner)
9076                         continue;
9077
9078                 ret = intel_crtc_add_bigjoiner_planes(state, crtc,
9079                                                       crtc_state->bigjoiner_linked_crtc);
9080                 if (ret)
9081                         return ret;
9082         }
9083
9084         return 0;
9085 }
9086
static int intel_atomic_check_planes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i, ret;

	/* Pull in any linked planes first (see icl_add_linked_planes()). */
	ret = icl_add_linked_planes(state);
	if (ret)
		return ret;

	/* Mirror each bigjoiner master's planes onto its linked CRTC. */
	ret = intel_bigjoiner_add_affected_planes(state);
	if (ret)
		return ret;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_atomic_check(state, plane);
		if (ret) {
			drm_dbg_atomic(&dev_priv->drm,
				       "[PLANE:%d:%s] atomic driver check failed\n",
				       plane->base.base.id, plane->base.name);
			return ret;
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		u8 old_active_planes, new_active_planes;

		ret = icl_check_nv12_planes(new_crtc_state);
		if (ret)
			return ret;

		/*
		 * On some platforms the number of active planes affects
		 * the planes' minimum cdclk calculation. Add such planes
		 * to the state before we compute the minimum cdclk.
		 */
		if (!active_planes_affects_min_cdclk(dev_priv))
			continue;

		/* The cursor plane is excluded from the plane count. */
		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

		/* Only the number of active planes matters, not which ones. */
		if (hweight8(old_active_planes) == hweight8(new_active_planes))
			continue;

		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
		if (ret)
			return ret;
	}

	return 0;
}
9143
/*
 * Decide whether a full cdclk recomputation is required, based on the
 * per-plane minimum cdclk, any force_min_cdclk change, and the
 * bandwidth-derived minimum cdclk. Sets *need_cdclk_calc accordingly
 * (never clears it). Returns 0 on success or a negative error code.
 */
static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
				    bool *need_cdclk_calc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_cdclk_state *old_cdclk_state;
	const struct intel_cdclk_state *new_cdclk_state;
	struct intel_plane_state *plane_state;
	struct intel_bw_state *new_bw_state;
	struct intel_plane *plane;
	int min_cdclk = 0;
	enum pipe pipe;
	int ret;
	int i;
	/*
	 * active_planes bitmask has been updated, and potentially
	 * affected planes are part of the state. We can now
	 * compute the minimum cdclk for each plane.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
		if (ret)
			return ret;
	}

	old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
	new_cdclk_state = intel_atomic_get_new_cdclk_state(state);

	/* A forced minimum cdclk change always triggers recomputation. */
	if (new_cdclk_state &&
	    old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
		*need_cdclk_calc = true;

	ret = intel_cdclk_bw_calc_min_cdclk(state);
	if (ret)
		return ret;

	new_bw_state = intel_atomic_get_new_bw_state(state);

	/* Without both states in the transaction there is nothing to compare. */
	if (!new_cdclk_state || !new_bw_state)
		return 0;

	for_each_pipe(dev_priv, pipe) {
		/* min_cdclk accumulates the max over the pipes seen so far. */
		min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk);

		/*
		 * Currently do this change only if we need to increase
		 */
		if (new_bw_state->min_cdclk > min_cdclk)
			*need_cdclk_calc = true;
	}

	return 0;
}
9196
9197 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
9198 {
9199         struct intel_crtc_state *crtc_state;
9200         struct intel_crtc *crtc;
9201         int i;
9202
9203         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9204                 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
9205                 int ret;
9206
9207                 ret = intel_crtc_atomic_check(state, crtc);
9208                 if (ret) {
9209                         drm_dbg_atomic(&i915->drm,
9210                                        "[CRTC:%d:%s] atomic driver check failed\n",
9211                                        crtc->base.base.id, crtc->base.name);
9212                         return ret;
9213                 }
9214         }
9215
9216         return 0;
9217 }
9218
9219 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
9220                                                u8 transcoders)
9221 {
9222         const struct intel_crtc_state *new_crtc_state;
9223         struct intel_crtc *crtc;
9224         int i;
9225
9226         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
9227                 if (new_crtc_state->hw.enable &&
9228                     transcoders & BIT(new_crtc_state->cpu_transcoder) &&
9229                     intel_crtc_needs_modeset(new_crtc_state))
9230                         return true;
9231         }
9232
9233         return false;
9234 }
9235
9236 static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
9237                                         struct intel_crtc *crtc,
9238                                         struct intel_crtc_state *old_crtc_state,
9239                                         struct intel_crtc_state *new_crtc_state)
9240 {
9241         struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
9242         struct intel_crtc *slave, *master;
9243
9244         /* slave being enabled, is master is still claiming this crtc? */
9245         if (old_crtc_state->bigjoiner_slave) {
9246                 slave = crtc;
9247                 master = old_crtc_state->bigjoiner_linked_crtc;
9248                 master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
9249                 if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
9250                         goto claimed;
9251         }
9252
9253         if (!new_crtc_state->bigjoiner)
9254                 return 0;
9255
9256         slave = intel_dsc_get_bigjoiner_secondary(crtc);
9257         if (!slave) {
9258                 DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires "
9259                               "CRTC + 1 to be used, doesn't exist\n",
9260                               crtc->base.base.id, crtc->base.name);
9261                 return -EINVAL;
9262         }
9263
9264         new_crtc_state->bigjoiner_linked_crtc = slave;
9265         slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave);
9266         master = crtc;
9267         if (IS_ERR(slave_crtc_state))
9268                 return PTR_ERR(slave_crtc_state);
9269
9270         /* master being enabled, slave was already configured? */
9271         if (slave_crtc_state->uapi.enable)
9272                 goto claimed;
9273
9274         DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n",
9275                       slave->base.base.id, slave->base.name);
9276
9277         return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state);
9278
9279 claimed:
9280         DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
9281                       "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
9282                       slave->base.base.id, slave->base.name,
9283                       master->base.base.id, master->base.name);
9284         return -EINVAL;
9285 }
9286
9287 static void kill_bigjoiner_slave(struct intel_atomic_state *state,
9288                                  struct intel_crtc_state *master_crtc_state)
9289 {
9290         struct intel_crtc_state *slave_crtc_state =
9291                 intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc);
9292
9293         slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
9294         slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
9295         slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
9296         intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
9297 }
9298
9299 /**
9300  * DOC: asynchronous flip implementation
9301  *
9302  * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
9303  * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
9304  * Correspondingly, support is currently added for primary plane only.
9305  *
9306  * Async flip can only change the plane surface address, so anything else
9307  * changing is rejected from the intel_atomic_check_async() function.
9308  * Once this check is cleared, flip done interrupt is enabled using
9309  * the intel_crtc_enable_flip_done() function.
9310  *
9311  * As soon as the surface address register is written, flip done interrupt is
 * generated and the requested events are sent to userspace in the interrupt
9313  * handler itself. The timestamp and sequence sent during the flip done event
9314  * correspond to the last vblank and have no relation to the actual time when
9315  * the flip done event was sent.
9316  */
static int intel_atomic_check_async(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	const struct intel_plane_state *new_plane_state, *old_plane_state;
	struct intel_crtc *crtc;
	struct intel_plane *plane;
	int i;

	/*
	 * CRTC-level restrictions: an async flip may not require a
	 * modeset, must target an active CRTC, and may not change the
	 * set of active planes.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (intel_crtc_needs_modeset(new_crtc_state)) {
			drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
			return -EINVAL;
		}

		if (!new_crtc_state->hw.active) {
			drm_dbg_kms(&i915->drm, "CRTC inactive\n");
			return -EINVAL;
		}
		if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
			drm_dbg_kms(&i915->drm,
				    "Active planes cannot be changed during async flip\n");
			return -EINVAL;
		}
	}

	/*
	 * Plane-level restrictions: everything except the surface
	 * address must remain unchanged across the flip.
	 */
	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		/*
		 * TODO: Async flip is only supported through the page flip IOCTL
		 * as of now. So support currently added for primary plane only.
		 * Support for other planes on platforms on which supports
		 * this(vlv/chv and icl+) should be added when async flip is
		 * enabled in the atomic IOCTL path.
		 */
		if (!plane->async_flip)
			return -EINVAL;

		/*
		 * FIXME: This check is kept generic for all platforms.
		 * Need to verify this for all gen9 platforms to enable
		 * this selectively if required.
		 */
		switch (new_plane_state->hw.fb->modifier) {
		case I915_FORMAT_MOD_X_TILED:
		case I915_FORMAT_MOD_Y_TILED:
		case I915_FORMAT_MOD_Yf_TILED:
			break;
		default:
			drm_dbg_kms(&i915->drm,
				    "Linear memory/CCS does not support async flips\n");
			return -EINVAL;
		}

		if (old_plane_state->view.color_plane[0].stride !=
		    new_plane_state->view.color_plane[0].stride) {
			drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.fb->modifier !=
		    new_plane_state->hw.fb->modifier) {
			drm_dbg_kms(&i915->drm,
				    "Framebuffer modifiers cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.fb->format !=
		    new_plane_state->hw.fb->format) {
			drm_dbg_kms(&i915->drm,
				    "Framebuffer format cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.rotation !=
		    new_plane_state->hw.rotation) {
			drm_dbg_kms(&i915->drm, "Rotation cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
		    !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
			drm_dbg_kms(&i915->drm,
				    "Plane size/co-ordinates cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
			drm_dbg_kms(&i915->drm, "Alpha value cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.pixel_blend_mode !=
		    new_plane_state->hw.pixel_blend_mode) {
			drm_dbg_kms(&i915->drm,
				    "Pixel blend mode cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
			drm_dbg_kms(&i915->drm,
				    "Color encoding cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
			drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n");
			return -EINVAL;
		}
	}

	return 0;
}
9431
static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc_state *linked_crtc_state;
		struct intel_crtc *linked_crtc;
		int ret;

		if (!crtc_state->bigjoiner)
			continue;

		/*
		 * Pull the linked CRTC into the state unconditionally,
		 * before checking whether a modeset is needed.
		 */
		linked_crtc = crtc_state->bigjoiner_linked_crtc;
		linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc);
		if (IS_ERR(linked_crtc_state))
			return PTR_ERR(linked_crtc_state);

		if (!intel_crtc_needs_modeset(crtc_state))
			continue;

		/* A modeset on one bigjoiner pipe forces one on its link. */
		linked_crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base,
							 &linked_crtc->base);
		if (ret)
			return ret;

		ret = intel_atomic_add_affected_planes(state, linked_crtc);
		if (ret)
			return ret;
	}

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		/* Kill old bigjoiner link, we may re-establish afterwards */
		if (intel_crtc_needs_modeset(crtc_state) &&
		    crtc_state->bigjoiner && !crtc_state->bigjoiner_slave)
			kill_bigjoiner_slave(state, crtc_state);
	}

	return 0;
}
9475
9476 /**
9477  * intel_atomic_check - validate state object
9478  * @dev: drm device
9479  * @_state: state to validate
9480  */
9481 static int intel_atomic_check(struct drm_device *dev,
9482                               struct drm_atomic_state *_state)
9483 {
9484         struct drm_i915_private *dev_priv = to_i915(dev);
9485         struct intel_atomic_state *state = to_intel_atomic_state(_state);
9486         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
9487         struct intel_crtc *crtc;
9488         int ret, i;
9489         bool any_ms = false;
9490
9491         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9492                                             new_crtc_state, i) {
9493                 if (new_crtc_state->inherited != old_crtc_state->inherited)
9494                         new_crtc_state->uapi.mode_changed = true;
9495         }
9496
9497         intel_vrr_check_modeset(state);
9498
9499         ret = drm_atomic_helper_check_modeset(dev, &state->base);
9500         if (ret)
9501                 goto fail;
9502
9503         ret = intel_bigjoiner_add_affected_crtcs(state);
9504         if (ret)
9505                 goto fail;
9506
9507         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9508                                             new_crtc_state, i) {
9509                 if (!intel_crtc_needs_modeset(new_crtc_state)) {
9510                         /* Light copy */
9511                         intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state);
9512
9513                         continue;
9514                 }
9515
9516                 if (!new_crtc_state->uapi.enable) {
9517                         if (!new_crtc_state->bigjoiner_slave) {
9518                                 intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
9519                                 any_ms = true;
9520                         }
9521                         continue;
9522                 }
9523
9524                 ret = intel_crtc_prepare_cleared_state(state, new_crtc_state);
9525                 if (ret)
9526                         goto fail;
9527
9528                 ret = intel_modeset_pipe_config(state, new_crtc_state);
9529                 if (ret)
9530                         goto fail;
9531
9532                 ret = intel_atomic_check_bigjoiner(state, crtc, old_crtc_state,
9533                                                    new_crtc_state);
9534                 if (ret)
9535                         goto fail;
9536         }
9537
9538         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9539                                             new_crtc_state, i) {
9540                 if (!intel_crtc_needs_modeset(new_crtc_state))
9541                         continue;
9542
9543                 ret = intel_modeset_pipe_config_late(new_crtc_state);
9544                 if (ret)
9545                         goto fail;
9546
9547                 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
9548         }
9549
9550         /**
9551          * Check if fastset is allowed by external dependencies like other
9552          * pipes and transcoders.
9553          *
9554          * Right now it only forces a fullmodeset when the MST master
9555          * transcoder did not changed but the pipe of the master transcoder
9556          * needs a fullmodeset so all slaves also needs to do a fullmodeset or
9557          * in case of port synced crtcs, if one of the synced crtcs
9558          * needs a full modeset, all other synced crtcs should be
9559          * forced a full modeset.
9560          */
9561         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
9562                 if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
9563                         continue;
9564
9565                 if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
9566                         enum transcoder master = new_crtc_state->mst_master_transcoder;
9567
9568                         if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
9569                                 new_crtc_state->uapi.mode_changed = true;
9570                                 new_crtc_state->update_pipe = false;
9571                         }
9572                 }
9573
9574                 if (is_trans_port_sync_mode(new_crtc_state)) {
9575                         u8 trans = new_crtc_state->sync_mode_slaves_mask;
9576
9577                         if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
9578                                 trans |= BIT(new_crtc_state->master_transcoder);
9579
9580                         if (intel_cpu_transcoders_need_modeset(state, trans)) {
9581                                 new_crtc_state->uapi.mode_changed = true;
9582                                 new_crtc_state->update_pipe = false;
9583                         }
9584                 }
9585
9586                 if (new_crtc_state->bigjoiner) {
9587                         struct intel_crtc_state *linked_crtc_state =
9588                                 intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc);
9589
9590                         if (intel_crtc_needs_modeset(linked_crtc_state)) {
9591                                 new_crtc_state->uapi.mode_changed = true;
9592                                 new_crtc_state->update_pipe = false;
9593                         }
9594                 }
9595         }
9596
9597         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9598                                             new_crtc_state, i) {
9599                 if (intel_crtc_needs_modeset(new_crtc_state)) {
9600                         any_ms = true;
9601                         continue;
9602                 }
9603
9604                 if (!new_crtc_state->update_pipe)
9605                         continue;
9606
9607                 intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
9608         }
9609
9610         if (any_ms && !check_digital_port_conflicts(state)) {
9611                 drm_dbg_kms(&dev_priv->drm,
9612                             "rejecting conflicting digital port configuration\n");
9613                 ret = -EINVAL;
9614                 goto fail;
9615         }
9616
9617         ret = drm_dp_mst_atomic_check(&state->base);
9618         if (ret)
9619                 goto fail;
9620
9621         ret = intel_atomic_check_planes(state);
9622         if (ret)
9623                 goto fail;
9624
9625         intel_fbc_choose_crtc(dev_priv, state);
9626         ret = intel_compute_global_watermarks(state);
9627         if (ret)
9628                 goto fail;
9629
9630         ret = intel_bw_atomic_check(state);
9631         if (ret)
9632                 goto fail;
9633
9634         ret = intel_atomic_check_cdclk(state, &any_ms);
9635         if (ret)
9636                 goto fail;
9637
9638         if (intel_any_crtc_needs_modeset(state))
9639                 any_ms = true;
9640
9641         if (any_ms) {
9642                 ret = intel_modeset_checks(state);
9643                 if (ret)
9644                         goto fail;
9645
9646                 ret = intel_modeset_calc_cdclk(state);
9647                 if (ret)
9648                         return ret;
9649
9650                 intel_modeset_clear_plls(state);
9651         }
9652
9653         ret = intel_atomic_check_crtcs(state);
9654         if (ret)
9655                 goto fail;
9656
9657         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9658                                             new_crtc_state, i) {
9659                 if (new_crtc_state->uapi.async_flip) {
9660                         ret = intel_atomic_check_async(state);
9661                         if (ret)
9662                                 goto fail;
9663                 }
9664
9665                 if (!intel_crtc_needs_modeset(new_crtc_state) &&
9666                     !new_crtc_state->update_pipe)
9667                         continue;
9668
9669                 intel_dump_pipe_config(new_crtc_state, state,
9670                                        intel_crtc_needs_modeset(new_crtc_state) ?
9671                                        "[modeset]" : "[fastset]");
9672         }
9673
9674         return 0;
9675
9676  fail:
9677         if (ret == -EDEADLK)
9678                 return ret;
9679
9680         /*
9681          * FIXME would probably be nice to know which crtc specifically
9682          * caused the failure, in cases where we can pinpoint it.
9683          */
9684         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9685                                             new_crtc_state, i)
9686                 intel_dump_pipe_config(new_crtc_state, state, "[failed]");
9687
9688         return ret;
9689 }
9690
9691 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
9692 {
9693         struct intel_crtc_state *crtc_state;
9694         struct intel_crtc *crtc;
9695         int i, ret;
9696
9697         ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
9698         if (ret < 0)
9699                 return ret;
9700
9701         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9702                 bool mode_changed = intel_crtc_needs_modeset(crtc_state);
9703
9704                 if (mode_changed || crtc_state->update_pipe ||
9705                     crtc_state->uapi.color_mgmt_changed) {
9706                         intel_dsb_prepare(crtc_state);
9707                 }
9708         }
9709
9710         return 0;
9711 }
9712
9713 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
9714                                   struct intel_crtc_state *crtc_state)
9715 {
9716         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9717
9718         if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes)
9719                 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
9720
9721         if (crtc_state->has_pch_encoder) {
9722                 enum pipe pch_transcoder =
9723                         intel_crtc_pch_transcoder(crtc);
9724
9725                 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
9726         }
9727 }
9728
/*
 * Apply the pipe-level pieces of a fastset (flip without full modeset):
 * pipe source size, panel fitter, linetime watermarks and (ICL+) pipe
 * chicken bits.
 */
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */
	intel_set_pipe_src_size(new_crtc_state);

	/* on skylake this is done by detaching scalers */
	if (DISPLAY_VER(dev_priv) >= 9) {
		if (new_crtc_state->pch_pfit.enabled)
			skl_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		/* ILK-style pfit: enable it, or disable if it was on before. */
		if (new_crtc_state->pch_pfit.enabled)
			ilk_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ilk_pfit_disable(old_crtc_state);
	}

	/*
	 * The register is supposedly single buffered so perhaps
	 * not 100% correct to do this here. But SKL+ calculate
	 * this based on the adjust pixel rate so pfit changes do
	 * affect it and so it must be updated for fastsets.
	 * HSW/BDW only really need this here for fastboot, after
	 * that the value should not change without a full modeset.
	 */
	if (DISPLAY_VER(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		hsw_set_linetime_wm(new_crtc_state);

	if (DISPLAY_VER(dev_priv) >= 11)
		icl_set_pipe_chicken(new_crtc_state);
}
9771
/*
 * Program the pipe-level state that must land before the plane updates:
 * color management, pipe misc bits, fastset fixups, PSR2 manual
 * tracking control and watermarks.
 */
static void commit_pipe_pre_planes(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	/*
	 * During modesets pipe configuration was programmed as the
	 * CRTC was enabled.
	 */
	if (!modeset) {
		if (new_crtc_state->uapi.color_mgmt_changed ||
		    new_crtc_state->update_pipe)
			intel_color_commit(new_crtc_state);

		/* BDW and display version 9+ refresh PIPEMISC on fastsets too. */
		if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
			bdw_set_pipemisc(new_crtc_state);

		if (new_crtc_state->update_pipe)
			intel_pipe_fastset(old_crtc_state, new_crtc_state);
	}

	intel_psr2_program_trans_man_trk_ctl(new_crtc_state);

	intel_atomic_update_watermarks(state, crtc);
}
9802
9803 static void commit_pipe_post_planes(struct intel_atomic_state *state,
9804                                     struct intel_crtc *crtc)
9805 {
9806         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9807         const struct intel_crtc_state *new_crtc_state =
9808                 intel_atomic_get_new_crtc_state(state, crtc);
9809
9810         /*
9811          * Disable the scaler(s) after the plane(s) so that we don't
9812          * get a catastrophic underrun even if the two operations
9813          * end up happening in two different frames.
9814          */
9815         if (DISPLAY_VER(dev_priv) >= 9 &&
9816             !intel_crtc_needs_modeset(new_crtc_state))
9817                 skl_detach_scalers(new_crtc_state);
9818 }
9819
9820 static void intel_enable_crtc(struct intel_atomic_state *state,
9821                               struct intel_crtc *crtc)
9822 {
9823         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9824         const struct intel_crtc_state *new_crtc_state =
9825                 intel_atomic_get_new_crtc_state(state, crtc);
9826
9827         if (!intel_crtc_needs_modeset(new_crtc_state))
9828                 return;
9829
9830         intel_crtc_update_active_timings(new_crtc_state);
9831
9832         dev_priv->display->crtc_enable(state, crtc);
9833
9834         if (new_crtc_state->bigjoiner_slave)
9835                 return;
9836
9837         /* vblanks work again, re-enable pipe CRC. */
9838         intel_crtc_enable_pipe_crc(crtc);
9839 }
9840
/*
 * Commit the plane and pipe updates for one CRTC under vblank evasion.
 * For fastsets (no full modeset) this also reloads preloadable LUTs,
 * runs the pre-plane update and lets the encoders update the pipe.
 */
static void intel_update_crtc(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	if (!modeset) {
		/*
		 * Preloadable LUTs are written here, before the update;
		 * the non-preloadable ones are loaded after the flip is
		 * done (in intel_atomic_commit_tail()).
		 */
		if (new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);

		intel_pre_plane_update(state, crtc);

		if (new_crtc_state->update_pipe)
			intel_encoders_update_pipe(state, crtc);
	}

	intel_fbc_update(state, crtc);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(new_crtc_state);

	commit_pipe_pre_planes(state, crtc);

	if (DISPLAY_VER(dev_priv) >= 9)
		skl_update_planes_on_crtc(state, crtc);
	else
		i9xx_update_planes_on_crtc(state, crtc);

	commit_pipe_post_planes(state, crtc);

	intel_pipe_update_end(new_crtc_state);

	/*
	 * We usually enable FIFO underrun interrupts as part of the
	 * CRTC enable sequence during modesets.  But when we inherit a
	 * valid pipe configuration from the BIOS we need to take care
	 * of enabling them on the CRTC's first fastset.
	 */
	if (new_crtc_state->update_pipe && !modeset &&
	    old_crtc_state->inherited)
		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
9889
/*
 * Disable a CRTC that was active in the old state: planes first, then
 * pipe CRC, the pipe itself, FBC and the shared DPLL. For a bigjoiner
 * master the planes of the linked (slave) CRTC are disabled as well.
 * Must never be called for a bigjoiner slave directly.
 */
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
					  struct intel_crtc_state *old_crtc_state,
					  struct intel_crtc_state *new_crtc_state,
					  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	drm_WARN_ON(&dev_priv->drm, old_crtc_state->bigjoiner_slave);

	intel_encoders_pre_disable(state, crtc);

	intel_crtc_disable_planes(state, crtc);

	/*
	 * We still need special handling for disabling bigjoiner master
	 * and slaves since for slave we do not have encoder or plls
	 * so we dont need to disable those.
	 */
	if (old_crtc_state->bigjoiner) {
		intel_crtc_disable_planes(state,
					  old_crtc_state->bigjoiner_linked_crtc);
		old_crtc_state->bigjoiner_linked_crtc->active = false;
	}

	/*
	 * We need to disable pipe CRC before disabling the pipe,
	 * or we race against vblank off.
	 */
	intel_crtc_disable_pipe_crc(crtc);

	dev_priv->display->crtc_disable(state, crtc);
	crtc->active = false;
	intel_fbc_disable(crtc);
	intel_disable_shared_dpll(old_crtc_state);

	/* FIXME unify this for all platforms */
	if (!new_crtc_state->hw.active &&
	    !HAS_GMCH(dev_priv))
		intel_initial_watermarks(state, crtc);
}
9930
/*
 * Disable every CRTC that needs a full modeset, in a safe order:
 * transcoder port sync slaves and MST slave transcoders are taken down
 * in a first pass, everything else (masters included) in a second one.
 * Bigjoiner slaves are never disabled directly; their master handles
 * them in intel_old_crtc_state_disables().
 */
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u32 handled = 0;	/* bitmask of pipes disabled in the first pass */
	int i;

	/* Only disable port sync and MST slaves */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner)
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		/* In case of Transcoder port Sync master slave CRTCs can be
		 * assigned in any order and we need to make sure that
		 * slave CRTCs are disabled first and then master CRTC since
		 * Slave vblanks are masked till Master Vblanks.
		 */
		if (!is_trans_port_sync_slave(old_crtc_state) &&
		    !intel_dp_mst_is_slave_trans(old_crtc_state))
			continue;

		intel_pre_plane_update(state, crtc);
		intel_old_crtc_state_disables(state, old_crtc_state,
					      new_crtc_state, crtc);
		handled |= BIT(crtc->pipe);
	}

	/* Disable everything else left on */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state) ||
		    (handled & BIT(crtc->pipe)) ||
		    old_crtc_state->bigjoiner_slave)
			continue;

		intel_pre_plane_update(state, crtc);
		/* A bigjoiner master also runs the pre-plane update for its slave. */
		if (old_crtc_state->bigjoiner) {
			struct intel_crtc *slave =
				old_crtc_state->bigjoiner_linked_crtc;

			intel_pre_plane_update(state, slave);
		}

		if (old_crtc_state->hw.active)
			intel_old_crtc_state_disables(state, old_crtc_state,
						      new_crtc_state, crtc);
	}
}
9983
9984 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
9985 {
9986         struct intel_crtc_state *new_crtc_state;
9987         struct intel_crtc *crtc;
9988         int i;
9989
9990         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
9991                 if (!new_crtc_state->hw.active)
9992                         continue;
9993
9994                 intel_enable_crtc(state, crtc);
9995                 intel_update_crtc(state, crtc);
9996         }
9997 }
9998
/*
 * SKL+ enable/update path. CRTCs are committed in an order that
 * guarantees a pipe's new DDB allocation never overlaps another pipe's
 * still-current allocation:
 *  1) fastset pipes, retried until their new DDB no longer overlaps,
 *  2) modeset pipes with no dependency on other pipes,
 *  3) modeset pipes that depend on others (MST slaves, port sync
 *     masters, bigjoiner masters),
 *  4) plane updates for all pipes enabled in steps 2-3.
 */
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
	u8 update_pipes = 0, modeset_pipes = 0;
	int i;

	/* Seed 'entries' with the currently-programmed DDB of fastset pipes. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if (!new_crtc_state->hw.active)
			continue;

		/* ignore allocations for crtc's that have been turned off. */
		if (!intel_crtc_needs_modeset(new_crtc_state)) {
			entries[pipe] = old_crtc_state->wm.skl.ddb;
			update_pipes |= BIT(pipe);
		} else {
			modeset_pipes |= BIT(pipe);
		}
	}

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other between CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 *
	 * So first lets enable all pipes that do not need a fullmodeset as
	 * those don't have any external dependency.
	 */
	while (update_pipes) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
						    new_crtc_state, i) {
			enum pipe pipe = crtc->pipe;

			if ((update_pipes & BIT(pipe)) == 0)
				continue;

			/* Defer this pipe until the overlapping one has moved. */
			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
							entries, I915_MAX_PIPES, pipe))
				continue;

			entries[pipe] = new_crtc_state->wm.skl.ddb;
			update_pipes &= ~BIT(pipe);

			intel_update_crtc(state, crtc);

			/*
			 * If this is an already active pipe, it's DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
						 &old_crtc_state->wm.skl.ddb) &&
			    (update_pipes | modeset_pipes))
				intel_wait_for_vblank(dev_priv, pipe);
		}
	}

	/* Plane updates for the modeset pipes happen in the last loop below. */
	update_pipes = modeset_pipes;

	/*
	 * Enable all pipes that needs a modeset and do not depends on other
	 * pipes
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
		    is_trans_port_sync_master(new_crtc_state) ||
		    (new_crtc_state->bigjoiner && !new_crtc_state->bigjoiner_slave))
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Then we enable all remaining pipes that depend on other
	 * pipes: MST slaves and port sync masters, big joiner master
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Finally we do the plane updates/etc. for all pipes that got enabled.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((update_pipes & BIT(pipe)) == 0)
			continue;

		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
									entries, I915_MAX_PIPES, pipe));

		entries[pipe] = new_crtc_state->wm.skl.ddb;
		update_pipes &= ~BIT(pipe);

		intel_update_crtc(state, crtc);
	}

	/* Every pipe must have been handled by now. */
	drm_WARN_ON(&dev_priv->drm, modeset_pipes);
	drm_WARN_ON(&dev_priv->drm, update_pipes);
}
10120
10121 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
10122 {
10123         struct intel_atomic_state *state, *next;
10124         struct llist_node *freed;
10125
10126         freed = llist_del_all(&dev_priv->atomic_helper.free_list);
10127         llist_for_each_entry_safe(state, next, freed, freed)
10128                 drm_atomic_state_put(&state->base);
10129 }
10130
10131 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
10132 {
10133         struct drm_i915_private *dev_priv =
10134                 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
10135
10136         intel_atomic_helper_free_state(dev_priv);
10137 }
10138
/*
 * Wait for the commit's i915_sw_fence to signal, while also waking up
 * if a GPU-reset modeset (I915_RESET_MODESET) is flagged, so that the
 * commit does not deadlock against the reset path.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);
	for (;;) {
		/* Queue ourselves on both wait queues before re-checking. */
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
					      I915_RESET_MODESET),
				&wait_reset, TASK_UNINTERRUPTIBLE);


		/* Either condition ends the wait. */
		if (i915_sw_fence_done(&intel_state->commit_ready) ||
		    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
	finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
				  I915_RESET_MODESET),
		    &wait_reset);
}
10165
10166 static void intel_cleanup_dsbs(struct intel_atomic_state *state)
10167 {
10168         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
10169         struct intel_crtc *crtc;
10170         int i;
10171
10172         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10173                                             new_crtc_state, i)
10174                 intel_dsb_cleanup(old_crtc_state);
10175 }
10176
10177 static void intel_atomic_cleanup_work(struct work_struct *work)
10178 {
10179         struct intel_atomic_state *state =
10180                 container_of(work, struct intel_atomic_state, base.commit_work);
10181         struct drm_i915_private *i915 = to_i915(state->base.dev);
10182
10183         intel_cleanup_dsbs(state);
10184         drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
10185         drm_atomic_helper_commit_cleanup_done(&state->base);
10186         drm_atomic_state_put(&state->base);
10187
10188         intel_atomic_helper_free_state(i915);
10189 }
10190
/*
 * For framebuffers using the GEN12 render-compressed CCS modifier with
 * clear color, read the HW-native clear color value out of the clear
 * color plane (fb plane #2) into the plane state for later programming.
 */
static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_plane *plane;
	struct intel_plane_state *plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct drm_framebuffer *fb = plane_state->hw.fb;
		int ret;

		/* Only the CCS-with-clear-color modifier carries a clear color plane. */
		if (!fb ||
		    fb->modifier != I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
			continue;

		/*
		 * The layout of the fast clear color value expected by HW
		 * (the DRM ABI requiring this value to be located in fb at offset 0 of plane#2):
		 * - 4 x 4 bytes per-channel value
		 *   (in surface type specific float/int format provided by the fb user)
		 * - 8 bytes native color value used by the display
		 *   (converted/written by GPU during a fast clear operation using the
		 *    above per-channel values)
		 *
		 * The commit's FB prepare hook already ensured that FB obj is pinned and the
		 * caller made sure that the object is synced wrt. the related color clear value
		 * GPU write on it.
		 */
		/* +16 skips the 4x4-byte per-channel values described above. */
		ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
						     fb->offsets[2] + 16,
						     &plane_state->ccval,
						     sizeof(plane_state->ccval));
		/* The above could only fail if the FB obj has an unexpected backing store type. */
		drm_WARN_ON(&i915->drm, ret);
	}
}
10227
10228 static void intel_atomic_commit_tail(struct intel_atomic_state *state)
10229 {
10230         struct drm_device *dev = state->base.dev;
10231         struct drm_i915_private *dev_priv = to_i915(dev);
10232         struct intel_crtc_state *new_crtc_state, *old_crtc_state;
10233         struct intel_crtc *crtc;
10234         u64 put_domains[I915_MAX_PIPES] = {};
10235         intel_wakeref_t wakeref = 0;
10236         int i;
10237
10238         intel_atomic_commit_fence_wait(state);
10239
10240         drm_atomic_helper_wait_for_dependencies(&state->base);
10241
10242         if (state->modeset)
10243                 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
10244
10245         intel_atomic_prepare_plane_clear_colors(state);
10246
10247         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10248                                             new_crtc_state, i) {
10249                 if (intel_crtc_needs_modeset(new_crtc_state) ||
10250                     new_crtc_state->update_pipe) {
10251
10252                         put_domains[crtc->pipe] =
10253                                 modeset_get_crtc_power_domains(new_crtc_state);
10254                 }
10255         }
10256
10257         intel_commit_modeset_disables(state);
10258
10259         /* FIXME: Eventually get rid of our crtc->config pointer */
10260         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
10261                 crtc->config = new_crtc_state;
10262
10263         if (state->modeset) {
10264                 drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
10265
10266                 intel_set_cdclk_pre_plane_update(state);
10267
10268                 intel_modeset_verify_disabled(dev_priv, state);
10269         }
10270
10271         intel_sagv_pre_plane_update(state);
10272
10273         /* Complete the events for pipes that have now been disabled */
10274         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10275                 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
10276
10277                 /* Complete events for now disable pipes here. */
10278                 if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
10279                         spin_lock_irq(&dev->event_lock);
10280                         drm_crtc_send_vblank_event(&crtc->base,
10281                                                    new_crtc_state->uapi.event);
10282                         spin_unlock_irq(&dev->event_lock);
10283
10284                         new_crtc_state->uapi.event = NULL;
10285                 }
10286         }
10287
10288         if (state->modeset)
10289                 intel_encoders_update_prepare(state);
10290
10291         intel_dbuf_pre_plane_update(state);
10292         intel_psr_pre_plane_update(state);
10293
10294         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10295                 if (new_crtc_state->uapi.async_flip)
10296                         intel_crtc_enable_flip_done(state, crtc);
10297         }
10298
10299         /* Now enable the clocks, plane, pipe, and connectors that we set up. */
10300         dev_priv->display->commit_modeset_enables(state);
10301
10302         if (state->modeset) {
10303                 intel_encoders_update_complete(state);
10304
10305                 intel_set_cdclk_post_plane_update(state);
10306         }
10307
10308         /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
10309          * already, but still need the state for the delayed optimization. To
10310          * fix this:
10311          * - wrap the optimization/post_plane_update stuff into a per-crtc work.
10312          * - schedule that vblank worker _before_ calling hw_done
10313          * - at the start of commit_tail, cancel it _synchrously
10314          * - switch over to the vblank wait helper in the core after that since
10315          *   we don't need out special handling any more.
10316          */
10317         drm_atomic_helper_wait_for_flip_done(dev, &state->base);
10318
10319         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10320                 if (new_crtc_state->uapi.async_flip)
10321                         intel_crtc_disable_flip_done(state, crtc);
10322
10323                 if (new_crtc_state->hw.active &&
10324                     !intel_crtc_needs_modeset(new_crtc_state) &&
10325                     !new_crtc_state->preload_luts &&
10326                     (new_crtc_state->uapi.color_mgmt_changed ||
10327                      new_crtc_state->update_pipe))
10328                         intel_color_load_luts(new_crtc_state);
10329         }
10330
10331         /*
10332          * Now that the vblank has passed, we can go ahead and program the
10333          * optimal watermarks on platforms that need two-step watermark
10334          * programming.
10335          *
10336          * TODO: Move this (and other cleanup) to an async worker eventually.
10337          */
10338         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10339                                             new_crtc_state, i) {
10340                 /*
10341                  * Gen2 reports pipe underruns whenever all planes are disabled.
10342                  * So re-enable underrun reporting after some planes get enabled.
10343                  *
10344                  * We do this before .optimize_watermarks() so that we have a
10345                  * chance of catching underruns with the intermediate watermarks
10346                  * vs. the new plane configuration.
10347                  */
10348                 if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
10349                         intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
10350
10351                 intel_optimize_watermarks(state, crtc);
10352         }
10353
10354         intel_dbuf_post_plane_update(state);
10355         intel_psr_post_plane_update(state);
10356
10357         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10358                 intel_post_plane_update(state, crtc);
10359
10360                 modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);
10361
10362                 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
10363
10364                 /*
10365                  * DSB cleanup is done in cleanup_work aligning with framebuffer
10366                  * cleanup. So copy and reset the dsb structure to sync with
10367                  * commit_done and later do dsb cleanup in cleanup_work.
10368                  */
10369                 old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
10370         }
10371
10372         /* Underruns don't always raise interrupts, so check manually */
10373         intel_check_cpu_fifo_underruns(dev_priv);
10374         intel_check_pch_fifo_underruns(dev_priv);
10375
10376         if (state->modeset)
10377                 intel_verify_planes(state);
10378
10379         intel_sagv_post_plane_update(state);
10380
10381         drm_atomic_helper_commit_hw_done(&state->base);
10382
10383         if (state->modeset) {
10384                 /* As one of the primary mmio accessors, KMS has a high
10385                  * likelihood of triggering bugs in unclaimed access. After we
10386                  * finish modesetting, see if an error has been flagged, and if
10387                  * so enable debugging for the next modeset - and hope we catch
10388                  * the culprit.
10389                  */
10390                 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
10391                 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
10392         }
10393         intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
10394
10395         /*
10396          * Defer the cleanup of the old state to a separate worker to not
10397          * impede the current task (userspace for blocking modesets) that
10398          * are executed inline. For out-of-line asynchronous modesets/flips,
10399          * deferring to a new worker seems overkill, but we would place a
10400          * schedule point (cond_resched()) here anyway to keep latencies
10401          * down.
10402          */
10403         INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
10404         queue_work(system_highpri_wq, &state->base.commit_work);
10405 }
10406
10407 static void intel_atomic_commit_work(struct work_struct *work)
10408 {
10409         struct intel_atomic_state *state =
10410                 container_of(work, struct intel_atomic_state, base.commit_work);
10411
10412         intel_atomic_commit_tail(state);
10413 }
10414
10415 static int __i915_sw_fence_call
10416 intel_atomic_commit_ready(struct i915_sw_fence *fence,
10417                           enum i915_sw_fence_notify notify)
10418 {
10419         struct intel_atomic_state *state =
10420                 container_of(fence, struct intel_atomic_state, commit_ready);
10421
10422         switch (notify) {
10423         case FENCE_COMPLETE:
10424                 /* we do blocking waits in the worker, nothing to do here */
10425                 break;
10426         case FENCE_FREE:
10427                 {
10428                         struct intel_atomic_helper *helper =
10429                                 &to_i915(state->base.dev)->atomic_helper;
10430
10431                         if (llist_add(&state->freed, &helper->free_list))
10432                                 schedule_work(&helper->free_work);
10433                         break;
10434                 }
10435         }
10436
10437         return NOTIFY_DONE;
10438 }
10439
10440 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
10441 {
10442         struct intel_plane_state *old_plane_state, *new_plane_state;
10443         struct intel_plane *plane;
10444         int i;
10445
10446         for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
10447                                              new_plane_state, i)
10448                 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
10449                                         to_intel_frontbuffer(new_plane_state->hw.fb),
10450                                         plane->frontbuffer_bit);
10451 }
10452
/*
 * Top-level atomic commit entry point (drm_mode_config_funcs.atomic_commit).
 *
 * Takes a runtime PM wakeref for the duration of the commit, sets up the
 * commit_ready sw fence, swaps in the new state and either runs the commit
 * tail inline (blocking) or queues it on the modeset/flip workqueue
 * (nonblocking). Returns 0 on success or a negative error code; on error
 * all resources acquired here are released before returning.
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *_state,
			       bool nonblock)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	/* Reference dropped by the cleanup worker queued from the tail. */
	drm_atomic_state_get(&state->base);
	i915_sw_fence_init(&state->commit_ready,
			   intel_atomic_commit_ready);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->base.legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(state);
	if (ret) {
		drm_dbg_atomic(&dev_priv->drm,
			       "Preparing state failed with %i\n", ret);
		/* Commit the fence so FENCE_FREE cleanup can run. */
		i915_sw_fence_commit(&state->commit_ready);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}

	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
	if (!ret)
		ret = drm_atomic_helper_swap_state(&state->base, true);
	if (!ret)
		intel_atomic_swap_global_state(state);

	if (ret) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		i915_sw_fence_commit(&state->commit_ready);

		/* DSBs were prepared in intel_atomic_prepare_commit(). */
		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			intel_dsb_cleanup(new_crtc_state);

		drm_atomic_helper_cleanup_planes(dev, &state->base);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}
	intel_shared_dpll_swap_state(state);
	intel_atomic_track_fbs(state);

	/* Extra reference for the commit tail / worker. */
	drm_atomic_state_get(&state->base);
	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

	i915_sw_fence_commit(&state->commit_ready);
	if (nonblock && state->modeset) {
		queue_work(dev_priv->modeset_wq, &state->base.commit_work);
	} else if (nonblock) {
		queue_work(dev_priv->flip_wq, &state->base.commit_work);
	} else {
		/* Serialize a blocking modeset behind any queued ones. */
		if (state->modeset)
			flush_workqueue(dev_priv->modeset_wq);
		intel_atomic_commit_tail(state);
	}

	return 0;
}
10543
/*
 * Wait-queue entry armed on a crtc's vblank waitqueue; used to boost
 * GPU frequency (RPS) if the flip's render request hasn't started by
 * the time the vblank fires (see do_rps_boost()).
 */
struct wait_rps_boost {
	struct wait_queue_entry wait;

	struct drm_crtc *crtc;
	struct i915_request *request;
};
10550
10551 static int do_rps_boost(struct wait_queue_entry *_wait,
10552                         unsigned mode, int sync, void *key)
10553 {
10554         struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
10555         struct i915_request *rq = wait->request;
10556
10557         /*
10558          * If we missed the vblank, but the request is already running it
10559          * is reasonable to assume that it will complete before the next
10560          * vblank without our intervention, so leave RPS alone.
10561          */
10562         if (!i915_request_started(rq))
10563                 intel_rps_boost(rq);
10564         i915_request_put(rq);
10565
10566         drm_crtc_vblank_put(wait->crtc);
10567
10568         list_del(&wait->wait.entry);
10569         kfree(wait);
10570         return 1;
10571 }
10572
10573 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
10574                                        struct dma_fence *fence)
10575 {
10576         struct wait_rps_boost *wait;
10577
10578         if (!dma_fence_is_i915(fence))
10579                 return;
10580
10581         if (DISPLAY_VER(to_i915(crtc->dev)) < 6)
10582                 return;
10583
10584         if (drm_crtc_vblank_get(crtc))
10585                 return;
10586
10587         wait = kmalloc(sizeof(*wait), GFP_KERNEL);
10588         if (!wait) {
10589                 drm_crtc_vblank_put(crtc);
10590                 return;
10591         }
10592
10593         wait->request = to_request(dma_fence_get(fence));
10594         wait->crtc = crtc;
10595
10596         wait->wait.func = do_rps_boost;
10597         wait->wait.flags = 0;
10598
10599         add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
10600 }
10601
10602 int intel_plane_pin_fb(struct intel_plane_state *plane_state)
10603 {
10604         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
10605         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10606         struct drm_framebuffer *fb = plane_state->hw.fb;
10607         struct i915_vma *vma;
10608         bool phys_cursor =
10609                 plane->id == PLANE_CURSOR &&
10610                 INTEL_INFO(dev_priv)->display.cursor_needs_physical;
10611
10612         if (!intel_fb_uses_dpt(fb)) {
10613                 vma = intel_pin_and_fence_fb_obj(fb, phys_cursor,
10614                                                  &plane_state->view.gtt,
10615                                                  intel_plane_uses_fence(plane_state),
10616                                                  &plane_state->flags);
10617                 if (IS_ERR(vma))
10618                         return PTR_ERR(vma);
10619
10620                 plane_state->ggtt_vma = vma;
10621         } else {
10622                 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
10623
10624                 vma = intel_dpt_pin(intel_fb->dpt_vm);
10625                 if (IS_ERR(vma))
10626                         return PTR_ERR(vma);
10627
10628                 plane_state->ggtt_vma = vma;
10629
10630                 vma = intel_pin_fb_obj_dpt(fb, &plane_state->view.gtt, false,
10631                                            &plane_state->flags, intel_fb->dpt_vm);
10632                 if (IS_ERR(vma)) {
10633                         intel_dpt_unpin(intel_fb->dpt_vm);
10634                         plane_state->ggtt_vma = NULL;
10635                         return PTR_ERR(vma);
10636                 }
10637
10638                 plane_state->dpt_vma = vma;
10639
10640                 WARN_ON(plane_state->ggtt_vma == plane_state->dpt_vma);
10641         }
10642
10643         return 0;
10644 }
10645
10646 void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
10647 {
10648         struct drm_framebuffer *fb = old_plane_state->hw.fb;
10649         struct i915_vma *vma;
10650
10651         if (!intel_fb_uses_dpt(fb)) {
10652                 vma = fetch_and_zero(&old_plane_state->ggtt_vma);
10653                 if (vma)
10654                         intel_unpin_fb_vma(vma, old_plane_state->flags);
10655         } else {
10656                 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
10657
10658                 vma = fetch_and_zero(&old_plane_state->dpt_vma);
10659                 if (vma)
10660                         intel_unpin_fb_vma(vma, old_plane_state->flags);
10661
10662                 vma = fetch_and_zero(&old_plane_state->ggtt_vma);
10663                 if (vma)
10664                         intel_dpt_unpin(intel_fb->dpt_vm);
10665         }
10666 }
10667
10668 /**
10669  * intel_prepare_plane_fb - Prepare fb for usage on plane
10670  * @_plane: drm plane to prepare for
10671  * @_new_plane_state: the plane state being prepared
10672  *
10673  * Prepares a framebuffer for usage on a display plane.  Generally this
10674  * involves pinning the underlying object and updating the frontbuffer tracking
10675  * bits.  Some older platforms need special physical address handling for
10676  * cursor planes.
10677  *
10678  * Returns 0 on success, negative error code on failure.
10679  */
10680 int
10681 intel_prepare_plane_fb(struct drm_plane *_plane,
10682                        struct drm_plane_state *_new_plane_state)
10683 {
10684         struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY };
10685         struct intel_plane *plane = to_intel_plane(_plane);
10686         struct intel_plane_state *new_plane_state =
10687                 to_intel_plane_state(_new_plane_state);
10688         struct intel_atomic_state *state =
10689                 to_intel_atomic_state(new_plane_state->uapi.state);
10690         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10691         const struct intel_plane_state *old_plane_state =
10692                 intel_atomic_get_old_plane_state(state, plane);
10693         struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
10694         struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
10695         int ret;
10696
10697         if (old_obj) {
10698                 const struct intel_crtc_state *crtc_state =
10699                         intel_atomic_get_new_crtc_state(state,
10700                                                         to_intel_crtc(old_plane_state->hw.crtc));
10701
10702                 /* Big Hammer, we also need to ensure that any pending
10703                  * MI_WAIT_FOR_EVENT inside a user batch buffer on the
10704                  * current scanout is retired before unpinning the old
10705                  * framebuffer. Note that we rely on userspace rendering
10706                  * into the buffer attached to the pipe they are waiting
10707                  * on. If not, userspace generates a GPU hang with IPEHR
10708                  * point to the MI_WAIT_FOR_EVENT.
10709                  *
10710                  * This should only fail upon a hung GPU, in which case we
10711                  * can safely continue.
10712                  */
10713                 if (intel_crtc_needs_modeset(crtc_state)) {
10714                         ret = i915_sw_fence_await_reservation(&state->commit_ready,
10715                                                               old_obj->base.resv, NULL,
10716                                                               false, 0,
10717                                                               GFP_KERNEL);
10718                         if (ret < 0)
10719                                 return ret;
10720                 }
10721         }
10722
10723         if (new_plane_state->uapi.fence) { /* explicit fencing */
10724                 i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
10725                                              &attr);
10726                 ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
10727                                                     new_plane_state->uapi.fence,
10728                                                     i915_fence_timeout(dev_priv),
10729                                                     GFP_KERNEL);
10730                 if (ret < 0)
10731                         return ret;
10732         }
10733
10734         if (!obj)
10735                 return 0;
10736
10737
10738         ret = intel_plane_pin_fb(new_plane_state);
10739         if (ret)
10740                 return ret;
10741
10742         i915_gem_object_wait_priority(obj, 0, &attr);
10743
10744         if (!new_plane_state->uapi.fence) { /* implicit fencing */
10745                 struct dma_fence *fence;
10746
10747                 ret = i915_sw_fence_await_reservation(&state->commit_ready,
10748                                                       obj->base.resv, NULL,
10749                                                       false,
10750                                                       i915_fence_timeout(dev_priv),
10751                                                       GFP_KERNEL);
10752                 if (ret < 0)
10753                         goto unpin_fb;
10754
10755                 fence = dma_resv_get_excl_unlocked(obj->base.resv);
10756                 if (fence) {
10757                         add_rps_boost_after_vblank(new_plane_state->hw.crtc,
10758                                                    fence);
10759                         dma_fence_put(fence);
10760                 }
10761         } else {
10762                 add_rps_boost_after_vblank(new_plane_state->hw.crtc,
10763                                            new_plane_state->uapi.fence);
10764         }
10765
10766         /*
10767          * We declare pageflips to be interactive and so merit a small bias
10768          * towards upclocking to deliver the frame on time. By only changing
10769          * the RPS thresholds to sample more regularly and aim for higher
10770          * clocks we can hopefully deliver low power workloads (like kodi)
10771          * that are not quite steady state without resorting to forcing
10772          * maximum clocks following a vblank miss (see do_rps_boost()).
10773          */
10774         if (!state->rps_interactive) {
10775                 intel_rps_mark_interactive(&dev_priv->gt.rps, true);
10776                 state->rps_interactive = true;
10777         }
10778
10779         return 0;
10780
10781 unpin_fb:
10782         intel_plane_unpin_fb(new_plane_state);
10783
10784         return ret;
10785 }
10786
10787 /**
10788  * intel_cleanup_plane_fb - Cleans up an fb after plane use
10789  * @plane: drm plane to clean up for
10790  * @_old_plane_state: the state from the previous modeset
10791  *
10792  * Cleans up a framebuffer that has just been removed from a plane.
10793  */
10794 void
10795 intel_cleanup_plane_fb(struct drm_plane *plane,
10796                        struct drm_plane_state *_old_plane_state)
10797 {
10798         struct intel_plane_state *old_plane_state =
10799                 to_intel_plane_state(_old_plane_state);
10800         struct intel_atomic_state *state =
10801                 to_intel_atomic_state(old_plane_state->uapi.state);
10802         struct drm_i915_private *dev_priv = to_i915(plane->dev);
10803         struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
10804
10805         if (!obj)
10806                 return;
10807
10808         if (state->rps_interactive) {
10809                 intel_rps_mark_interactive(&dev_priv->gt.rps, false);
10810                 state->rps_interactive = false;
10811         }
10812
10813         /* Should only be called after a successful intel_prepare_plane_fb()! */
10814         intel_plane_unpin_fb(old_plane_state);
10815 }
10816
10817 /**
10818  * intel_plane_destroy - destroy a plane
10819  * @plane: plane to destroy
10820  *
10821  * Common destruction function for all types of planes (primary, cursor,
10822  * sprite).
10823  */
10824 void intel_plane_destroy(struct drm_plane *plane)
10825 {
10826         drm_plane_cleanup(plane);
10827         kfree(to_intel_plane(plane));
10828 }
10829
10830 static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
10831 {
10832         struct intel_plane *plane;
10833
10834         for_each_intel_plane(&dev_priv->drm, plane) {
10835                 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
10836                                                                   plane->pipe);
10837
10838                 plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
10839         }
10840 }
10841
10842
10843 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
10844                                       struct drm_file *file)
10845 {
10846         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
10847         struct drm_crtc *drmmode_crtc;
10848         struct intel_crtc *crtc;
10849
10850         drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
10851         if (!drmmode_crtc)
10852                 return -ENOENT;
10853
10854         crtc = to_intel_crtc(drmmode_crtc);
10855         pipe_from_crtc_id->pipe = crtc->pipe;
10856
10857         return 0;
10858 }
10859
10860 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
10861 {
10862         struct drm_device *dev = encoder->base.dev;
10863         struct intel_encoder *source_encoder;
10864         u32 possible_clones = 0;
10865
10866         for_each_intel_encoder(dev, source_encoder) {
10867                 if (encoders_cloneable(encoder, source_encoder))
10868                         possible_clones |= drm_encoder_mask(&source_encoder->base);
10869         }
10870
10871         return possible_clones;
10872 }
10873
10874 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
10875 {
10876         struct drm_device *dev = encoder->base.dev;
10877         struct intel_crtc *crtc;
10878         u32 possible_crtcs = 0;
10879
10880         for_each_intel_crtc(dev, crtc) {
10881                 if (encoder->pipe_mask & BIT(crtc->pipe))
10882                         possible_crtcs |= drm_crtc_mask(&crtc->base);
10883         }
10884
10885         return possible_crtcs;
10886 }
10887
10888 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
10889 {
10890         if (!IS_MOBILE(dev_priv))
10891                 return false;
10892
10893         if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
10894                 return false;
10895
10896         if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
10897                 return false;
10898
10899         return true;
10900 }
10901
10902 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
10903 {
10904         if (DISPLAY_VER(dev_priv) >= 9)
10905                 return false;
10906
10907         if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
10908                 return false;
10909
10910         if (HAS_PCH_LPT_H(dev_priv) &&
10911             intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
10912                 return false;
10913
10914         /* DDI E can't be used if DDI A requires 4 lanes */
10915         if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
10916                 return false;
10917
10918         if (!dev_priv->vbt.int_crt_support)
10919                 return false;
10920
10921         return true;
10922 }
10923
10924 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
10925 {
10926         struct intel_encoder *encoder;
10927         bool dpd_is_edp = false;
10928
10929         intel_pps_unlock_regs_wa(dev_priv);
10930
10931         if (!HAS_DISPLAY(dev_priv))
10932                 return;
10933
10934         if (IS_DG2(dev_priv)) {
10935                 intel_ddi_init(dev_priv, PORT_A);
10936                 intel_ddi_init(dev_priv, PORT_B);
10937                 intel_ddi_init(dev_priv, PORT_C);
10938                 intel_ddi_init(dev_priv, PORT_D_XELPD);
10939         } else if (IS_ALDERLAKE_P(dev_priv)) {
10940                 intel_ddi_init(dev_priv, PORT_A);
10941                 intel_ddi_init(dev_priv, PORT_B);
10942                 intel_ddi_init(dev_priv, PORT_TC1);
10943                 intel_ddi_init(dev_priv, PORT_TC2);
10944                 intel_ddi_init(dev_priv, PORT_TC3);
10945                 intel_ddi_init(dev_priv, PORT_TC4);
10946                 icl_dsi_init(dev_priv);
10947         } else if (IS_ALDERLAKE_S(dev_priv)) {
10948                 intel_ddi_init(dev_priv, PORT_A);
10949                 intel_ddi_init(dev_priv, PORT_TC1);
10950                 intel_ddi_init(dev_priv, PORT_TC2);
10951                 intel_ddi_init(dev_priv, PORT_TC3);
10952                 intel_ddi_init(dev_priv, PORT_TC4);
10953         } else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
10954                 intel_ddi_init(dev_priv, PORT_A);
10955                 intel_ddi_init(dev_priv, PORT_B);
10956                 intel_ddi_init(dev_priv, PORT_TC1);
10957                 intel_ddi_init(dev_priv, PORT_TC2);
10958         } else if (DISPLAY_VER(dev_priv) >= 12) {
10959                 intel_ddi_init(dev_priv, PORT_A);
10960                 intel_ddi_init(dev_priv, PORT_B);
10961                 intel_ddi_init(dev_priv, PORT_TC1);
10962                 intel_ddi_init(dev_priv, PORT_TC2);
10963                 intel_ddi_init(dev_priv, PORT_TC3);
10964                 intel_ddi_init(dev_priv, PORT_TC4);
10965                 intel_ddi_init(dev_priv, PORT_TC5);
10966                 intel_ddi_init(dev_priv, PORT_TC6);
10967                 icl_dsi_init(dev_priv);
10968         } else if (IS_JSL_EHL(dev_priv)) {
10969                 intel_ddi_init(dev_priv, PORT_A);
10970                 intel_ddi_init(dev_priv, PORT_B);
10971                 intel_ddi_init(dev_priv, PORT_C);
10972                 intel_ddi_init(dev_priv, PORT_D);
10973                 icl_dsi_init(dev_priv);
10974         } else if (DISPLAY_VER(dev_priv) == 11) {
10975                 intel_ddi_init(dev_priv, PORT_A);
10976                 intel_ddi_init(dev_priv, PORT_B);
10977                 intel_ddi_init(dev_priv, PORT_C);
10978                 intel_ddi_init(dev_priv, PORT_D);
10979                 intel_ddi_init(dev_priv, PORT_E);
10980                 intel_ddi_init(dev_priv, PORT_F);
10981                 icl_dsi_init(dev_priv);
10982         } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
10983                 intel_ddi_init(dev_priv, PORT_A);
10984                 intel_ddi_init(dev_priv, PORT_B);
10985                 intel_ddi_init(dev_priv, PORT_C);
10986                 vlv_dsi_init(dev_priv);
10987         } else if (DISPLAY_VER(dev_priv) >= 9) {
10988                 intel_ddi_init(dev_priv, PORT_A);
10989                 intel_ddi_init(dev_priv, PORT_B);
10990                 intel_ddi_init(dev_priv, PORT_C);
10991                 intel_ddi_init(dev_priv, PORT_D);
10992                 intel_ddi_init(dev_priv, PORT_E);
10993         } else if (HAS_DDI(dev_priv)) {
10994                 u32 found;
10995
10996                 if (intel_ddi_crt_present(dev_priv))
10997                         intel_crt_init(dev_priv);
10998
10999                 /* Haswell uses DDI functions to detect digital outputs. */
11000                 found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
11001                 if (found)
11002                         intel_ddi_init(dev_priv, PORT_A);
11003
11004                 found = intel_de_read(dev_priv, SFUSE_STRAP);
11005                 if (found & SFUSE_STRAP_DDIB_DETECTED)
11006                         intel_ddi_init(dev_priv, PORT_B);
11007                 if (found & SFUSE_STRAP_DDIC_DETECTED)
11008                         intel_ddi_init(dev_priv, PORT_C);
11009                 if (found & SFUSE_STRAP_DDID_DETECTED)
11010                         intel_ddi_init(dev_priv, PORT_D);
11011                 if (found & SFUSE_STRAP_DDIF_DETECTED)
11012                         intel_ddi_init(dev_priv, PORT_F);
11013         } else if (HAS_PCH_SPLIT(dev_priv)) {
11014                 int found;
11015
11016                 /*
11017                  * intel_edp_init_connector() depends on this completing first,
11018                  * to prevent the registration of both eDP and LVDS and the
11019                  * incorrect sharing of the PPS.
11020                  */
11021                 intel_lvds_init(dev_priv);
11022                 intel_crt_init(dev_priv);
11023
11024                 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
11025
11026                 if (ilk_has_edp_a(dev_priv))
11027                         g4x_dp_init(dev_priv, DP_A, PORT_A);
11028
11029                 if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
11030                         /* PCH SDVOB multiplex with HDMIB */
11031                         found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
11032                         if (!found)
11033                                 g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
11034                         if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
11035                                 g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
11036                 }
11037
11038                 if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
11039                         g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
11040
11041                 if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
11042                         g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
11043
11044                 if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
11045                         g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);
11046
11047                 if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
11048                         g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
11049         } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
11050                 bool has_edp, has_port;
11051
11052                 if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
11053                         intel_crt_init(dev_priv);
11054
11055                 /*
11056                  * The DP_DETECTED bit is the latched state of the DDC
11057                  * SDA pin at boot. However since eDP doesn't require DDC
11058                  * (no way to plug in a DP->HDMI dongle) the DDC pins for
11059                  * eDP ports may have been muxed to an alternate function.
11060                  * Thus we can't rely on the DP_DETECTED bit alone to detect
11061                  * eDP ports. Consult the VBT as well as DP_DETECTED to
11062                  * detect eDP ports.
11063                  *
11064                  * Sadly the straps seem to be missing sometimes even for HDMI
11065                  * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
11066                  * and VBT for the presence of the port. Additionally we can't
11067                  * trust the port type the VBT declares as we've seen at least
11068                  * HDMI ports that the VBT claim are DP or eDP.
11069                  */
11070                 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
11071                 has_port = intel_bios_is_port_present(dev_priv, PORT_B);
11072                 if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
11073                         has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
11074                 if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
11075                         g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
11076
11077                 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
11078                 has_port = intel_bios_is_port_present(dev_priv, PORT_C);
11079                 if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
11080                         has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
11081                 if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
11082                         g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
11083
11084                 if (IS_CHERRYVIEW(dev_priv)) {
11085                         /*
11086                          * eDP not supported on port D,
11087                          * so no need to worry about it
11088                          */
11089                         has_port = intel_bios_is_port_present(dev_priv, PORT_D);
11090                         if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
11091                                 g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
11092                         if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
11093                                 g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
11094                 }
11095
11096                 vlv_dsi_init(dev_priv);
11097         } else if (IS_PINEVIEW(dev_priv)) {
11098                 intel_lvds_init(dev_priv);
11099                 intel_crt_init(dev_priv);
11100         } else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
11101                 bool found = false;
11102
11103                 if (IS_MOBILE(dev_priv))
11104                         intel_lvds_init(dev_priv);
11105
11106                 intel_crt_init(dev_priv);
11107
11108                 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
11109                         drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
11110                         found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
11111                         if (!found && IS_G4X(dev_priv)) {
11112                                 drm_dbg_kms(&dev_priv->drm,
11113                                             "probing HDMI on SDVOB\n");
11114                                 g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
11115                         }
11116
11117                         if (!found && IS_G4X(dev_priv))
11118                                 g4x_dp_init(dev_priv, DP_B, PORT_B);
11119                 }
11120
11121                 /* Before G4X SDVOC doesn't have its own detect register */
11122
11123                 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
11124                         drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
11125                         found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
11126                 }
11127
11128                 if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
11129
11130                         if (IS_G4X(dev_priv)) {
11131                                 drm_dbg_kms(&dev_priv->drm,
11132                                             "probing HDMI on SDVOC\n");
11133                                 g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
11134                         }
11135                         if (IS_G4X(dev_priv))
11136                                 g4x_dp_init(dev_priv, DP_C, PORT_C);
11137                 }
11138
11139                 if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
11140                         g4x_dp_init(dev_priv, DP_D, PORT_D);
11141
11142                 if (SUPPORTS_TV(dev_priv))
11143                         intel_tv_init(dev_priv);
11144         } else if (DISPLAY_VER(dev_priv) == 2) {
11145                 if (IS_I85X(dev_priv))
11146                         intel_lvds_init(dev_priv);
11147
11148                 intel_crt_init(dev_priv);
11149                 intel_dvo_init(dev_priv);
11150         }
11151
11152         for_each_intel_encoder(&dev_priv->drm, encoder) {
11153                 encoder->base.possible_crtcs =
11154                         intel_encoder_possible_crtcs(encoder);
11155                 encoder->base.possible_clones =
11156                         intel_encoder_possible_clones(encoder);
11157         }
11158
11159         intel_init_pch_refclk(dev_priv);
11160
11161         drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
11162 }
11163
11164 static enum drm_mode_status
11165 intel_mode_valid(struct drm_device *dev,
11166                  const struct drm_display_mode *mode)
11167 {
11168         struct drm_i915_private *dev_priv = to_i915(dev);
11169         int hdisplay_max, htotal_max;
11170         int vdisplay_max, vtotal_max;
11171
11172         /*
11173          * Can't reject DBLSCAN here because Xorg ddxen can add piles
11174          * of DBLSCAN modes to the output's mode list when they detect
11175          * the scaling mode property on the connector. And they don't
11176          * ask the kernel to validate those modes in any way until
11177          * modeset time at which point the client gets a protocol error.
11178          * So in order to not upset those clients we silently ignore the
11179          * DBLSCAN flag on such connectors. For other connectors we will
11180          * reject modes with the DBLSCAN flag in encoder->compute_config().
11181          * And we always reject DBLSCAN modes in connector->mode_valid()
11182          * as we never want such modes on the connector's mode list.
11183          */
11184
11185         if (mode->vscan > 1)
11186                 return MODE_NO_VSCAN;
11187
11188         if (mode->flags & DRM_MODE_FLAG_HSKEW)
11189                 return MODE_H_ILLEGAL;
11190
11191         if (mode->flags & (DRM_MODE_FLAG_CSYNC |
11192                            DRM_MODE_FLAG_NCSYNC |
11193                            DRM_MODE_FLAG_PCSYNC))
11194                 return MODE_HSYNC;
11195
11196         if (mode->flags & (DRM_MODE_FLAG_BCAST |
11197                            DRM_MODE_FLAG_PIXMUX |
11198                            DRM_MODE_FLAG_CLKDIV2))
11199                 return MODE_BAD;
11200
11201         /* Transcoder timing limits */
11202         if (DISPLAY_VER(dev_priv) >= 11) {
11203                 hdisplay_max = 16384;
11204                 vdisplay_max = 8192;
11205                 htotal_max = 16384;
11206                 vtotal_max = 8192;
11207         } else if (DISPLAY_VER(dev_priv) >= 9 ||
11208                    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
11209                 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
11210                 vdisplay_max = 4096;
11211                 htotal_max = 8192;
11212                 vtotal_max = 8192;
11213         } else if (DISPLAY_VER(dev_priv) >= 3) {
11214                 hdisplay_max = 4096;
11215                 vdisplay_max = 4096;
11216                 htotal_max = 8192;
11217                 vtotal_max = 8192;
11218         } else {
11219                 hdisplay_max = 2048;
11220                 vdisplay_max = 2048;
11221                 htotal_max = 4096;
11222                 vtotal_max = 4096;
11223         }
11224
11225         if (mode->hdisplay > hdisplay_max ||
11226             mode->hsync_start > htotal_max ||
11227             mode->hsync_end > htotal_max ||
11228             mode->htotal > htotal_max)
11229                 return MODE_H_ILLEGAL;
11230
11231         if (mode->vdisplay > vdisplay_max ||
11232             mode->vsync_start > vtotal_max ||
11233             mode->vsync_end > vtotal_max ||
11234             mode->vtotal > vtotal_max)
11235                 return MODE_V_ILLEGAL;
11236
11237         if (DISPLAY_VER(dev_priv) >= 5) {
11238                 if (mode->hdisplay < 64 ||
11239                     mode->htotal - mode->hdisplay < 32)
11240                         return MODE_H_ILLEGAL;
11241
11242                 if (mode->vtotal - mode->vdisplay < 5)
11243                         return MODE_V_ILLEGAL;
11244         } else {
11245                 if (mode->htotal - mode->hdisplay < 32)
11246                         return MODE_H_ILLEGAL;
11247
11248                 if (mode->vtotal - mode->vdisplay < 3)
11249                         return MODE_V_ILLEGAL;
11250         }
11251
11252         return MODE_OK;
11253 }
11254
11255 enum drm_mode_status
11256 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
11257                                 const struct drm_display_mode *mode,
11258                                 bool bigjoiner)
11259 {
11260         int plane_width_max, plane_height_max;
11261
11262         /*
11263          * intel_mode_valid() should be
11264          * sufficient on older platforms.
11265          */
11266         if (DISPLAY_VER(dev_priv) < 9)
11267                 return MODE_OK;
11268
11269         /*
11270          * Most people will probably want a fullscreen
11271          * plane so let's not advertize modes that are
11272          * too big for that.
11273          */
11274         if (DISPLAY_VER(dev_priv) >= 11) {
11275                 plane_width_max = 5120 << bigjoiner;
11276                 plane_height_max = 4320;
11277         } else {
11278                 plane_width_max = 5120;
11279                 plane_height_max = 4096;
11280         }
11281
11282         if (mode->hdisplay > plane_width_max)
11283                 return MODE_H_ILLEGAL;
11284
11285         if (mode->vdisplay > plane_height_max)
11286                 return MODE_V_ILLEGAL;
11287
11288         return MODE_OK;
11289 }
11290
/* drm_mode_config_funcs implemented with i915's atomic modeset hooks */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
11302
/* Display hooks selected for display ver 9+ (see intel_init_display_hooks()) */
static const struct drm_i915_display_funcs skl_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = skl_commit_modeset_enables,
	.get_initial_plane_config = skl_get_initial_plane_config,
};
11310
/* Display hooks for pre-ver-9 DDI platforms (see intel_init_display_hooks()) */
static const struct drm_i915_display_funcs ddi_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};
11318
/* Display hooks for PCH-split, non-DDI platforms (see intel_init_display_hooks()) */
static const struct drm_i915_display_funcs pch_split_display_funcs = {
	.get_pipe_config = ilk_get_pipe_config,
	.crtc_enable = ilk_crtc_enable,
	.crtc_disable = ilk_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};
11326
/* Display hooks for VLV/CHV (see intel_init_display_hooks()) */
static const struct drm_i915_display_funcs vlv_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = valleyview_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};
11334
/* Fallback display hooks for all remaining (gmch) platforms (see intel_init_display_hooks()) */
static const struct drm_i915_display_funcs i9xx_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = i9xx_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};
11342
11343 /**
11344  * intel_init_display_hooks - initialize the display modesetting hooks
11345  * @dev_priv: device private
11346  */
11347 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
11348 {
11349         if (!HAS_DISPLAY(dev_priv))
11350                 return;
11351
11352         intel_init_cdclk_hooks(dev_priv);
11353         intel_init_audio_hooks(dev_priv);
11354
11355         intel_dpll_init_clock_hook(dev_priv);
11356
11357         if (DISPLAY_VER(dev_priv) >= 9) {
11358                 dev_priv->display = &skl_display_funcs;
11359         } else if (HAS_DDI(dev_priv)) {
11360                 dev_priv->display = &ddi_display_funcs;
11361         } else if (HAS_PCH_SPLIT(dev_priv)) {
11362                 dev_priv->display = &pch_split_display_funcs;
11363         } else if (IS_CHERRYVIEW(dev_priv) ||
11364                    IS_VALLEYVIEW(dev_priv)) {
11365                 dev_priv->display = &vlv_display_funcs;
11366         } else {
11367                 dev_priv->display = &i9xx_display_funcs;
11368         }
11369
11370         intel_fdi_init_hook(dev_priv);
11371 }
11372
11373 void intel_modeset_init_hw(struct drm_i915_private *i915)
11374 {
11375         struct intel_cdclk_state *cdclk_state;
11376
11377         if (!HAS_DISPLAY(i915))
11378                 return;
11379
11380         cdclk_state = to_intel_cdclk_state(i915->cdclk.obj.state);
11381
11382         intel_update_cdclk(i915);
11383         intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
11384         cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
11385 }
11386
11387 static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
11388 {
11389         struct drm_plane *plane;
11390         struct intel_crtc *crtc;
11391
11392         for_each_intel_crtc(state->dev, crtc) {
11393                 struct intel_crtc_state *crtc_state;
11394
11395                 crtc_state = intel_atomic_get_crtc_state(state, crtc);
11396                 if (IS_ERR(crtc_state))
11397                         return PTR_ERR(crtc_state);
11398
11399                 if (crtc_state->hw.active) {
11400                         /*
11401                          * Preserve the inherited flag to avoid
11402                          * taking the full modeset path.
11403                          */
11404                         crtc_state->inherited = true;
11405                 }
11406         }
11407
11408         drm_for_each_plane(plane, state->dev) {
11409                 struct drm_plane_state *plane_state;
11410
11411                 plane_state = drm_atomic_get_plane_state(state, plane);
11412                 if (IS_ERR(plane_state))
11413                         return PTR_ERR(plane_state);
11414         }
11415
11416         return 0;
11417 }
11418
11419 /*
11420  * Calculate what we think the watermarks should be for the state we've read
11421  * out of the hardware and then immediately program those watermarks so that
11422  * we ensure the hardware settings match our internal state.
11423  *
11424  * We can calculate what we think WM's should be by creating a duplicate of the
11425  * current state (which was constructed during hardware readout) and running it
11426  * through the atomic check code to calculate new watermark values in the
11427  * state object.
11428  */
static void sanitize_watermarks(struct drm_i915_private *dev_priv)
{
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->wm_disp->optimize_watermarks)
		return;

	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (drm_WARN_ON(&dev_priv->drm, !state))
		return;

	intel_state = to_intel_atomic_state(state);

	drm_modeset_acquire_init(&ctx, 0);

	/* Everything from here down reruns after a -EDEADLK backoff. */
retry:
	state->acquire_ctx = &ctx;

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH(dev_priv))
		intel_state->skip_intermediate_wm = true;

	ret = sanitize_watermarks_add_affected(state);
	if (ret)
		goto fail;

	ret = intel_atomic_check(&dev_priv->drm, state);
	if (ret)
		goto fail;

	/* Write calculated watermark values back */
	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
		crtc_state->wm.need_postvbl_update = true;
		intel_optimize_watermarks(intel_state, crtc);

		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
	}

fail:
	if (ret == -EDEADLK) {
		/* Lock contention: drop all acquired states/locks and retry. */
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	/*
	 * If we fail here, it means that the hardware appears to be
	 * programmed in a way that shouldn't be possible, given our
	 * understanding of watermark requirements.  This might mean a
	 * mistake in the hardware readout code or a mistake in the
	 * watermark calculations for a given platform.  Raise a WARN
	 * so that this is noticeable.
	 *
	 * If this actually happens, we'll have to just leave the
	 * BIOS-programmed watermarks untouched and hope for the best.
	 */
	drm_WARN(&dev_priv->drm, ret,
		 "Could not determine valid watermarks for inherited state\n");

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
11504
/*
 * Commit the state inherited from the BIOS/hardware readout so all
 * derived software state gets computed before the first userspace
 * commit. Returns 0 or a negative error code.
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

	/* Everything from here down reruns after a -EDEADLK backoff. */
retry:
	state->acquire_ctx = &ctx;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			struct intel_encoder *encoder;

			/*
			 * We've not yet detected sink capabilities
			 * (audio,infoframes,etc.) and thus we don't want to
			 * force a full state recomputation yet. We want that to
			 * happen only for the first real commit from userspace.
			 * So preserve the inherited flag for the time being.
			 */
			crtc_state->inherited = true;

			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			/*
			 * Pull in the connectors of any encoder whose
			 * fastset check says the inherited state can't
			 * be kept as-is.
			 */
			for_each_intel_encoder_mask(dev, encoder,
						    crtc_state->uapi.encoder_mask) {
				if (encoder->initial_fastset_check &&
				    !encoder->initial_fastset_check(encoder, crtc_state)) {
					ret = drm_atomic_add_affected_connectors(state,
										 &crtc->base);
					if (ret)
						goto out;
				}
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	if (ret == -EDEADLK) {
		/* Lock contention: drop all acquired states/locks and retry. */
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
11583
11584 static void intel_mode_config_init(struct drm_i915_private *i915)
11585 {
11586         struct drm_mode_config *mode_config = &i915->drm.mode_config;
11587
11588         drm_mode_config_init(&i915->drm);
11589         INIT_LIST_HEAD(&i915->global_obj_list);
11590
11591         mode_config->min_width = 0;
11592         mode_config->min_height = 0;
11593
11594         mode_config->preferred_depth = 24;
11595         mode_config->prefer_shadow = 1;
11596
11597         mode_config->funcs = &intel_mode_funcs;
11598
11599         mode_config->async_page_flip = HAS_ASYNC_FLIPS(i915);
11600
11601         /*
11602          * Maximum framebuffer dimensions, chosen to match
11603          * the maximum render engine surface size on gen4+.
11604          */
11605         if (DISPLAY_VER(i915) >= 7) {
11606                 mode_config->max_width = 16384;
11607                 mode_config->max_height = 16384;
11608         } else if (DISPLAY_VER(i915) >= 4) {
11609                 mode_config->max_width = 8192;
11610                 mode_config->max_height = 8192;
11611         } else if (DISPLAY_VER(i915) == 3) {
11612                 mode_config->max_width = 4096;
11613                 mode_config->max_height = 4096;
11614         } else {
11615                 mode_config->max_width = 2048;
11616                 mode_config->max_height = 2048;
11617         }
11618
11619         if (IS_I845G(i915) || IS_I865G(i915)) {
11620                 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
11621                 mode_config->cursor_height = 1023;
11622         } else if (IS_I830(i915) || IS_I85X(i915) ||
11623                    IS_I915G(i915) || IS_I915GM(i915)) {
11624                 mode_config->cursor_width = 64;
11625                 mode_config->cursor_height = 64;
11626         } else {
11627                 mode_config->cursor_width = 256;
11628                 mode_config->cursor_height = 256;
11629         }
11630 }
11631
/* Tear down in reverse order of intel_mode_config_init() */
static void intel_mode_config_cleanup(struct drm_i915_private *i915)
{
	intel_atomic_global_obj_cleanup(i915);
	drm_mode_config_cleanup(&i915->drm);
}
11637
11638 static void plane_config_fini(struct intel_initial_plane_config *plane_config)
11639 {
11640         if (plane_config->fb) {
11641                 struct drm_framebuffer *fb = &plane_config->fb->base;
11642
11643                 /* We may only have the stub and not a full framebuffer */
11644                 if (drm_framebuffer_read_refcount(fb))
11645                         drm_framebuffer_put(fb);
11646                 else
11647                         kfree(fb);
11648         }
11649
11650         if (plane_config->vma)
11651                 i915_vma_put(plane_config->vma);
11652 }
11653
/* part #1: call before irq install */
int intel_modeset_init_noirq(struct drm_i915_private *i915)
{
	int ret;

	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	if (HAS_DISPLAY(i915)) {
		ret = drm_vblank_init(&i915->drm,
				      INTEL_NUM_PIPES(i915));
		if (ret)
			return ret;
	}

	intel_bios_init(i915);

	ret = intel_vga_register(i915);
	if (ret)
		goto cleanup_bios;

	/* FIXME: completely on the wrong abstraction layer */
	intel_power_domains_init_hw(i915, false);

	/* Displayless devices are done after power domain setup. */
	if (!HAS_DISPLAY(i915))
		return 0;

	intel_dmc_ucode_init(i915);

	i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
	i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);

	i915->framestart_delay = 1; /* 1-4 */

	i915->window2_delay = 0; /* No DSB so no window2 delay */

	intel_mode_config_init(i915);

	ret = intel_cdclk_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	ret = intel_dbuf_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	ret = intel_bw_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	init_llist_head(&i915->atomic_helper.free_list);
	INIT_WORK(&i915->atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(i915);

	intel_fbc_init(i915);

	return 0;

	/* Error unwind: undo the setup steps in reverse order. */
cleanup_vga_client_pw_domain_dmc:
	intel_dmc_ucode_fini(i915);
	intel_power_domains_driver_remove(i915);
	intel_vga_unregister(i915);
cleanup_bios:
	intel_bios_driver_remove(i915);

	return ret;
}
11724
/* part #2: call after irq install, but before gem init */
int intel_modeset_init_nogem(struct drm_i915_private *i915)
{
	struct drm_device *dev = &i915->drm;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	if (!HAS_DISPLAY(i915))
		return 0;

	intel_init_pm(i915);

	intel_panel_sanitize_ssc(i915);

	intel_pps_setup(i915);

	intel_gmbus_setup(i915);

	drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
		    INTEL_NUM_PIPES(i915),
		    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

	/* One crtc per hardware pipe */
	for_each_pipe(i915, pipe) {
		ret = intel_crtc_init(i915, pipe);
		if (ret) {
			intel_mode_config_cleanup(i915);
			return ret;
		}
	}

	intel_plane_possible_crtcs_init(i915);
	intel_shared_dpll_init(dev);
	intel_fdi_pll_freq_update(i915);

	intel_update_czclk(i915);
	intel_modeset_init_hw(i915);
	intel_dpll_update_ref_clks(i915);

	intel_hdcp_component_init(i915);

	if (i915->max_cdclk_freq == 0)
		intel_update_max_cdclk(i915);

	/*
	 * If the platform has HTI, we need to find out whether it has reserved
	 * any display resources before we create our display outputs.
	 */
	if (INTEL_INFO(i915)->display.has_hti)
		i915->hti_state = intel_de_read(i915, HDPORT_STATE);

	/* Just disable it once at startup */
	intel_vga_disable(i915);
	intel_setup_outputs(i915);

	/* Read the current hw state back into our sw state. */
	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
	intel_acpi_assign_connector_fwnodes(i915);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top.  This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		i915->display->get_initial_plane_config(crtc, &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);

		plane_config_fini(&plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(i915))
		sanitize_watermarks(i915);

	return 0;
}
11819
/* part #3: call after gem init */
int intel_modeset_init(struct drm_i915_private *i915)
{
	int ret;

	if (!HAS_DISPLAY(i915))
		return 0;

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(&i915->drm);
	if (ret)
		/* Non-fatal: log it and keep going. */
		drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);

	intel_overlay_setup(i915);

	ret = intel_fbdev_init(&i915->drm);
	if (ret)
		return ret;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(i915);
	intel_hpd_poll_disable(i915);

	intel_init_ipc(i915);

	return 0;
}
11852
/*
 * Force-enable a pipe with fixed 640x480@60Hz timings. Counterpart of
 * i830_disable_pipe(); used to keep a pipe running due to the i830
 * "force quirk" (see the debug message below).
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* Sanity check: with a 48000 kHz refclk these dividers give 25154 kHz. */
	drm_WARN_ON(&dev_priv->drm,
		    i9xx_calc_dpll_params(48000, &clock) != 25154);

	drm_dbg_kms(&dev_priv->drm,
		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		    pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	intel_de_write(dev_priv, FP0(pipe), fp);
	intel_de_write(dev_priv, FP1(pipe), fp);

	/* Standard VGA 640x480@60 timings; hardware wants 0-based values. */
	intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		intel_de_write(dev_priv, DPLL(pipe), dpll);
		intel_de_posting_read(dev_priv, DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	/* Finally enable the pipe and wait for the scanline to start moving. */
	intel_de_write(dev_priv, PIPECONF(pipe),
		       PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	intel_wait_for_pipe_scanline_moving(crtc);
}
11925
/*
 * Disable a pipe previously force-enabled by i830_enable_pipe() and shut
 * its DPLL back down (leaving DPLL_VGA_MODE_DIS set).
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
		    pipe_name(pipe));

	/* All planes and cursors are expected to be off already. */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);

	intel_de_write(dev_priv, PIPECONF(pipe), 0);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	/* Let the pipe actually stop before touching the DPLL. */
	intel_wait_for_pipe_scanline_stopped(crtc);

	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}
11955
11956 static void
11957 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
11958 {
11959         struct intel_crtc *crtc;
11960
11961         if (DISPLAY_VER(dev_priv) >= 4)
11962                 return;
11963
11964         for_each_intel_crtc(&dev_priv->drm, crtc) {
11965                 struct intel_plane *plane =
11966                         to_intel_plane(crtc->base.primary);
11967                 struct intel_crtc *plane_crtc;
11968                 enum pipe pipe;
11969
11970                 if (!plane->get_hw_state(plane, &pipe))
11971                         continue;
11972
11973                 if (pipe == crtc->pipe)
11974                         continue;
11975
11976                 drm_dbg_kms(&dev_priv->drm,
11977                             "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
11978                             plane->base.base.id, plane->base.name);
11979
11980                 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
11981                 intel_plane_disable_noatomic(plane_crtc, plane);
11982         }
11983 }
11984
11985 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
11986 {
11987         struct drm_device *dev = crtc->base.dev;
11988         struct intel_encoder *encoder;
11989
11990         for_each_encoder_on_crtc(dev, &crtc->base, encoder)
11991                 return true;
11992
11993         return false;
11994 }
11995
11996 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
11997 {
11998         struct drm_device *dev = encoder->base.dev;
11999         struct intel_connector *connector;
12000
12001         for_each_connector_on_encoder(dev, &encoder->base, connector)
12002                 return connector;
12003
12004         return NULL;
12005 }
12006
12007 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
12008                               enum pipe pch_transcoder)
12009 {
12010         return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
12011                 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
12012 }
12013
12014 static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
12015 {
12016         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
12017         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12018         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
12019
12020         if (DISPLAY_VER(dev_priv) >= 9 ||
12021             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
12022                 i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
12023                 u32 val;
12024
12025                 if (transcoder_is_dsi(cpu_transcoder))
12026                         return;
12027
12028                 val = intel_de_read(dev_priv, reg);
12029                 val &= ~HSW_FRAME_START_DELAY_MASK;
12030                 val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12031                 intel_de_write(dev_priv, reg, val);
12032         } else {
12033                 i915_reg_t reg = PIPECONF(cpu_transcoder);
12034                 u32 val;
12035
12036                 val = intel_de_read(dev_priv, reg);
12037                 val &= ~PIPECONF_FRAME_START_DELAY_MASK;
12038                 val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12039                 intel_de_write(dev_priv, reg, val);
12040         }
12041
12042         if (!crtc_state->has_pch_encoder)
12043                 return;
12044
12045         if (HAS_PCH_IBX(dev_priv)) {
12046                 i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
12047                 u32 val;
12048
12049                 val = intel_de_read(dev_priv, reg);
12050                 val &= ~TRANS_FRAME_START_DELAY_MASK;
12051                 val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12052                 intel_de_write(dev_priv, reg, val);
12053         } else {
12054                 enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
12055                 i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
12056                 u32 val;
12057
12058                 val = intel_de_read(dev_priv, reg);
12059                 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
12060                 val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12061                 intel_de_write(dev_priv, reg, val);
12062         }
12063 }
12064
/*
 * Bring a single crtc's state into agreement with what we expect after
 * takeover from the BIOS: sanitize frame start delays, disable all
 * non-primary planes, clear any BIOS background color, disable the pipe
 * entirely if it has no active encoders, and set up the FIFO underrun
 * reporting bookkeeping.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);

	if (crtc_state->hw.active) {
		struct intel_plane *plane;

		/* Clear any frame start delays used for debugging left by the BIOS */
		intel_sanitize_frame_start_delay(crtc_state);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->uapi.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}

		/*
		 * Disable any background color set by the BIOS, but enable the
		 * gamma and CSC to match how we program our planes.
		 */
		if (DISPLAY_VER(dev_priv) >= 9)
			intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
				       SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	/* Bigjoiner slaves are handled via their master crtc. */
	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
	    !crtc_state->bigjoiner_slave)
		intel_crtc_disable_noatomic(crtc, ctx);

	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH trancoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
12131
12132 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
12133 {
12134         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
12135
12136         /*
12137          * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
12138          * the hardware when a high res displays plugged in. DPLL P
12139          * divider is zero, and the pipe timings are bonkers. We'll
12140          * try to disable everything in that case.
12141          *
12142          * FIXME would be nice to be able to sanitize this state
12143          * without several WARNs, but for now let's take the easy
12144          * road.
12145          */
12146         return IS_SANDYBRIDGE(dev_priv) &&
12147                 crtc_state->hw.active &&
12148                 crtc_state->shared_dpll &&
12149                 crtc_state->port_clock == 0;
12150 }
12151
/*
 * Fix up encoder state that is inconsistent with the crtc state read out
 * from hardware: an encoder with active connectors but no active pipe is
 * manually disabled, and the opregion is notified of the resulting state.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_crtc_state *crtc_state = crtc ?
		to_intel_crtc_state(crtc->base.state) : NULL;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = crtc_state &&
		crtc_state->hw.active;

	/* Treat a crtc with bogus DPLL config as inactive (SNB BIOS bug). */
	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
			    pipe_name(crtc->pipe));
		has_active_crtc = false;
	}

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			    encoder->base.base.id,
			    encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (crtc_state) {
			struct drm_encoder *best_encoder;

			drm_dbg_kms(&dev_priv->drm,
				    "[ENCODER:%d:%s] manually disabled\n",
				    encoder->base.base.id,
				    encoder->base.name);

			/* avoid oopsing in case the hooks consult best_encoder */
			best_encoder = connector->base.state->best_encoder;
			connector->base.state->best_encoder = &encoder->base;

			/* FIXME NULL atomic state passed! */
			if (encoder->disable)
				encoder->disable(NULL, encoder, crtc_state,
						 connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(NULL, encoder, crtc_state,
						      connector->base.state);

			/* Restore the connector's original best_encoder. */
			connector->base.state->best_encoder = best_encoder;
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	if (HAS_DDI(dev_priv))
		intel_ddi_sanitize_encoder_pll_mapping(encoder);
}
12222
/* FIXME read out full plane state for all planes */
/*
 * Read back each plane's enable state and owning pipe from hardware,
 * record the visibility in the owning crtc's state, then rebuild the
 * per-crtc plane bitmasks from the updated visibility.
 */
static void readout_plane_state(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct intel_crtc_state *crtc_state;
		enum pipe pipe = PIPE_A;
		bool visible;

		/* get_hw_state() also reports which pipe the plane is on. */
		visible = plane->get_hw_state(plane, &pipe);

		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
		crtc_state = to_intel_crtc_state(crtc->base.state);

		intel_set_plane_visible(crtc_state, plane_state, visible);

		drm_dbg_kms(&dev_priv->drm,
			    "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
			    plane->base.base.id, plane->base.name,
			    enableddisabled(visible), pipe_name(pipe));
	}

	/* Only after all planes are read out can the bitmasks be rebuilt. */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		fixup_plane_bitmasks(crtc_state);
	}
}
12256
/*
 * Rebuild the software display state — crtcs, planes, encoders, DPLLs,
 * connectors, and the cdclk/dbuf/bandwidth bookkeeping objects — from
 * what the hardware is currently doing. The crtc states are reset and
 * refilled from hardware first, then planes, encoders and connectors
 * are linked up, and finally the derived per-crtc state (min cdclk,
 * voltage level, bandwidth, bigjoiner slave copies) is computed.
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	u8 active_pipes = 0;

	/* Reset each crtc state and refill it from the hardware registers. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
		intel_crtc_free_hw_state(crtc_state);
		intel_crtc_state_reset(crtc_state, crtc);

		intel_crtc_get_pipe_config(crtc_state);

		crtc_state->hw.enable = crtc_state->hw.active;

		crtc->base.enabled = crtc_state->hw.enable;
		crtc->active = crtc_state->hw.active;

		if (crtc_state->hw.active)
			active_pipes |= BIT(crtc->pipe);

		drm_dbg_kms(&dev_priv->drm,
			    "[CRTC:%d:%s] hw state readout: %s\n",
			    crtc->base.base.id, crtc->base.name,
			    enableddisabled(crtc_state->hw.active));
	}

	cdclk_state->active_pipes = dbuf_state->active_pipes = active_pipes;

	readout_plane_state(dev_priv);

	/* Link each active encoder to the crtc its hardware reports. */
	for_each_intel_encoder(dev, encoder) {
		struct intel_crtc_state *crtc_state = NULL;

		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			intel_encoder_get_config(encoder, crtc_state);

			/* read out to slave crtc as well for bigjoiner */
			if (crtc_state->bigjoiner) {
				/* encoder should read be linked to bigjoiner master */
				WARN_ON(crtc_state->bigjoiner_slave);

				crtc = crtc_state->bigjoiner_linked_crtc;
				crtc_state = to_intel_crtc_state(crtc->base.state);
				intel_encoder_get_config(encoder, crtc_state);
			}
		} else {
			encoder->base.crtc = NULL;
		}

		/* crtc_state may be NULL here for inactive encoders. */
		if (encoder->sync_state)
			encoder->sync_state(encoder, crtc_state);

		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			    encoder->base.base.id, encoder->base.name,
			    enableddisabled(encoder->base.crtc),
			    pipe_name(pipe));
	}

	intel_dpll_readout_hw_state(dev_priv);

	/* Link connectors to encoders and fill the uapi masks. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			struct intel_crtc_state *crtc_state;
			struct intel_crtc *crtc;

			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = intel_attached_encoder(connector);
			connector->base.encoder = &encoder->base;

			crtc = to_intel_crtc(encoder->base.crtc);
			crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;

			if (crtc_state && crtc_state->hw.active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				crtc_state->uapi.connector_mask |=
					drm_connector_mask(&connector->base);
				crtc_state->uapi.encoder_mask |=
					drm_encoder_mask(&encoder->base);
			}
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		drm_dbg_kms(&dev_priv->drm,
			    "[CONNECTOR:%d:%s] hw state readout: %s\n",
			    connector->base.base.id, connector->base.name,
			    enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Derive min cdclk, voltage level, and bandwidth state per crtc. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_bw_state *bw_state =
			to_intel_bw_state(dev_priv->bw_obj.state);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane;
		int min_cdclk = 0;

		/* Slaves are handled together with their master below. */
		if (crtc_state->bigjoiner_slave)
			continue;

		if (crtc_state->hw.active) {
			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			crtc_state->inherited = true;

			intel_crtc_update_active_timings(crtc_state);

			intel_crtc_copy_hw_to_uapi_state(crtc_state);
		}

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			/*
			 * FIXME don't have the fb yet, so can't
			 * use intel_plane_data_rate() :(
			 */
			if (plane_state->uapi.visible)
				crtc_state->data_rate[plane->id] =
					4 * crtc_state->pixel_rate;
			/*
			 * FIXME don't have the fb yet, so can't
			 * use plane->min_cdclk() :(
			 */
			if (plane_state->uapi.visible && plane->min_cdclk) {
				if (crtc_state->double_wide || DISPLAY_VER(dev_priv) >= 10)
					crtc_state->min_cdclk[plane->id] =
						DIV_ROUND_UP(crtc_state->pixel_rate, 2);
				else
					crtc_state->min_cdclk[plane->id] =
						crtc_state->pixel_rate;
			}
			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s] min_cdclk %d kHz\n",
				    plane->base.base.id, plane->base.name,
				    crtc_state->min_cdclk[plane->id]);
		}

		if (crtc_state->hw.active) {
			min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
			if (drm_WARN_ON(dev, min_cdclk < 0))
				min_cdclk = 0;
		}

		cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
		cdclk_state->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		intel_bw_crtc_update(bw_state, crtc_state);

		intel_pipe_config_sanity_check(dev_priv, crtc_state);

		/* discard our incomplete slave state, copy it from master */
		if (crtc_state->bigjoiner && crtc_state->hw.active) {
			struct intel_crtc *slave = crtc_state->bigjoiner_linked_crtc;
			struct intel_crtc_state *slave_crtc_state =
				to_intel_crtc_state(slave->base.state);

			copy_bigjoiner_crtc_state(slave_crtc_state, crtc_state);
			slave->base.mode = crtc->base.mode;

			cdclk_state->min_cdclk[slave->pipe] = min_cdclk;
			cdclk_state->min_voltage_level[slave->pipe] =
				crtc_state->min_voltage_level;

			for_each_intel_plane_on_crtc(&dev_priv->drm, slave, plane) {
				const struct intel_plane_state *plane_state =
					to_intel_plane_state(plane->base.state);

				/*
				 * FIXME don't have the fb yet, so can't
				 * use intel_plane_data_rate() :(
				 */
				/*
				 * NOTE(review): this writes the MASTER's
				 * data_rate[] while iterating the SLAVE's
				 * planes — verify that slave_crtc_state
				 * wasn't intended here.
				 */
				if (plane_state->uapi.visible)
					crtc_state->data_rate[plane->id] =
						4 * crtc_state->pixel_rate;
				else
					crtc_state->data_rate[plane->id] = 0;
			}

			intel_bw_crtc_update(bw_state, slave_crtc_state);
			drm_calc_timestamping_constants(&slave->base,
							&slave_crtc_state->hw.adjusted_mode);
		}
	}
}
12477
12478 static void
12479 get_encoder_power_domains(struct drm_i915_private *dev_priv)
12480 {
12481         struct intel_encoder *encoder;
12482
12483         for_each_intel_encoder(&dev_priv->drm, encoder) {
12484                 struct intel_crtc_state *crtc_state;
12485
12486                 if (!encoder->get_power_domains)
12487                         continue;
12488
12489                 /*
12490                  * MST-primary and inactive encoders don't have a crtc state
12491                  * and neither of these require any power domain references.
12492                  */
12493                 if (!encoder->base.crtc)
12494                         continue;
12495
12496                 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
12497                 encoder->get_power_domains(encoder, crtc_state);
12498         }
12499 }
12500
/*
 * Apply display workarounds that must be in place early, before the
 * display state is sanitized and planes are touched.
 */
static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
	/*
	 * Display WA #1185 WaDisableDARBFClkGating:glk,icl,ehl,tgl
	 * Also known as Wa_14010480278.
	 */
	if (IS_DISPLAY_VER(dev_priv, 10, 12))
		intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
			       intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);

	if (IS_HASWELL(dev_priv)) {
		/*
		 * WaRsPkgCStateDisplayPMReq:hsw
		 * System hang if this isn't done before disabling all planes!
		 */
		intel_de_write(dev_priv, CHICKEN_PAR1_1,
			       intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
	}

	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) {
		/* Display WA #1142:kbl,cfl,cml */
		intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
			     KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
		intel_de_rmw(dev_priv, CHICKEN_MISC_2,
			     KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
			     KBL_ARB_FILL_SPARE_14);
	}
}
12529
12530 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
12531                                        enum port port, i915_reg_t hdmi_reg)
12532 {
12533         u32 val = intel_de_read(dev_priv, hdmi_reg);
12534
12535         if (val & SDVO_ENABLE ||
12536             (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
12537                 return;
12538
12539         drm_dbg_kms(&dev_priv->drm,
12540                     "Sanitizing transcoder select for HDMI %c\n",
12541                     port_name(port));
12542
12543         val &= ~SDVO_PIPE_SEL_MASK;
12544         val |= SDVO_PIPE_SEL(PIPE_A);
12545
12546         intel_de_write(dev_priv, hdmi_reg, val);
12547 }
12548
12549 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
12550                                      enum port port, i915_reg_t dp_reg)
12551 {
12552         u32 val = intel_de_read(dev_priv, dp_reg);
12553
12554         if (val & DP_PORT_EN ||
12555             (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
12556                 return;
12557
12558         drm_dbg_kms(&dev_priv->drm,
12559                     "Sanitizing transcoder select for DP %c\n",
12560                     port_name(port));
12561
12562         val &= ~DP_PIPE_SEL_MASK;
12563         val |= DP_PIPE_SEL(PIPE_A);
12564
12565         intel_de_write(dev_priv, dp_reg, val);
12566 }
12567
static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
{
	/*
	 * The BIOS may select transcoder B on some of the PCH
	 * ports even it doesn't enable the port. This would trip
	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
	 * Sanitize the transcoder select bits to prevent that. We
	 * assume that the BIOS never actually enabled the port,
	 * because if it did we'd actually have to toggle the port
	 * on and back off to make the transcoder A select stick
	 * (see. intel_dp_link_down(), intel_disable_hdmi(),
	 * intel_disable_sdvo()).
	 */
	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);

	/* PCH SDVOB multiplex with HDMIB */
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
}
12590
/* Scan out the current hw modeset state,
 * and sanitizes it to the current state
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;

	/* Keep the display powered while we poke at the hardware. */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	/* Early workarounds must be applied before the readout. */
	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */
	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->hw.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	/* Sanitize the crtcs after planes and encoders are consistent. */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	intel_dpll_sanitize_state(dev_priv);

	/* Read out (and, where supported, sanitize) the watermark state. */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (DISPLAY_VER(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	/*
	 * After sanitizing, no crtc should need extra power domain
	 * references; warn and drop any that remain.
	 */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		u64 put_domains;

		put_domains = modeset_get_crtc_power_domains(crtc_state);
		if (drm_WARN_ON(dev, put_domains))
			modeset_put_crtc_power_domains(crtc, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}
12669
12670 void intel_display_resume(struct drm_device *dev)
12671 {
12672         struct drm_i915_private *dev_priv = to_i915(dev);
12673         struct drm_atomic_state *state = dev_priv->modeset_restore_state;
12674         struct drm_modeset_acquire_ctx ctx;
12675         int ret;
12676
12677         if (!HAS_DISPLAY(dev_priv))
12678                 return;
12679
12680         dev_priv->modeset_restore_state = NULL;
12681         if (state)
12682                 state->acquire_ctx = &ctx;
12683
12684         drm_modeset_acquire_init(&ctx, 0);
12685
12686         while (1) {
12687                 ret = drm_modeset_lock_all_ctx(dev, &ctx);
12688                 if (ret != -EDEADLK)
12689                         break;
12690
12691                 drm_modeset_backoff(&ctx);
12692         }
12693
12694         if (!ret)
12695                 ret = __intel_display_resume(dev, state, &ctx);
12696
12697         intel_enable_ipc(dev_priv);
12698         drm_modeset_drop_locks(&ctx);
12699         drm_modeset_acquire_fini(&ctx);
12700
12701         if (ret)
12702                 drm_err(&dev_priv->drm,
12703                         "Restoring old state failed with %i\n", ret);
12704         if (state)
12705                 drm_atomic_state_put(state);
12706 }
12707
12708 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
12709 {
12710         struct intel_connector *connector;
12711         struct drm_connector_list_iter conn_iter;
12712
12713         /* Kill all the work that may have been queued by hpd. */
12714         drm_connector_list_iter_begin(&i915->drm, &conn_iter);
12715         for_each_intel_connector_iter(connector, &conn_iter) {
12716                 if (connector->modeset_retry_work.func)
12717                         cancel_work_sync(&connector->modeset_retry_work);
12718                 if (connector->hdcp.shim) {
12719                         cancel_delayed_work_sync(&connector->hdcp.check_work);
12720                         cancel_work_sync(&connector->hdcp.prop_work);
12721                 }
12722         }
12723         drm_connector_list_iter_end(&conn_iter);
12724 }
12725
/* part #1: call before irq uninstall */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	/* Drain any still-queued flip/modeset work while irqs are alive. */
	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	/* The atomic helper free list must be empty once its work is done. */
	flush_work(&i915->atomic_helper.free_work);
	drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
}
12738
/* part #2: call after irq uninstall */
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	intel_mode_config_cleanup(i915);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	/* Nothing should be queued on these anymore; see part #1. */
	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	intel_fbc_cleanup_cfb(i915);
}
12781
/* part #3: call after gem init */
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
{
	/* Release DMC firmware before tearing down power domains. */
	intel_dmc_ucode_fini(i915);

	intel_power_domains_driver_remove(i915);

	intel_vga_unregister(i915);

	/* Frees the VBT/opregion data parsed at probe time. */
	intel_bios_driver_remove(i915);
}
12793
/* Register the display-facing interfaces (debugfs, ACPI, audio, fbdev, poll). */
void intel_display_driver_register(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	intel_display_debugfs_register(i915);

	/* Must be done after probing outputs */
	intel_opregion_register(i915);
	acpi_video_register();

	intel_audio_init(i915);

	/*
	 * Some ports require correctly set-up hpd registers for
	 * detection to work properly (leading to ghost connected
	 * connector status), e.g. VGA on gm45.  Hence we can only set
	 * up the initial fbdev config after hpd irqs are fully
	 * enabled. We do it last so that the async config cannot run
	 * before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(&i915->drm);

	/*
	 * We need to coordinate the hotplugs with the asynchronous
	 * fbdev configuration, for which we use the
	 * fbdev->async_cookie.
	 */
	drm_kms_helper_poll_init(&i915->drm);
}
12824
/* Undo intel_display_driver_register(), in reverse dependency order. */
void intel_display_driver_unregister(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	intel_fbdev_unregister(i915);
	intel_audio_deinit(i915);

	/*
	 * After flushing the fbdev (incl. a late async config which
	 * will have delayed queuing of a hotplug event), then flush
	 * the hotplug events.
	 */
	drm_kms_helper_poll_fini(&i915->drm);
	drm_atomic_helper_shutdown(&i915->drm);

	acpi_video_unregister();
	intel_opregion_unregister(i915);
}