drm/i915/display: remove intel_display_commit_duplicated_state()
[platform/kernel/linux-starfive.git] / drivers / gpu / drm / i915 / display / intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/dma-resv.h>
28 #include <linux/i2c.h>
29 #include <linux/input.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/slab.h>
33 #include <linux/string_helpers.h>
34 #include <linux/vga_switcheroo.h>
35 #include <acpi/video.h>
36
37 #include <drm/display/drm_dp_helper.h>
38 #include <drm/drm_atomic.h>
39 #include <drm/drm_atomic_helper.h>
40 #include <drm/drm_atomic_uapi.h>
41 #include <drm/drm_damage_helper.h>
42 #include <drm/drm_edid.h>
43 #include <drm/drm_fourcc.h>
44 #include <drm/drm_privacy_screen_consumer.h>
45 #include <drm/drm_probe_helper.h>
46 #include <drm/drm_rect.h>
47
48 #include "gem/i915_gem_lmem.h"
49 #include "gem/i915_gem_object.h"
50
51 #include "g4x_dp.h"
52 #include "g4x_hdmi.h"
53 #include "hsw_ips.h"
54 #include "i915_drv.h"
55 #include "i915_reg.h"
56 #include "i915_utils.h"
57 #include "i9xx_plane.h"
58 #include "i9xx_wm.h"
59 #include "icl_dsi.h"
60 #include "intel_acpi.h"
61 #include "intel_atomic.h"
62 #include "intel_atomic_plane.h"
63 #include "intel_audio.h"
64 #include "intel_bw.h"
65 #include "intel_cdclk.h"
66 #include "intel_clock_gating.h"
67 #include "intel_color.h"
68 #include "intel_crt.h"
69 #include "intel_crtc.h"
70 #include "intel_crtc_state_dump.h"
71 #include "intel_ddi.h"
72 #include "intel_de.h"
73 #include "intel_display_debugfs.h"
74 #include "intel_display_power.h"
75 #include "intel_display_types.h"
76 #include "intel_dmc.h"
77 #include "intel_dp.h"
78 #include "intel_dp_link_training.h"
79 #include "intel_dp_mst.h"
80 #include "intel_dpio_phy.h"
81 #include "intel_dpll.h"
82 #include "intel_dpll_mgr.h"
83 #include "intel_dpt.h"
84 #include "intel_drrs.h"
85 #include "intel_dsi.h"
86 #include "intel_dvo.h"
87 #include "intel_fb.h"
88 #include "intel_fbc.h"
89 #include "intel_fbdev.h"
90 #include "intel_fdi.h"
91 #include "intel_fifo_underrun.h"
92 #include "intel_frontbuffer.h"
93 #include "intel_gmbus.h"
94 #include "intel_hdcp.h"
95 #include "intel_hdmi.h"
96 #include "intel_hotplug.h"
97 #include "intel_hti.h"
98 #include "intel_lvds.h"
99 #include "intel_lvds_regs.h"
100 #include "intel_modeset_setup.h"
101 #include "intel_modeset_verify.h"
102 #include "intel_overlay.h"
103 #include "intel_panel.h"
104 #include "intel_pch_display.h"
105 #include "intel_pch_refclk.h"
106 #include "intel_pcode.h"
107 #include "intel_pipe_crc.h"
108 #include "intel_plane_initial.h"
109 #include "intel_pps.h"
110 #include "intel_psr.h"
111 #include "intel_quirks.h"
112 #include "intel_sdvo.h"
113 #include "intel_snps_phy.h"
114 #include "intel_tc.h"
115 #include "intel_tv.h"
116 #include "intel_vblank.h"
117 #include "intel_vdsc.h"
118 #include "intel_vdsc_regs.h"
119 #include "intel_vga.h"
120 #include "intel_vrr.h"
121 #include "intel_wm.h"
122 #include "skl_scaler.h"
123 #include "skl_universal_plane.h"
124 #include "skl_watermark.h"
125 #include "vlv_dsi.h"
126 #include "vlv_dsi_pll.h"
127 #include "vlv_dsi_regs.h"
128 #include "vlv_sideband.h"
129
130 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
131 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
132 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
133 static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state);
134 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
135
136 /* returns HPLL frequency in kHz */
137 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
138 {
139         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
140
141         /* Obtain SKU information */
142         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
143                 CCK_FUSE_HPLL_FREQ_MASK;
144
145         return vco_freq[hpll_freq] * 1000;
146 }
147
148 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
149                       const char *name, u32 reg, int ref_freq)
150 {
151         u32 val;
152         int divider;
153
154         val = vlv_cck_read(dev_priv, reg);
155         divider = val & CCK_FREQUENCY_VALUES;
156
157         drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
158                  (divider << CCK_FREQUENCY_STATUS_SHIFT),
159                  "%s change in progress\n", name);
160
161         return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
162 }
163
164 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
165                            const char *name, u32 reg)
166 {
167         int hpll;
168
169         vlv_cck_get(dev_priv);
170
171         if (dev_priv->hpll_freq == 0)
172                 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
173
174         hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
175
176         vlv_cck_put(dev_priv);
177
178         return hpll;
179 }
180
181 static void intel_update_czclk(struct drm_i915_private *dev_priv)
182 {
183         if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
184                 return;
185
186         dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
187                                                       CCK_CZ_CLOCK_CONTROL);
188
189         drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
190                 dev_priv->czclk_freq);
191 }
192
193 static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
194 {
195         return (crtc_state->active_planes &
196                 ~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0;
197 }
198
199 /* WA Display #0827: Gen9:all */
200 static void
201 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
202 {
203         if (enable)
204                 intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
205                              0, DUPS1_GATING_DIS | DUPS2_GATING_DIS);
206         else
207                 intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
208                              DUPS1_GATING_DIS | DUPS2_GATING_DIS, 0);
209 }
210
211 /* Wa_2006604312:icl,ehl */
212 static void
213 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
214                        bool enable)
215 {
216         if (enable)
217                 intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 0, DPFR_GATING_DIS);
218         else
219                 intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), DPFR_GATING_DIS, 0);
220 }
221
222 /* Wa_1604331009:icl,jsl,ehl */
223 static void
224 icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
225                        bool enable)
226 {
227         intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), CURSOR_GATING_DIS,
228                      enable ? CURSOR_GATING_DIS : 0);
229 }
230
231 static bool
232 is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
233 {
234         return crtc_state->master_transcoder != INVALID_TRANSCODER;
235 }
236
237 static bool
238 is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
239 {
240         return crtc_state->sync_mode_slaves_mask != 0;
241 }
242
243 bool
244 is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
245 {
246         return is_trans_port_sync_master(crtc_state) ||
247                 is_trans_port_sync_slave(crtc_state);
248 }
249
250 static enum pipe bigjoiner_master_pipe(const struct intel_crtc_state *crtc_state)
251 {
252         return ffs(crtc_state->bigjoiner_pipes) - 1;
253 }
254
255 u8 intel_crtc_bigjoiner_slave_pipes(const struct intel_crtc_state *crtc_state)
256 {
257         if (crtc_state->bigjoiner_pipes)
258                 return crtc_state->bigjoiner_pipes & ~BIT(bigjoiner_master_pipe(crtc_state));
259         else
260                 return 0;
261 }
262
263 bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state)
264 {
265         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
266
267         return crtc_state->bigjoiner_pipes &&
268                 crtc->pipe != bigjoiner_master_pipe(crtc_state);
269 }
270
271 bool intel_crtc_is_bigjoiner_master(const struct intel_crtc_state *crtc_state)
272 {
273         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
274
275         return crtc_state->bigjoiner_pipes &&
276                 crtc->pipe == bigjoiner_master_pipe(crtc_state);
277 }
278
279 static int intel_bigjoiner_num_pipes(const struct intel_crtc_state *crtc_state)
280 {
281         return hweight8(crtc_state->bigjoiner_pipes);
282 }
283
284 struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state)
285 {
286         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
287
288         if (intel_crtc_is_bigjoiner_slave(crtc_state))
289                 return intel_crtc_for_pipe(i915, bigjoiner_master_pipe(crtc_state));
290         else
291                 return to_intel_crtc(crtc_state->uapi.crtc);
292 }
293
294 static void
295 intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
296 {
297         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
298         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
299
300         if (DISPLAY_VER(dev_priv) >= 4) {
301                 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
302
303                 /* Wait for the Pipe State to go off */
304                 if (intel_de_wait_for_clear(dev_priv, TRANSCONF(cpu_transcoder),
305                                             TRANSCONF_STATE_ENABLE, 100))
306                         drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n");
307         } else {
308                 intel_wait_for_pipe_scanline_stopped(crtc);
309         }
310 }
311
312 void assert_transcoder(struct drm_i915_private *dev_priv,
313                        enum transcoder cpu_transcoder, bool state)
314 {
315         bool cur_state;
316         enum intel_display_power_domain power_domain;
317         intel_wakeref_t wakeref;
318
319         /* we keep both pipes enabled on 830 */
320         if (IS_I830(dev_priv))
321                 state = true;
322
323         power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
324         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
325         if (wakeref) {
326                 u32 val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
327                 cur_state = !!(val & TRANSCONF_ENABLE);
328
329                 intel_display_power_put(dev_priv, power_domain, wakeref);
330         } else {
331                 cur_state = false;
332         }
333
334         I915_STATE_WARN(cur_state != state,
335                         "transcoder %s assertion failure (expected %s, current %s)\n",
336                         transcoder_name(cpu_transcoder),
337                         str_on_off(state), str_on_off(cur_state));
338 }
339
340 static void assert_plane(struct intel_plane *plane, bool state)
341 {
342         enum pipe pipe;
343         bool cur_state;
344
345         cur_state = plane->get_hw_state(plane, &pipe);
346
347         I915_STATE_WARN(cur_state != state,
348                         "%s assertion failure (expected %s, current %s)\n",
349                         plane->base.name, str_on_off(state),
350                         str_on_off(cur_state));
351 }
352
353 #define assert_plane_enabled(p) assert_plane(p, true)
354 #define assert_plane_disabled(p) assert_plane(p, false)
355
356 static void assert_planes_disabled(struct intel_crtc *crtc)
357 {
358         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
359         struct intel_plane *plane;
360
361         for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
362                 assert_plane_disabled(plane);
363 }
364
/*
 * Wait for the PHY lanes behind @dig_port to report ready on VLV/CHV.
 *
 * Ports B/C report via DPLL(0); port D via DPIO_PHY_STATUS. For port C
 * @expected_mask is shifted to line up with its field in the register.
 * Only warns on timeout -- does not return an error.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dig_port->base.port) {
	default:
		MISSING_CASE(dig_port->base.port);
		fallthrough;
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		/* Port C's ready bits sit 4 bits above port B's */
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}
399
/*
 * Enable the transcoder/pipe described by @new_crtc_state.
 *
 * Asserts the clock/PLL preconditions for the platform, sets
 * TRANSCONF_ENABLE, and waits for the pipe to actually start when the
 * hardware vblank counter can't be relied upon yet. All planes must
 * already be disabled when this is called.
 */
void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	/* Wa_22012358565:adl-p */
	if (DISPLAY_VER(dev_priv) == 13)
		intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
			     0, PIPE_ARB_USE_PROG_SLOTS);

	reg = TRANSCONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if (val & TRANSCONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, reg, val | TRANSCONF_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
460
/*
 * Disable the transcoder/pipe described by @old_crtc_state.
 *
 * Clears TRANSCONF_ENABLE (except on i830, where both pipes stay on),
 * clears the FEC stall chicken bit on DISPLAY_VER >= 12, and waits for
 * the pipe to actually stop when the enable bit was cleared. All planes
 * must already be disabled when this is called.
 */
void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	reg = TRANSCONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	/* Already disabled -> nothing to do */
	if ((val & TRANSCONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~TRANSCONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~TRANSCONF_ENABLE;

	/* The chicken register moved on DISPLAY_VER >= 14 */
	if (DISPLAY_VER(dev_priv) >= 14)
		intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder),
			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
	else if (DISPLAY_VER(dev_priv) >= 12)
		intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);

	intel_de_write(dev_priv, reg, val);
	/* Only wait for the pipe to stop if we actually cleared the enable bit */
	if ((val & TRANSCONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
505
506 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
507 {
508         unsigned int size = 0;
509         int i;
510
511         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
512                 size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;
513
514         return size;
515 }
516
517 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
518 {
519         unsigned int size = 0;
520         int i;
521
522         for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
523                 unsigned int plane_size;
524
525                 if (rem_info->plane[i].linear)
526                         plane_size = rem_info->plane[i].size;
527                 else
528                         plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;
529
530                 if (plane_size == 0)
531                         continue;
532
533                 if (rem_info->plane_alignment)
534                         size = ALIGN(size, rem_info->plane_alignment);
535
536                 size += plane_size;
537         }
538
539         return size;
540 }
541
542 bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
543 {
544         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
545         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
546
547         return DISPLAY_VER(dev_priv) < 4 ||
548                 (plane->fbc &&
549                  plane_state->view.gtt.type == I915_GTT_VIEW_NORMAL);
550 }
551
552 /*
553  * Convert the x/y offsets into a linear offset.
554  * Only valid with 0/180 degree rotation, which is fine since linear
555  * offset is only used with linear buffers on pre-hsw and tiled buffers
556  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
557  */
558 u32 intel_fb_xy_to_linear(int x, int y,
559                           const struct intel_plane_state *state,
560                           int color_plane)
561 {
562         const struct drm_framebuffer *fb = state->hw.fb;
563         unsigned int cpp = fb->format->cpp[color_plane];
564         unsigned int pitch = state->view.color_plane[color_plane].mapping_stride;
565
566         return y * pitch + x * cpp;
567 }
568
569 /*
570  * Add the x/y offsets derived from fb->offsets[] to the user
571  * specified plane src x/y offsets. The resulting x/y offsets
572  * specify the start of scanout from the beginning of the gtt mapping.
573  */
574 void intel_add_fb_offsets(int *x, int *y,
575                           const struct intel_plane_state *state,
576                           int color_plane)
577
578 {
579         *x += state->view.color_plane[color_plane].x;
580         *y += state->view.color_plane[color_plane].y;
581 }
582
583 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
584                               u32 pixel_format, u64 modifier)
585 {
586         struct intel_crtc *crtc;
587         struct intel_plane *plane;
588
589         if (!HAS_DISPLAY(dev_priv))
590                 return 0;
591
592         /*
593          * We assume the primary plane for pipe A has
594          * the highest stride limits of them all,
595          * if in case pipe A is disabled, use the first pipe from pipe_mask.
596          */
597         crtc = intel_first_crtc(dev_priv);
598         if (!crtc)
599                 return 0;
600
601         plane = to_intel_plane(crtc->base.primary);
602
603         return plane->max_stride(plane, pixel_format, modifier,
604                                  DRM_MODE_ROTATE_0);
605 }
606
607 void intel_set_plane_visible(struct intel_crtc_state *crtc_state,
608                              struct intel_plane_state *plane_state,
609                              bool visible)
610 {
611         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
612
613         plane_state->uapi.visible = visible;
614
615         if (visible)
616                 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
617         else
618                 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
619 }
620
621 void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state)
622 {
623         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
624         struct drm_plane *plane;
625
626         /*
627          * Active_planes aliases if multiple "primary" or cursor planes
628          * have been used on the same (or wrong) pipe. plane_mask uses
629          * unique ids, hence we can use that to reconstruct active_planes.
630          */
631         crtc_state->enabled_planes = 0;
632         crtc_state->active_planes = 0;
633
634         drm_for_each_plane_mask(plane, &dev_priv->drm,
635                                 crtc_state->uapi.plane_mask) {
636                 crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
637                 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
638         }
639 }
640
/*
 * Disable @plane on @crtc outside of the normal atomic commit path,
 * mutating the current crtc/plane state in place. Clears the plane's
 * visibility, bitmask and data-rate bookkeeping, then applies the
 * platform workarounds (IPS, cxsr, gen2 underruns) needed around the
 * actual plane disable.
 */
void intel_plane_disable_noatomic(struct intel_crtc *crtc,
				  struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	/* Update the software state to match the plane going away */
	intel_set_plane_visible(crtc_state, plane_state, false);
	intel_plane_fixup_bitmasks(crtc_state);
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->data_rate_y[plane->id] = 0;
	crtc_state->rel_data_rate[plane->id] = 0;
	crtc_state->rel_data_rate_y[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	/* With only the cursor left, IPS must be turned off first */
	if ((crtc_state->active_planes & ~BIT(PLANE_CURSOR)) == 0 &&
	    hsw_ips_disable(crtc_state)) {
		crtc_state->ips_enabled = false;
		intel_crtc_wait_for_next_vblank(crtc);
	}

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_plane_disable_arm(plane, crtc_state);
	intel_crtc_wait_for_next_vblank(crtc);
}
692
693 unsigned int
694 intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
695 {
696         int x = 0, y = 0;
697
698         intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
699                                           plane_state->view.color_plane[0].offset, 0);
700
701         return y;
702 }
703
/*
 * Re-program display hardware state after reset/resume and, when a
 * previously duplicated atomic @state is provided (may be NULL),
 * re-commit it. Returns 0 on success or a negative error code;
 * -EDEADLK is unexpected at this point and triggers a WARN.
 */
static int
__intel_display_resume(struct drm_i915_private *i915,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int ret, i;

	intel_modeset_setup_hw_state(i915, ctx);
	intel_vga_redisable(i915);

	/* Nothing to commit -- hw state readout above is all that's needed */
	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(i915))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	drm_WARN_ON(&i915->drm, ret == -EDEADLK);

	return ret;
}
743
744 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
745 {
746         return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
747                 intel_has_gpu_reset(to_gt(dev_priv)));
748 }
749
/*
 * Quiesce the display before a GPU reset that clobbers it.
 *
 * Takes mode_config.mutex and all modeset locks (they stay held until
 * intel_display_finish_reset()), duplicates the current atomic state
 * into display.restore.modeset_state for later restoration, and
 * disables all crtcs. Returns early when the reset won't touch the
 * display, unless force_reset_modeset_test is set.
 */
void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->display.restore.reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* reset doesn't touch the display */
	if (!dev_priv->params.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags);
	smp_mb__after_atomic();
	wake_up_bit(&to_gt(dev_priv)->reset.flags, I915_RESET_MODESET);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Modeset potentially stuck, unbreaking through wedging\n");
		intel_gt_set_wedged(to_gt(dev_priv));
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev_priv->drm.mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	/* Retry the lock acquisition until we stop hitting -EDEADLK */
	while (1) {
		ret = drm_modeset_lock_all_ctx(&dev_priv->drm, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(&dev_priv->drm, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
			ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(&dev_priv->drm, ctx);
	if (ret) {
		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
			ret);
		drm_atomic_state_put(state);
		return;
	}

	/* Stash the duplicated state for intel_display_finish_reset() */
	dev_priv->display.restore.modeset_state = state;
	state->acquire_ctx = ctx;
}
811
/*
 * Counterpart of intel_display_prepare_reset(): restore the display
 * state duplicated there (re-initializing the display hardware first if
 * the reset clobbered it) and drop the modeset locks taken earlier.
 */
void intel_display_finish_reset(struct drm_i915_private *i915)
{
	struct drm_modeset_acquire_ctx *ctx = &i915->display.restore.reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(i915))
		return;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &to_gt(i915)->reset.flags))
		return;

	state = fetch_and_zero(&i915->display.restore.modeset_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(i915)) {
		/* for testing only restore the display */
		ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
		if (ret) {
			drm_WARN_ON(&i915->drm, ret == -EDEADLK);
			drm_err(&i915->drm,
				"Restoring old state failed with %i\n", ret);
		}
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(i915);
		intel_modeset_init_hw(i915);
		intel_clock_gating_init(i915);
		intel_hpd_init(i915);

		ret = __intel_display_resume(i915, state, ctx);
		if (ret)
			drm_err(&i915->drm,
				"Restoring old state failed with %i\n", ret);

		intel_hpd_poll_disable(i915);
	}

	drm_atomic_state_put(state);
unlock:
	/* Drop the locks taken in intel_display_prepare_reset() */
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&i915->drm.mode_config.mutex);

	clear_bit_unlock(I915_RESET_MODESET, &to_gt(i915)->reset.flags);
}
864
865 static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
866 {
867         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
868         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
869         enum pipe pipe = crtc->pipe;
870         u32 tmp;
871
872         tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
873
874         /*
875          * Display WA #1153: icl
876          * enable hardware to bypass the alpha math
877          * and rounding for per-pixel values 00 and 0xff
878          */
879         tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
880         /*
881          * Display WA # 1605353570: icl
882          * Set the pixel rounding bit to 1 for allowing
883          * passthrough of Frame buffer pixels unmodified
884          * across pipe
885          */
886         tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
887
888         /*
889          * Underrun recovery must always be disabled on display 13+.
890          * DG2 chicken bit meaning is inverted compared to other platforms.
891          */
892         if (IS_DG2(dev_priv))
893                 tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
894         else if (DISPLAY_VER(dev_priv) >= 13)
895                 tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;
896
897         /* Wa_14010547955:dg2 */
898         if (IS_DG2_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER))
899                 tmp |= DG2_RENDER_CCSTAG_4_3_EN;
900
901         intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
902 }
903
904 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
905 {
906         struct drm_crtc *crtc;
907         bool cleanup_done;
908
909         drm_for_each_crtc(crtc, &dev_priv->drm) {
910                 struct drm_crtc_commit *commit;
911                 spin_lock(&crtc->commit_lock);
912                 commit = list_first_entry_or_null(&crtc->commit_list,
913                                                   struct drm_crtc_commit, commit_entry);
914                 cleanup_done = commit ?
915                         try_wait_for_completion(&commit->cleanup_done) : true;
916                 spin_unlock(&crtc->commit_lock);
917
918                 if (cleanup_done)
919                         continue;
920
921                 intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc));
922
923                 return true;
924         }
925
926         return false;
927 }
928
929 /*
930  * Finds the encoder associated with the given CRTC. This can only be
931  * used when we know that the CRTC isn't feeding multiple encoders!
932  */
933 struct intel_encoder *
934 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
935                            const struct intel_crtc_state *crtc_state)
936 {
937         const struct drm_connector_state *connector_state;
938         const struct drm_connector *connector;
939         struct intel_encoder *encoder = NULL;
940         struct intel_crtc *master_crtc;
941         int num_encoders = 0;
942         int i;
943
944         master_crtc = intel_master_crtc(crtc_state);
945
946         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
947                 if (connector_state->crtc != &master_crtc->base)
948                         continue;
949
950                 encoder = to_intel_encoder(connector_state->best_encoder);
951                 num_encoders++;
952         }
953
954         drm_WARN(state->base.dev, num_encoders != 1,
955                  "%d encoders for pipe %c\n",
956                  num_encoders, pipe_name(master_crtc->pipe));
957
958         return encoder;
959 }
960
961 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
962 {
963         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
964         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
965         const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
966         enum pipe pipe = crtc->pipe;
967         int width = drm_rect_width(dst);
968         int height = drm_rect_height(dst);
969         int x = dst->x1;
970         int y = dst->y1;
971
972         if (!crtc_state->pch_pfit.enabled)
973                 return;
974
975         /* Force use of hard-coded filter coefficients
976          * as some pre-programmed values are broken,
977          * e.g. x201.
978          */
979         if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
980                 intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
981                                   PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
982         else
983                 intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
984                                   PF_FILTER_MED_3x3);
985         intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
986         intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
987 }
988
989 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
990 {
991         if (crtc->overlay)
992                 (void) intel_overlay_switch_off(crtc->overlay);
993
994         /* Let userspace switch the overlay on again. In most cases userspace
995          * has to recompute where to put it anyway.
996          */
997 }
998
999 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
1000 {
1001         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1002
1003         if (!crtc_state->nv12_planes)
1004                 return false;
1005
1006         /* WA Display #0827: Gen9:all */
1007         if (DISPLAY_VER(dev_priv) == 9)
1008                 return true;
1009
1010         return false;
1011 }
1012
1013 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
1014 {
1015         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1016
1017         /* Wa_2006604312:icl,ehl */
1018         if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
1019                 return true;
1020
1021         return false;
1022 }
1023
1024 static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
1025 {
1026         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1027
1028         /* Wa_1604331009:icl,jsl,ehl */
1029         if (is_hdr_mode(crtc_state) &&
1030             crtc_state->active_planes & BIT(PLANE_CURSOR) &&
1031             DISPLAY_VER(dev_priv) == 11)
1032                 return true;
1033
1034         return false;
1035 }
1036
1037 static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
1038                                     enum pipe pipe, bool enable)
1039 {
1040         if (DISPLAY_VER(i915) == 9) {
1041                 /*
1042                  * "Plane N strech max must be programmed to 11b (x1)
1043                  *  when Async flips are enabled on that plane."
1044                  */
1045                 intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
1046                              SKL_PLANE1_STRETCH_MAX_MASK,
1047                              enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
1048         } else {
1049                 /* Also needed on HSW/BDW albeit undocumented */
1050                 intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
1051                              HSW_PRI_STRETCH_MAX_MASK,
1052                              enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
1053         }
1054 }
1055
1056 static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
1057 {
1058         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1059
1060         return crtc_state->uapi.async_flip && i915_vtd_active(i915) &&
1061                 (DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
1062 }
1063
/*
 * Helpers to detect a per-crtc feature turning on/off across an atomic
 * commit. A full modeset relaxes the "was off"/"will be off" check, since
 * the pipe is torn down and brought back up around it.
 */
#define is_enabling(feature, old_crtc_state, new_crtc_state) \
	((!(old_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)) && \
	 (new_crtc_state)->feature)
#define is_disabling(feature, old_crtc_state, new_crtc_state) \
	((old_crtc_state)->feature && \
	 (!(new_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)))

/* Is any plane on this pipe being enabled by the commit? */
static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
			    const struct intel_crtc_state *new_crtc_state)
{
	return is_enabling(active_planes, old_crtc_state, new_crtc_state);
}

/* Are all planes on this pipe being disabled by the commit? */
static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
			     const struct intel_crtc_state *new_crtc_state)
{
	return is_disabling(active_planes, old_crtc_state, new_crtc_state);
}

/* Is VRR being enabled by the commit? */
static bool vrr_enabling(const struct intel_crtc_state *old_crtc_state,
			 const struct intel_crtc_state *new_crtc_state)
{
	return is_enabling(vrr.enable, old_crtc_state, new_crtc_state);
}

/* Is VRR being disabled by the commit? */
static bool vrr_disabling(const struct intel_crtc_state *old_crtc_state,
			  const struct intel_crtc_state *new_crtc_state)
{
	return is_disabling(vrr.enable, old_crtc_state, new_crtc_state);
}

#undef is_disabling
#undef is_enabling
1097
/*
 * Post-plane-update tail of the commit for one crtc: flush frontbuffer
 * tracking, update watermarks where flagged, and drop the display
 * workarounds that the old configuration needed but the new one doesn't.
 */
static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	/* Post-vblank watermark update, if flagged during compute phase */
	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(dev_priv);

	intel_fbc_post_update(state, crtc);

	/* Each WA is disabled only when no longer needed by the new state */
	if (needs_async_flip_vtd_wa(old_crtc_state) &&
	    !needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, false);

	/* WA Display #0827 */
	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	/* Wa_2006604312:icl,ehl */
	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);

	/* Wa_1604331009:icl,jsl,ehl */
	if (needs_cursorclk_wa(old_crtc_state) &&
	    !needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, false);

	if (intel_crtc_needs_color_update(new_crtc_state))
		intel_color_post_update(new_crtc_state);
}
1134
1135 static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
1136                                         struct intel_crtc *crtc)
1137 {
1138         const struct intel_crtc_state *crtc_state =
1139                 intel_atomic_get_new_crtc_state(state, crtc);
1140         u8 update_planes = crtc_state->update_planes;
1141         const struct intel_plane_state *plane_state;
1142         struct intel_plane *plane;
1143         int i;
1144
1145         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
1146                 if (plane->pipe == crtc->pipe &&
1147                     update_planes & BIT(plane->id))
1148                         plane->enable_flip_done(plane);
1149         }
1150 }
1151
1152 static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
1153                                          struct intel_crtc *crtc)
1154 {
1155         const struct intel_crtc_state *crtc_state =
1156                 intel_atomic_get_new_crtc_state(state, crtc);
1157         u8 update_planes = crtc_state->update_planes;
1158         const struct intel_plane_state *plane_state;
1159         struct intel_plane *plane;
1160         int i;
1161
1162         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
1163                 if (plane->pipe == crtc->pipe &&
1164                     update_planes & BIT(plane->id))
1165                         plane->disable_flip_done(plane);
1166         }
1167 }
1168
1169 static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
1170                                              struct intel_crtc *crtc)
1171 {
1172         const struct intel_crtc_state *old_crtc_state =
1173                 intel_atomic_get_old_crtc_state(state, crtc);
1174         const struct intel_crtc_state *new_crtc_state =
1175                 intel_atomic_get_new_crtc_state(state, crtc);
1176         u8 disable_async_flip_planes = old_crtc_state->async_flip_planes &
1177                                        ~new_crtc_state->async_flip_planes;
1178         const struct intel_plane_state *old_plane_state;
1179         struct intel_plane *plane;
1180         bool need_vbl_wait = false;
1181         int i;
1182
1183         for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
1184                 if (plane->need_async_flip_disable_wa &&
1185                     plane->pipe == crtc->pipe &&
1186                     disable_async_flip_planes & BIT(plane->id)) {
1187                         /*
1188                          * Apart from the async flip bit we want to
1189                          * preserve the old state for the plane.
1190                          */
1191                         plane->async_flip(plane, old_crtc_state,
1192                                           old_plane_state, false);
1193                         need_vbl_wait = true;
1194                 }
1195         }
1196
1197         if (need_vbl_wait)
1198                 intel_crtc_wait_for_next_vblank(crtc);
1199 }
1200
/*
 * Pre-plane-update head of the commit for one crtc: tear down features
 * that must be off before the planes are touched (VRR, DRRS, PSR), arm
 * the display workarounds the new state needs, and program intermediate
 * watermarks. The ordering of the steps below is deliberate — several of
 * them require explicit vblank waits in between.
 */
static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	/* Turn VRR off first and reprogram the timings it affected */
	if (vrr_disabling(old_crtc_state, new_crtc_state)) {
		intel_vrr_disable(old_crtc_state);
		intel_crtc_update_active_timings(old_crtc_state, false);
	}

	intel_drrs_deactivate(old_crtc_state);

	intel_psr_pre_plane_update(state, crtc);

	/* IPS and FBC pre-updates may require a vblank to settle */
	if (hsw_ips_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	if (intel_fbc_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	if (!needs_async_flip_vtd_wa(old_crtc_state) &&
	    needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, true);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl,ehl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/* Wa_1604331009:icl,jsl,ehl */
	if (!needs_cursorclk_wa(old_crtc_state) &&
	    needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling.  LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (old_crtc_state->hw.active &&
	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!intel_crtc_needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks.  For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (!intel_initial_watermarks(state, crtc))
			if (new_crtc_state->update_wm_pre)
				intel_update_watermarks(dev_priv);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * WA for platforms where async address update enable bit
	 * is double buffered and only latched at start of vblank.
	 */
	if (old_crtc_state->async_flip_planes & ~new_crtc_state->async_flip_planes)
		intel_crtc_async_flip_disable_wa(state, crtc);
}
1311
1312 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
1313                                       struct intel_crtc *crtc)
1314 {
1315         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1316         const struct intel_crtc_state *new_crtc_state =
1317                 intel_atomic_get_new_crtc_state(state, crtc);
1318         unsigned int update_mask = new_crtc_state->update_planes;
1319         const struct intel_plane_state *old_plane_state;
1320         struct intel_plane *plane;
1321         unsigned fb_bits = 0;
1322         int i;
1323
1324         intel_crtc_dpms_overlay_disable(crtc);
1325
1326         for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
1327                 if (crtc->pipe != plane->pipe ||
1328                     !(update_mask & BIT(plane->id)))
1329                         continue;
1330
1331                 intel_plane_disable_arm(plane, new_crtc_state);
1332
1333                 if (old_plane_state->uapi.visible)
1334                         fb_bits |= plane->frontbuffer_bit;
1335         }
1336
1337         intel_frontbuffer_flip(dev_priv, fb_bits);
1338 }
1339
1340 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
1341 {
1342         struct drm_i915_private *i915 = to_i915(state->base.dev);
1343         struct intel_crtc_state *new_crtc_state, *old_crtc_state;
1344         struct intel_crtc *crtc;
1345         int i;
1346
1347         /*
1348          * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits.
1349          * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
1350          */
1351         if (i915->display.dpll.mgr) {
1352                 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
1353                         if (intel_crtc_needs_modeset(new_crtc_state))
1354                                 continue;
1355
1356                         new_crtc_state->shared_dpll = old_crtc_state->shared_dpll;
1357                         new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state;
1358                 }
1359         }
1360 }
1361
1362 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
1363                                           struct intel_crtc *crtc)
1364 {
1365         const struct intel_crtc_state *crtc_state =
1366                 intel_atomic_get_new_crtc_state(state, crtc);
1367         const struct drm_connector_state *conn_state;
1368         struct drm_connector *conn;
1369         int i;
1370
1371         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1372                 struct intel_encoder *encoder =
1373                         to_intel_encoder(conn_state->best_encoder);
1374
1375                 if (conn_state->crtc != &crtc->base)
1376                         continue;
1377
1378                 if (encoder->pre_pll_enable)
1379                         encoder->pre_pll_enable(state, encoder,
1380                                                 crtc_state, conn_state);
1381         }
1382 }
1383
1384 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
1385                                       struct intel_crtc *crtc)
1386 {
1387         const struct intel_crtc_state *crtc_state =
1388                 intel_atomic_get_new_crtc_state(state, crtc);
1389         const struct drm_connector_state *conn_state;
1390         struct drm_connector *conn;
1391         int i;
1392
1393         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1394                 struct intel_encoder *encoder =
1395                         to_intel_encoder(conn_state->best_encoder);
1396
1397                 if (conn_state->crtc != &crtc->base)
1398                         continue;
1399
1400                 if (encoder->pre_enable)
1401                         encoder->pre_enable(state, encoder,
1402                                             crtc_state, conn_state);
1403         }
1404 }
1405
1406 static void intel_encoders_enable(struct intel_atomic_state *state,
1407                                   struct intel_crtc *crtc)
1408 {
1409         const struct intel_crtc_state *crtc_state =
1410                 intel_atomic_get_new_crtc_state(state, crtc);
1411         const struct drm_connector_state *conn_state;
1412         struct drm_connector *conn;
1413         int i;
1414
1415         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1416                 struct intel_encoder *encoder =
1417                         to_intel_encoder(conn_state->best_encoder);
1418
1419                 if (conn_state->crtc != &crtc->base)
1420                         continue;
1421
1422                 if (encoder->enable)
1423                         encoder->enable(state, encoder,
1424                                         crtc_state, conn_state);
1425                 intel_opregion_notify_encoder(encoder, true);
1426         }
1427 }
1428
1429 static void intel_encoders_disable(struct intel_atomic_state *state,
1430                                    struct intel_crtc *crtc)
1431 {
1432         const struct intel_crtc_state *old_crtc_state =
1433                 intel_atomic_get_old_crtc_state(state, crtc);
1434         const struct drm_connector_state *old_conn_state;
1435         struct drm_connector *conn;
1436         int i;
1437
1438         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1439                 struct intel_encoder *encoder =
1440                         to_intel_encoder(old_conn_state->best_encoder);
1441
1442                 if (old_conn_state->crtc != &crtc->base)
1443                         continue;
1444
1445                 intel_opregion_notify_encoder(encoder, false);
1446                 if (encoder->disable)
1447                         encoder->disable(state, encoder,
1448                                          old_crtc_state, old_conn_state);
1449         }
1450 }
1451
1452 static void intel_encoders_post_disable(struct intel_atomic_state *state,
1453                                         struct intel_crtc *crtc)
1454 {
1455         const struct intel_crtc_state *old_crtc_state =
1456                 intel_atomic_get_old_crtc_state(state, crtc);
1457         const struct drm_connector_state *old_conn_state;
1458         struct drm_connector *conn;
1459         int i;
1460
1461         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1462                 struct intel_encoder *encoder =
1463                         to_intel_encoder(old_conn_state->best_encoder);
1464
1465                 if (old_conn_state->crtc != &crtc->base)
1466                         continue;
1467
1468                 if (encoder->post_disable)
1469                         encoder->post_disable(state, encoder,
1470                                               old_crtc_state, old_conn_state);
1471         }
1472 }
1473
1474 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
1475                                             struct intel_crtc *crtc)
1476 {
1477         const struct intel_crtc_state *old_crtc_state =
1478                 intel_atomic_get_old_crtc_state(state, crtc);
1479         const struct drm_connector_state *old_conn_state;
1480         struct drm_connector *conn;
1481         int i;
1482
1483         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1484                 struct intel_encoder *encoder =
1485                         to_intel_encoder(old_conn_state->best_encoder);
1486
1487                 if (old_conn_state->crtc != &crtc->base)
1488                         continue;
1489
1490                 if (encoder->post_pll_disable)
1491                         encoder->post_pll_disable(state, encoder,
1492                                                   old_crtc_state, old_conn_state);
1493         }
1494 }
1495
1496 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
1497                                        struct intel_crtc *crtc)
1498 {
1499         const struct intel_crtc_state *crtc_state =
1500                 intel_atomic_get_new_crtc_state(state, crtc);
1501         const struct drm_connector_state *conn_state;
1502         struct drm_connector *conn;
1503         int i;
1504
1505         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1506                 struct intel_encoder *encoder =
1507                         to_intel_encoder(conn_state->best_encoder);
1508
1509                 if (conn_state->crtc != &crtc->base)
1510                         continue;
1511
1512                 if (encoder->update_pipe)
1513                         encoder->update_pipe(state, encoder,
1514                                              crtc_state, conn_state);
1515         }
1516 }
1517
1518 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
1519 {
1520         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1521         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
1522
1523         plane->disable_arm(plane, crtc_state);
1524 }
1525
1526 static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
1527 {
1528         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1529         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
1530
1531         if (crtc_state->has_pch_encoder) {
1532                 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
1533                                                &crtc_state->fdi_m_n);
1534         } else if (intel_crtc_has_dp_encoder(crtc_state)) {
1535                 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
1536                                                &crtc_state->dp_m_n);
1537                 intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
1538                                                &crtc_state->dp_m2_n2);
1539         }
1540
1541         intel_set_transcoder_timings(crtc_state);
1542
1543         ilk_set_pipeconf(crtc_state);
1544 }
1545
/*
 * Full crtc enable sequence for ILK-style (PCH display) platforms.
 *
 * Programs the CPU transcoder, runs the encoder/PCH/pfit/LUT enable
 * steps and finally enables the transcoder, keeping FIFO underrun
 * reporting suppressed until the pipe is stably running.
 */
static void ilk_crtc_enable(struct intel_atomic_state *state,
                            struct intel_crtc *crtc)
{
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* The caller must not try to enable an already active crtc. */
        if (drm_WARN_ON(&dev_priv->drm, crtc->active))
                return;

        /*
         * Sometimes spurious CPU pipe underruns happen during FDI
         * training, at least with VGA+HDMI cloning. Suppress them.
         *
         * On ILK we get an occasional spurious CPU pipe underruns
         * between eDP port A enable and vdd enable. Also PCH port
         * enable seems to result in the occasional CPU pipe underrun.
         *
         * Spurious PCH underruns also occur during PCH enabling.
         */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

        /* M/N values, transcoder timings and PIPECONF */
        ilk_configure_cpu_transcoder(new_crtc_state);

        intel_set_pipe_src_size(new_crtc_state);

        crtc->active = true;

        intel_encoders_pre_enable(state, crtc);

        if (new_crtc_state->has_pch_encoder) {
                ilk_pch_pre_enable(state, crtc);
        } else {
                /* Without a PCH encoder the FDI link must be down. */
                assert_fdi_tx_disabled(dev_priv, pipe);
                assert_fdi_rx_disabled(dev_priv, pipe);
        }

        ilk_pfit_enable(new_crtc_state);

        /*
         * On ILK+ LUT must be loaded before the pipe is running but with
         * clocks enabled
         */
        intel_color_load_luts(new_crtc_state);
        intel_color_commit_noarm(new_crtc_state);
        intel_color_commit_arm(new_crtc_state);
        /* update DSPCNTR to configure gamma for pipe bottom color */
        intel_disable_primary_plane(new_crtc_state);

        intel_initial_watermarks(state, crtc);
        intel_enable_transcoder(new_crtc_state);

        if (new_crtc_state->has_pch_encoder)
                ilk_pch_enable(state, crtc);

        intel_crtc_vblank_on(new_crtc_state);

        intel_encoders_enable(state, crtc);

        if (HAS_PCH_CPT(dev_priv))
                intel_wait_for_pipe_scanline_moving(crtc);

        /*
         * Must wait for vblank to avoid spurious PCH FIFO underruns.
         * And a second vblank wait is needed at least on ILK with
         * some interlaced HDMI modes. Let's do the double wait always
         * in case there are more corner cases we don't know about.
         */
        if (new_crtc_state->has_pch_encoder) {
                intel_crtc_wait_for_next_vblank(crtc);
                intel_crtc_wait_for_next_vblank(crtc);
        }
        /* Pipe is up; re-enable underrun reporting. */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
1623
1624 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
1625                                             enum pipe pipe, bool apply)
1626 {
1627         u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
1628         u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
1629
1630         if (apply)
1631                 val |= mask;
1632         else
1633                 val &= ~mask;
1634
1635         intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
1636 }
1637
1638 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
1639 {
1640         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1641         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1642
1643         intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
1644                        HSW_LINETIME(crtc_state->linetime) |
1645                        HSW_IPS_LINETIME(crtc_state->ips_linetime));
1646 }
1647
1648 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
1649 {
1650         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1651         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1652         enum transcoder transcoder = crtc_state->cpu_transcoder;
1653         i915_reg_t reg = DISPLAY_VER(dev_priv) >= 14 ? MTL_CHICKEN_TRANS(transcoder) :
1654                          CHICKEN_TRANS(transcoder);
1655
1656         intel_de_rmw(dev_priv, reg,
1657                      HSW_FRAME_START_DELAY_MASK,
1658                      HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1));
1659 }
1660
/*
 * Run the pre-enable steps that belong to the bigjoiner master when
 * enabling any pipe of a bigjoiner configuration.
 */
static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
                                         const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *master_crtc = intel_master_crtc(crtc_state);

        /*
         * Enable sequence steps 1-7 on bigjoiner master
         */
        if (intel_crtc_is_bigjoiner_slave(crtc_state))
                intel_encoders_pre_pll_enable(state, master_crtc);

        /* The shared DPLL is hooked up for master and slave alike. */
        if (crtc_state->shared_dpll)
                intel_enable_shared_dpll(crtc_state);

        /* When enabling the slave, also run the master's encoder pre_enable. */
        if (intel_crtc_is_bigjoiner_slave(crtc_state))
                intel_encoders_pre_enable(state, master_crtc);
}
1678
/*
 * Program the CPU transcoder for HSW+ platforms: link M/N values,
 * timings (incl. VRR), pixel multiplier, frame start delay and TRANSCONF.
 */
static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

        /*
         * With a PCH encoder the transcoder drives the FDI link (M1/N1 =
         * FDI values); a CPU DP encoder gets the DP M/N values instead,
         * with M2/N2 holding the second set.
         */
        if (crtc_state->has_pch_encoder) {
                intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
                                               &crtc_state->fdi_m_n);
        } else if (intel_crtc_has_dp_encoder(crtc_state)) {
                intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
                                               &crtc_state->dp_m_n);
                intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
                                               &crtc_state->dp_m2_n2);
        }

        intel_set_transcoder_timings(crtc_state);
        if (HAS_VRR(dev_priv))
                intel_vrr_set_transcoder_timings(crtc_state);

        /* The EDP transcoder has no TRANS_MULT register. */
        if (cpu_transcoder != TRANSCODER_EDP)
                intel_de_write(dev_priv, TRANS_MULT(cpu_transcoder),
                               crtc_state->pixel_multiplier - 1);

        hsw_set_frame_start_delay(crtc_state);

        hsw_set_transconf(crtc_state);
}
1707
/*
 * Full crtc enable sequence for HSW+ (DDI based) platforms, including
 * the bigjoiner, DSC, scaler workaround and HSW pipe-enable workaround
 * special cases.
 */
static void hsw_crtc_enable(struct intel_atomic_state *state,
                            struct intel_crtc *crtc)
{
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
        enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
        bool psl_clkgate_wa;

        /* The caller must not try to enable an already active crtc. */
        if (drm_WARN_ON(&dev_priv->drm, crtc->active))
                return;

        intel_dmc_enable_pipe(dev_priv, crtc->pipe);

        /* Bigjoiner pipes route the pre-PLL/PLL/pre-enable steps via the master. */
        if (!new_crtc_state->bigjoiner_pipes) {
                intel_encoders_pre_pll_enable(state, crtc);

                if (new_crtc_state->shared_dpll)
                        intel_enable_shared_dpll(new_crtc_state);

                intel_encoders_pre_enable(state, crtc);
        } else {
                icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
        }

        intel_dsc_enable(new_crtc_state);

        if (DISPLAY_VER(dev_priv) >= 13)
                intel_uncompressed_joiner_enable(new_crtc_state);

        intel_set_pipe_src_size(new_crtc_state);
        if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
                bdw_set_pipe_misc(new_crtc_state);

        /*
         * The transcoder is shared on bigjoiner and owned by the master;
         * DSI transcoders are programmed elsewhere.
         */
        if (!intel_crtc_is_bigjoiner_slave(new_crtc_state) &&
            !transcoder_is_dsi(cpu_transcoder))
                hsw_configure_cpu_transcoder(new_crtc_state);

        crtc->active = true;

        /* Display WA #1180: WaDisableScalarClockGating: glk */
        psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
                new_crtc_state->pch_pfit.enabled;
        if (psl_clkgate_wa)
                glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

        if (DISPLAY_VER(dev_priv) >= 9)
                skl_pfit_enable(new_crtc_state);
        else
                ilk_pfit_enable(new_crtc_state);

        /*
         * On ILK+ LUT must be loaded before the pipe is running but with
         * clocks enabled
         */
        intel_color_load_luts(new_crtc_state);
        intel_color_commit_noarm(new_crtc_state);
        intel_color_commit_arm(new_crtc_state);
        /* update DSPCNTR to configure gamma/csc for pipe bottom color */
        if (DISPLAY_VER(dev_priv) < 9)
                intel_disable_primary_plane(new_crtc_state);

        hsw_set_linetime_wm(new_crtc_state);

        if (DISPLAY_VER(dev_priv) >= 11)
                icl_set_pipe_chicken(new_crtc_state);

        intel_initial_watermarks(state, crtc);

        if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
                intel_crtc_vblank_on(new_crtc_state);

        intel_encoders_enable(state, crtc);

        /* The WA only needs to stay in place until the next vblank. */
        if (psl_clkgate_wa) {
                intel_crtc_wait_for_next_vblank(crtc);
                glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
        }

        /* If we change the relative order between pipe/planes enabling, we need
         * to change the workaround. */
        hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
        if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
                struct intel_crtc *wa_crtc;

                wa_crtc = intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe);

                intel_crtc_wait_for_next_vblank(wa_crtc);
                intel_crtc_wait_for_next_vblank(wa_crtc);
        }
}
1800
/* Disable the ILK-style panel fitter, if it was in use. */
void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* To avoid upsetting the power well on haswell only disable the pfit if
         * it's in use. The hw state code will make sure we get this right. */
        if (!old_crtc_state->pch_pfit.enabled)
                return;

        /* Disable first (PF_CTL), then clear window position/size. */
        intel_de_write_fw(dev_priv, PF_CTL(pipe), 0);
        intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), 0);
        intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), 0);
}
1816
/*
 * Full crtc disable sequence for ILK-style (PCH display) platforms:
 * encoders, transcoder, pfit, PCH, then the shared DPLL; underrun
 * reporting is suppressed across the whole sequence.
 */
static void ilk_crtc_disable(struct intel_atomic_state *state,
                             struct intel_crtc *crtc)
{
        const struct intel_crtc_state *old_crtc_state =
                intel_atomic_get_old_crtc_state(state, crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /*
         * Sometimes spurious CPU pipe underruns happen when the
         * pipe is already disabled, but FDI RX/TX is still enabled.
         * Happens at least with VGA+HDMI cloning. Suppress them.
         */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

        intel_encoders_disable(state, crtc);

        intel_crtc_vblank_off(old_crtc_state);

        intel_disable_transcoder(old_crtc_state);

        ilk_pfit_disable(old_crtc_state);

        if (old_crtc_state->has_pch_encoder)
                ilk_pch_disable(state, crtc);

        intel_encoders_post_disable(state, crtc);

        if (old_crtc_state->has_pch_encoder)
                ilk_pch_post_disable(state, crtc);

        /* Everything is off; underrun reporting is safe again. */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);

        intel_disable_shared_dpll(old_crtc_state);
}
1854
/*
 * Crtc disable sequence for HSW+ (DDI based) platforms. For a bigjoiner
 * slave the encoder disable steps are skipped here (the master handles
 * them); PLL and post-PLL teardown always run.
 */
static void hsw_crtc_disable(struct intel_atomic_state *state,
                             struct intel_crtc *crtc)
{
        const struct intel_crtc_state *old_crtc_state =
                intel_atomic_get_old_crtc_state(state, crtc);
        struct drm_i915_private *i915 = to_i915(crtc->base.dev);

        /*
         * FIXME collapse everything to one hook.
         * Need care with mst->ddi interactions.
         */
        if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) {
                intel_encoders_disable(state, crtc);
                intel_encoders_post_disable(state, crtc);
        }

        intel_disable_shared_dpll(old_crtc_state);

        intel_encoders_post_pll_disable(state, crtc);

        intel_dmc_disable_pipe(i915, crtc->pipe);
}
1877
/* Enable the GMCH panel fitter as programmed into @crtc_state. */
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        /* Nothing to do when the state carries no pfit configuration. */
        if (!crtc_state->gmch_pfit.control)
                return;

        /*
         * The panel fitter should only be adjusted whilst the pipe is disabled,
         * according to register description and PRM.
         */
        drm_WARN_ON(&dev_priv->drm,
                    intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
        assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);

        /* Ratios first, then the control (enable) register. */
        intel_de_write(dev_priv, PFIT_PGM_RATIOS,
                       crtc_state->gmch_pfit.pgm_ratios);
        intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);

        /* Border color in case we don't scale up to the full screen. Black by
         * default, change to something else for debugging. */
        intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
}
1902
1903 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
1904 {
1905         if (phy == PHY_NONE)
1906                 return false;
1907         else if (IS_ALDERLAKE_S(dev_priv))
1908                 return phy <= PHY_E;
1909         else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
1910                 return phy <= PHY_D;
1911         else if (IS_JSL_EHL(dev_priv))
1912                 return phy <= PHY_C;
1913         else if (IS_ALDERLAKE_P(dev_priv) || IS_DISPLAY_VER(dev_priv, 11, 12))
1914                 return phy <= PHY_B;
1915         else
1916                 /*
1917                  * DG2 outputs labelled as "combo PHY" in the bspec use
1918                  * SNPS PHYs with completely different programming,
1919                  * hence we always return false here.
1920                  */
1921                 return false;
1922 }
1923
1924 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
1925 {
1926         if (IS_DG2(dev_priv))
1927                 /* DG2's "TC1" output uses a SNPS PHY */
1928                 return false;
1929         else if (IS_ALDERLAKE_P(dev_priv))
1930                 return phy >= PHY_F && phy <= PHY_I;
1931         else if (IS_TIGERLAKE(dev_priv))
1932                 return phy >= PHY_D && phy <= PHY_I;
1933         else if (IS_ICELAKE(dev_priv))
1934                 return phy >= PHY_C && phy <= PHY_F;
1935         else
1936                 return false;
1937 }
1938
1939 bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
1940 {
1941         if (phy == PHY_NONE)
1942                 return false;
1943         else if (IS_DG2(dev_priv))
1944                 /*
1945                  * All four "combo" ports and the TC1 port (PHY E) use
1946                  * Synopsis PHYs.
1947                  */
1948                 return phy <= PHY_E;
1949
1950         return false;
1951 }
1952
1953 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
1954 {
1955         if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
1956                 return PHY_D + port - PORT_D_XELPD;
1957         else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1)
1958                 return PHY_F + port - PORT_TC1;
1959         else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
1960                 return PHY_B + port - PORT_TC1;
1961         else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
1962                 return PHY_C + port - PORT_TC1;
1963         else if (IS_JSL_EHL(i915) && port == PORT_D)
1964                 return PHY_A;
1965
1966         return PHY_A + port - PORT_A;
1967 }
1968
1969 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
1970 {
1971         if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
1972                 return TC_PORT_NONE;
1973
1974         if (DISPLAY_VER(dev_priv) >= 12)
1975                 return TC_PORT_1 + port - PORT_TC1;
1976         else
1977                 return TC_PORT_1 + port - PORT_C;
1978 }
1979
1980 enum intel_display_power_domain
1981 intel_aux_power_domain(struct intel_digital_port *dig_port)
1982 {
1983         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
1984
1985         if (intel_tc_port_in_tbt_alt_mode(dig_port))
1986                 return intel_display_power_tbt_aux_domain(i915, dig_port->aux_ch);
1987
1988         return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
1989 }
1990
/*
 * Compute into @mask the set of display power domains required by
 * @crtc_state. An inactive crtc needs no domains (mask stays zero).
 */
static void get_crtc_power_domains(struct intel_crtc_state *crtc_state,
                                   struct intel_power_domain_mask *mask)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
        struct drm_encoder *encoder;
        enum pipe pipe = crtc->pipe;

        bitmap_zero(mask->bits, POWER_DOMAIN_NUM);

        if (!crtc_state->hw.active)
                return;

        /* The pipe and its transcoder are always needed. */
        set_bit(POWER_DOMAIN_PIPE(pipe), mask->bits);
        set_bit(POWER_DOMAIN_TRANSCODER(cpu_transcoder), mask->bits);
        if (crtc_state->pch_pfit.enabled ||
            crtc_state->pch_pfit.force_thru)
                set_bit(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe), mask->bits);

        /* Each attached encoder contributes its own power domain. */
        drm_for_each_encoder_mask(encoder, &dev_priv->drm,
                                  crtc_state->uapi.encoder_mask) {
                struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

                set_bit(intel_encoder->power_domain, mask->bits);
        }

        if (HAS_DDI(dev_priv) && crtc_state->has_audio)
                set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits);

        if (crtc_state->shared_dpll)
                set_bit(POWER_DOMAIN_DISPLAY_CORE, mask->bits);

        if (crtc_state->dsc.compression_enable)
                set_bit(intel_dsc_power_domain(crtc, cpu_transcoder), mask->bits);
}
2027
/*
 * Acquire references for all power domains newly required by
 * @crtc_state, recording them in crtc->enabled_power_domains. The
 * domains no longer required are returned via @old_domains for the
 * caller to release later (see intel_modeset_put_crtc_power_domains()).
 */
void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
                                          struct intel_power_domain_mask *old_domains)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum intel_display_power_domain domain;
        struct intel_power_domain_mask domains, new_domains;

        get_crtc_power_domains(crtc_state, &domains);

        /* new = needed but not yet held; old = held but no longer needed */
        bitmap_andnot(new_domains.bits,
                      domains.bits,
                      crtc->enabled_power_domains.mask.bits,
                      POWER_DOMAIN_NUM);
        bitmap_andnot(old_domains->bits,
                      crtc->enabled_power_domains.mask.bits,
                      domains.bits,
                      POWER_DOMAIN_NUM);

        for_each_power_domain(domain, &new_domains)
                intel_display_power_get_in_set(dev_priv,
                                               &crtc->enabled_power_domains,
                                               domain);
}
2052
2053 void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc,
2054                                           struct intel_power_domain_mask *domains)
2055 {
2056         intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
2057                                             &crtc->enabled_power_domains,
2058                                             domains);
2059 }
2060
/*
 * Program the CPU transcoder for GMCH (i9xx/VLV/CHV) platforms:
 * DP M/N values (if any), then timings, then PIPECONF.
 */
static void i9xx_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

        /* Only DP encoders need link M/N values; M2/N2 holds the second set. */
        if (intel_crtc_has_dp_encoder(crtc_state)) {
                intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
                                               &crtc_state->dp_m_n);
                intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
                                               &crtc_state->dp_m2_n2);
        }

        intel_set_transcoder_timings(crtc_state);

        i9xx_set_pipeconf(crtc_state);
}
2077
/* Full crtc enable sequence for VLV/CHV platforms. */
static void valleyview_crtc_enable(struct intel_atomic_state *state,
                                   struct intel_crtc *crtc)
{
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* The caller must not try to enable an already active crtc. */
        if (drm_WARN_ON(&dev_priv->drm, crtc->active))
                return;

        i9xx_configure_cpu_transcoder(new_crtc_state);

        intel_set_pipe_src_size(new_crtc_state);

        intel_de_write(dev_priv, VLV_PIPE_MSA_MISC(pipe), 0);

        /* CHV pipe B uses the legacy blend mode with a zeroed canvas. */
        if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
                intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
                intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
        }

        crtc->active = true;

        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

        intel_encoders_pre_pll_enable(state, crtc);

        /* CHV and VLV have different PLL programming. */
        if (IS_CHERRYVIEW(dev_priv))
                chv_enable_pll(new_crtc_state);
        else
                vlv_enable_pll(new_crtc_state);

        intel_encoders_pre_enable(state, crtc);

        i9xx_pfit_enable(new_crtc_state);

        intel_color_load_luts(new_crtc_state);
        intel_color_commit_noarm(new_crtc_state);
        intel_color_commit_arm(new_crtc_state);
        /* update DSPCNTR to configure gamma for pipe bottom color */
        intel_disable_primary_plane(new_crtc_state);

        intel_initial_watermarks(state, crtc);
        intel_enable_transcoder(new_crtc_state);

        intel_crtc_vblank_on(new_crtc_state);

        intel_encoders_enable(state, crtc);
}
2128
/* Full crtc enable sequence for pre-VLV GMCH platforms. */
static void i9xx_crtc_enable(struct intel_atomic_state *state,
                             struct intel_crtc *crtc)
{
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* The caller must not try to enable an already active crtc. */
        if (drm_WARN_ON(&dev_priv->drm, crtc->active))
                return;

        i9xx_configure_cpu_transcoder(new_crtc_state);

        intel_set_pipe_src_size(new_crtc_state);

        crtc->active = true;

        /* Gen2 has no underrun reporting (see also the wait below). */
        if (DISPLAY_VER(dev_priv) != 2)
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

        intel_encoders_pre_enable(state, crtc);

        i9xx_enable_pll(new_crtc_state);

        i9xx_pfit_enable(new_crtc_state);

        intel_color_load_luts(new_crtc_state);
        intel_color_commit_noarm(new_crtc_state);
        intel_color_commit_arm(new_crtc_state);
        /* update DSPCNTR to configure gamma for pipe bottom color */
        intel_disable_primary_plane(new_crtc_state);

        /* Fall back to a full watermark update if there is no per-crtc hook. */
        if (!intel_initial_watermarks(state, crtc))
                intel_update_watermarks(dev_priv);
        intel_enable_transcoder(new_crtc_state);

        intel_crtc_vblank_on(new_crtc_state);

        intel_encoders_enable(state, crtc);

        /* prevents spurious underruns */
        if (DISPLAY_VER(dev_priv) == 2)
                intel_crtc_wait_for_next_vblank(crtc);
}
2173
/* Disable the GMCH panel fitter, if it was in use. */
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        if (!old_crtc_state->gmch_pfit.control)
                return;

        /* The pfit may only be touched while the pipe is off. */
        assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder);

        drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
                    intel_de_read(dev_priv, PFIT_CONTROL));
        intel_de_write(dev_priv, PFIT_CONTROL, 0);
}
2188
2189 static void i9xx_crtc_disable(struct intel_atomic_state *state,
2190                               struct intel_crtc *crtc)
2191 {
2192         struct intel_crtc_state *old_crtc_state =
2193                 intel_atomic_get_old_crtc_state(state, crtc);
2194         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2195         enum pipe pipe = crtc->pipe;
2196
2197         /*
2198          * On gen2 planes are double buffered but the pipe isn't, so we must
2199          * wait for planes to fully turn off before disabling the pipe.
2200          */
2201         if (DISPLAY_VER(dev_priv) == 2)
2202                 intel_crtc_wait_for_next_vblank(crtc);
2203
2204         intel_encoders_disable(state, crtc);
2205
2206         intel_crtc_vblank_off(old_crtc_state);
2207
2208         intel_disable_transcoder(old_crtc_state);
2209
2210         i9xx_pfit_disable(old_crtc_state);
2211
2212         intel_encoders_post_disable(state, crtc);
2213
2214         if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
2215                 if (IS_CHERRYVIEW(dev_priv))
2216                         chv_disable_pll(dev_priv, pipe);
2217                 else if (IS_VALLEYVIEW(dev_priv))
2218                         vlv_disable_pll(dev_priv, pipe);
2219                 else
2220                         i9xx_disable_pll(old_crtc_state);
2221         }
2222
2223         intel_encoders_post_pll_disable(state, crtc);
2224
2225         if (DISPLAY_VER(dev_priv) != 2)
2226                 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2227
2228         if (!dev_priv->display.funcs.wm->initial_watermarks)
2229                 intel_update_watermarks(dev_priv);
2230
2231         /* clock the pipe down to 640x480@60 to potentially save power */
2232         if (IS_I830(dev_priv))
2233                 i830_enable_pipe(dev_priv, pipe);
2234 }
2235
2236
2237 /*
2238  * turn all crtc's off, but do not adjust state
2239  * This has to be paired with a call to intel_modeset_setup_hw_state.
2240  */
2241 int intel_display_suspend(struct drm_device *dev)
2242 {
2243         struct drm_i915_private *dev_priv = to_i915(dev);
2244         struct drm_atomic_state *state;
2245         int ret;
2246
2247         if (!HAS_DISPLAY(dev_priv))
2248                 return 0;
2249
2250         state = drm_atomic_helper_suspend(dev);
2251         ret = PTR_ERR_OR_ZERO(state);
2252         if (ret)
2253                 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
2254                         ret);
2255         else
2256                 dev_priv->display.restore.modeset_state = state;
2257         return ret;
2258 }
2259
/* Tear down the base encoder, then free the embedding intel_encoder. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
        drm_encoder_cleanup(encoder);

        kfree(to_intel_encoder(encoder));
}
2267
2268 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
2269 {
2270         const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2271
2272         /* GDG double wide on either pipe, otherwise pipe A only */
2273         return DISPLAY_VER(dev_priv) < 4 &&
2274                 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
2275 }
2276
2277 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
2278 {
2279         u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
2280         struct drm_rect src;
2281
2282         /*
2283          * We only use IF-ID interlacing. If we ever use
2284          * PF-ID we'll need to adjust the pixel_rate here.
2285          */
2286
2287         if (!crtc_state->pch_pfit.enabled)
2288                 return pixel_rate;
2289
2290         drm_rect_init(&src, 0, 0,
2291                       drm_rect_width(&crtc_state->pipe_src) << 16,
2292                       drm_rect_height(&crtc_state->pipe_src) << 16);
2293
2294         return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst,
2295                                    pixel_rate);
2296 }
2297
2298 static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
2299                                          const struct drm_display_mode *timings)
2300 {
2301         mode->hdisplay = timings->crtc_hdisplay;
2302         mode->htotal = timings->crtc_htotal;
2303         mode->hsync_start = timings->crtc_hsync_start;
2304         mode->hsync_end = timings->crtc_hsync_end;
2305
2306         mode->vdisplay = timings->crtc_vdisplay;
2307         mode->vtotal = timings->crtc_vtotal;
2308         mode->vsync_start = timings->crtc_vsync_start;
2309         mode->vsync_end = timings->crtc_vsync_end;
2310
2311         mode->flags = timings->flags;
2312         mode->type = DRM_MODE_TYPE_DRIVER;
2313
2314         mode->clock = timings->crtc_clock;
2315
2316         drm_mode_set_name(mode);
2317 }
2318
2319 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
2320 {
2321         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2322
2323         if (HAS_GMCH(dev_priv))
2324                 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
2325                 crtc_state->pixel_rate =
2326                         crtc_state->hw.pipe_mode.crtc_clock;
2327         else
2328                 crtc_state->pixel_rate =
2329                         ilk_pipe_pixel_rate(crtc_state);
2330 }
2331
2332 static void intel_bigjoiner_adjust_timings(const struct intel_crtc_state *crtc_state,
2333                                            struct drm_display_mode *mode)
2334 {
2335         int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
2336
2337         if (num_pipes < 2)
2338                 return;
2339
2340         mode->crtc_clock /= num_pipes;
2341         mode->crtc_hdisplay /= num_pipes;
2342         mode->crtc_hblank_start /= num_pipes;
2343         mode->crtc_hblank_end /= num_pipes;
2344         mode->crtc_hsync_start /= num_pipes;
2345         mode->crtc_hsync_end /= num_pipes;
2346         mode->crtc_htotal /= num_pipes;
2347 }
2348
2349 static void intel_splitter_adjust_timings(const struct intel_crtc_state *crtc_state,
2350                                           struct drm_display_mode *mode)
2351 {
2352         int overlap = crtc_state->splitter.pixel_overlap;
2353         int n = crtc_state->splitter.link_count;
2354
2355         if (!crtc_state->splitter.enable)
2356                 return;
2357
2358         /*
2359          * eDP MSO uses segment timings from EDID for transcoder
2360          * timings, but full mode for everything else.
2361          *
2362          * h_full = (h_segment - pixel_overlap) * link_count
2363          */
2364         mode->crtc_hdisplay = (mode->crtc_hdisplay - overlap) * n;
2365         mode->crtc_hblank_start = (mode->crtc_hblank_start - overlap) * n;
2366         mode->crtc_hblank_end = (mode->crtc_hblank_end - overlap) * n;
2367         mode->crtc_hsync_start = (mode->crtc_hsync_start - overlap) * n;
2368         mode->crtc_hsync_end = (mode->crtc_hsync_end - overlap) * n;
2369         mode->crtc_htotal = (mode->crtc_htotal - overlap) * n;
2370         mode->crtc_clock *= n;
2371 }
2372
/*
 * Fill in the derived mode state (hw.mode, hw.pipe_mode, and the
 * normal timings of hw.adjusted_mode) from the adjusted_mode crtc
 * timings read out from the transcoder, undoing the MSO segment
 * split and the bigjoiner pipe split along the way.
 */
static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
{
	struct drm_display_mode *mode = &crtc_state->hw.mode;
	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

	/*
	 * Start with the adjusted_mode crtc timings, which
	 * have been filled with the transcoder timings.
	 */
	drm_mode_copy(pipe_mode, adjusted_mode);

	/* Expand MSO per-segment transcoder timings to full */
	intel_splitter_adjust_timings(crtc_state, pipe_mode);

	/*
	 * We want the full numbers in adjusted_mode normal timings,
	 * adjusted_mode crtc timings are left with the raw transcoder
	 * timings.
	 */
	intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);

	/* Populate the "user" mode with full numbers */
	drm_mode_copy(mode, pipe_mode);
	intel_mode_from_crtc_timings(mode, mode);
	/*
	 * User mode active size comes from pipe_src, scaled back up to
	 * the full width when bigjoiner splits it across pipes.
	 */
	mode->hdisplay = drm_rect_width(&crtc_state->pipe_src) *
		(intel_bigjoiner_num_pipes(crtc_state) ?: 1);
	mode->vdisplay = drm_rect_height(&crtc_state->pipe_src);

	/* Derive per-pipe timings in case bigjoiner is used */
	intel_bigjoiner_adjust_timings(crtc_state, pipe_mode);
	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);

	intel_crtc_compute_pixel_rate(crtc_state);
}
2408
/*
 * Read out the encoder's hardware state into @crtc_state via its
 * get_config() hook, then fill in the derived mode/timing state
 * that depends on it.
 */
void intel_encoder_get_config(struct intel_encoder *encoder,
			      struct intel_crtc_state *crtc_state)
{
	encoder->get_config(encoder, crtc_state);

	intel_crtc_readout_derived_state(crtc_state);
}
2416
2417 static void intel_bigjoiner_compute_pipe_src(struct intel_crtc_state *crtc_state)
2418 {
2419         int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
2420         int width, height;
2421
2422         if (num_pipes < 2)
2423                 return;
2424
2425         width = drm_rect_width(&crtc_state->pipe_src);
2426         height = drm_rect_height(&crtc_state->pipe_src);
2427
2428         drm_rect_init(&crtc_state->pipe_src, 0, 0,
2429                       width / num_pipes, height);
2430 }
2431
2432 static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
2433 {
2434         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2435         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2436
2437         intel_bigjoiner_compute_pipe_src(crtc_state);
2438
2439         /*
2440          * Pipe horizontal size must be even in:
2441          * - DVO ganged mode
2442          * - LVDS dual channel mode
2443          * - Double wide pipe
2444          */
2445         if (drm_rect_width(&crtc_state->pipe_src) & 1) {
2446                 if (crtc_state->double_wide) {
2447                         drm_dbg_kms(&i915->drm,
2448                                     "[CRTC:%d:%s] Odd pipe source width not supported with double wide pipe\n",
2449                                     crtc->base.base.id, crtc->base.name);
2450                         return -EINVAL;
2451                 }
2452
2453                 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
2454                     intel_is_dual_link_lvds(i915)) {
2455                         drm_dbg_kms(&i915->drm,
2456                                     "[CRTC:%d:%s] Odd pipe source width not supported with dual link LVDS\n",
2457                                     crtc->base.base.id, crtc->base.name);
2458                         return -EINVAL;
2459                 }
2460         }
2461
2462         return 0;
2463 }
2464
2465 static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
2466 {
2467         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2468         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2469         struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
2470         struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
2471         int clock_limit = i915->max_dotclk_freq;
2472
2473         /*
2474          * Start with the adjusted_mode crtc timings, which
2475          * have been filled with the transcoder timings.
2476          */
2477         drm_mode_copy(pipe_mode, adjusted_mode);
2478
2479         /* Expand MSO per-segment transcoder timings to full */
2480         intel_splitter_adjust_timings(crtc_state, pipe_mode);
2481
2482         /* Derive per-pipe timings in case bigjoiner is used */
2483         intel_bigjoiner_adjust_timings(crtc_state, pipe_mode);
2484         intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
2485
2486         if (DISPLAY_VER(i915) < 4) {
2487                 clock_limit = i915->display.cdclk.max_cdclk_freq * 9 / 10;
2488
2489                 /*
2490                  * Enable double wide mode when the dot clock
2491                  * is > 90% of the (display) core speed.
2492                  */
2493                 if (intel_crtc_supports_double_wide(crtc) &&
2494                     pipe_mode->crtc_clock > clock_limit) {
2495                         clock_limit = i915->max_dotclk_freq;
2496                         crtc_state->double_wide = true;
2497                 }
2498         }
2499
2500         if (pipe_mode->crtc_clock > clock_limit) {
2501                 drm_dbg_kms(&i915->drm,
2502                             "[CRTC:%d:%s] requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
2503                             crtc->base.base.id, crtc->base.name,
2504                             pipe_mode->crtc_clock, clock_limit,
2505                             str_yes_no(crtc_state->double_wide));
2506                 return -EINVAL;
2507         }
2508
2509         return 0;
2510 }
2511
/*
 * Compute the remaining CRTC-level state during atomic check:
 * DPLL clocks, pipe source size, pipe mode, pixel rate, and (for
 * PCH encoders) the FDI configuration.
 *
 * Returns 0 on success or a negative errno from the first failing step.
 */
static int intel_crtc_compute_config(struct intel_atomic_state *state,
				     struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	int ret;

	ret = intel_dpll_crtc_compute_clock(state, crtc);
	if (ret)
		return ret;

	ret = intel_crtc_compute_pipe_src(crtc_state);
	if (ret)
		return ret;

	ret = intel_crtc_compute_pipe_mode(crtc_state);
	if (ret)
		return ret;

	intel_crtc_compute_pixel_rate(crtc_state);

	if (crtc_state->has_pch_encoder)
		return ilk_fdi_compute_config(crtc, crtc_state);

	return 0;
}
2538
2539 static void
2540 intel_reduce_m_n_ratio(u32 *num, u32 *den)
2541 {
2542         while (*num > DATA_LINK_M_N_MASK ||
2543                *den > DATA_LINK_M_N_MASK) {
2544                 *num >>= 1;
2545                 *den >>= 1;
2546         }
2547 }
2548
2549 static void compute_m_n(u32 *ret_m, u32 *ret_n,
2550                         u32 m, u32 n, u32 constant_n)
2551 {
2552         if (constant_n)
2553                 *ret_n = constant_n;
2554         else
2555                 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
2556
2557         *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
2558         intel_reduce_m_n_ratio(ret_m, ret_n);
2559 }
2560
/*
 * Compute the DP data and link M/N values for a link configuration.
 * @bits_per_pixel and @pixel_clock describe the video stream,
 * @nlanes and @link_clock the link; when @fec_enable is set the data
 * clock is bumped to account for FEC overhead.
 */
void
intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
		       int pixel_clock, int link_clock,
		       struct intel_link_m_n *m_n,
		       bool fec_enable)
{
	u32 data_clock = bits_per_pixel * pixel_clock;

	if (fec_enable)
		data_clock = intel_dp_mode_to_fec_clock(data_clock);

	/*
	 * Windows/BIOS uses fixed M/N values always. Follow suit.
	 *
	 * Also several DP dongles in particular seem to be fussy
	 * about too large link M/N values. Presumably the 20bit
	 * value used by Windows/BIOS is acceptable to everyone.
	 */
	m_n->tu = 64;
	/* data M/N: stream rate vs. link rate, with fixed N = 0x8000000 */
	compute_m_n(&m_n->data_m, &m_n->data_n,
		    data_clock, link_clock * nlanes * 8,
		    0x8000000);

	/* link M/N: pixel clock vs. link clock, with fixed N = 0x80000 */
	compute_m_n(&m_n->link_m, &m_n->link_n,
		    pixel_clock, link_clock,
		    0x80000);
}
2588
2589 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
2590 {
2591         /*
2592          * There may be no VBT; and if the BIOS enabled SSC we can
2593          * just keep using it to avoid unnecessary flicker.  Whereas if the
2594          * BIOS isn't using it, don't assume it will work even if the VBT
2595          * indicates as much.
2596          */
2597         if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
2598                 bool bios_lvds_use_ssc = intel_de_read(dev_priv,
2599                                                        PCH_DREF_CONTROL) &
2600                         DREF_SSC1_ENABLE;
2601
2602                 if (dev_priv->display.vbt.lvds_use_ssc != bios_lvds_use_ssc) {
2603                         drm_dbg_kms(&dev_priv->drm,
2604                                     "SSC %s by BIOS, overriding VBT which says %s\n",
2605                                     str_enabled_disabled(bios_lvds_use_ssc),
2606                                     str_enabled_disabled(dev_priv->display.vbt.lvds_use_ssc));
2607                         dev_priv->display.vbt.lvds_use_ssc = bios_lvds_use_ssc;
2608                 }
2609         }
2610 }
2611
/* Initialize @m_n to the values corresponding to an all-zero register. */
void intel_zero_m_n(struct intel_link_m_n *m_n)
{
	/* corresponds to 0 register value */
	memset(m_n, 0, sizeof(*m_n));
	m_n->tu = 1;
}
2618
/*
 * Write one full set of M/N values to the given data/link registers.
 * Register write order is significant — see the LINK_N comment below.
 */
void intel_set_m_n(struct drm_i915_private *i915,
		   const struct intel_link_m_n *m_n,
		   i915_reg_t data_m_reg, i915_reg_t data_n_reg,
		   i915_reg_t link_m_reg, i915_reg_t link_n_reg)
{
	/* TU size and data M share the same register */
	intel_de_write(i915, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m);
	intel_de_write(i915, data_n_reg, m_n->data_n);
	intel_de_write(i915, link_m_reg, m_n->link_m);
	/*
	 * On BDW+ writing LINK_N arms the double buffered update
	 * of all the M/N registers, so it must be written last.
	 */
	intel_de_write(i915, link_n_reg, m_n->link_n);
}
2633
2634 bool intel_cpu_transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
2635                                     enum transcoder transcoder)
2636 {
2637         if (IS_HASWELL(dev_priv))
2638                 return transcoder == TRANSCODER_EDP;
2639
2640         return IS_DISPLAY_VER(dev_priv, 5, 7) || IS_CHERRYVIEW(dev_priv);
2641 }
2642
2643 void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc,
2644                                     enum transcoder transcoder,
2645                                     const struct intel_link_m_n *m_n)
2646 {
2647         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2648         enum pipe pipe = crtc->pipe;
2649
2650         if (DISPLAY_VER(dev_priv) >= 5)
2651                 intel_set_m_n(dev_priv, m_n,
2652                               PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder),
2653                               PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder));
2654         else
2655                 intel_set_m_n(dev_priv, m_n,
2656                               PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
2657                               PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
2658 }
2659
/*
 * Program the M2/N2 register set for the given transcoder. No-op on
 * platforms/transcoders lacking M2/N2 (see intel_cpu_transcoder_has_m2_n2()).
 */
void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc,
				    enum transcoder transcoder,
				    const struct intel_link_m_n *m_n)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
		return;

	intel_set_m_n(dev_priv, m_n,
		      PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder),
		      PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder));
}
2673
/*
 * Program the transcoder H/V timing registers from the adjusted mode,
 * handling the interlace halfline adjustment, the ADL+ VBLANK_START
 * replacement, and the HSW EDP VTOTAL workaround along the way.
 */
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vdisplay = adjusted_mode->crtc_vdisplay;
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_start = adjusted_mode->crtc_vblank_start;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/*
	 * VBLANK_START no longer works on ADL+, instead we must use
	 * TRANS_SET_CONTEXT_LATENCY to configure the pipe vblank start.
	 */
	if (DISPLAY_VER(dev_priv) >= 13) {
		intel_de_write(dev_priv, TRANS_SET_CONTEXT_LATENCY(cpu_transcoder),
			       crtc_vblank_start - crtc_vdisplay);

		/*
		 * VBLANK_START not used by hw, just clear it
		 * to make it stand out in register dumps.
		 */
		crtc_vblank_start = 1;
	}

	if (DISPLAY_VER(dev_priv) > 3)
		intel_de_write(dev_priv, TRANS_VSYNCSHIFT(cpu_transcoder),
			       vsyncshift);

	/* All registers take the value minus one. */
	intel_de_write(dev_priv, TRANS_HTOTAL(cpu_transcoder),
		       HACTIVE(adjusted_mode->crtc_hdisplay - 1) |
		       HTOTAL(adjusted_mode->crtc_htotal - 1));
	intel_de_write(dev_priv, TRANS_HBLANK(cpu_transcoder),
		       HBLANK_START(adjusted_mode->crtc_hblank_start - 1) |
		       HBLANK_END(adjusted_mode->crtc_hblank_end - 1));
	intel_de_write(dev_priv, TRANS_HSYNC(cpu_transcoder),
		       HSYNC_START(adjusted_mode->crtc_hsync_start - 1) |
		       HSYNC_END(adjusted_mode->crtc_hsync_end - 1));

	intel_de_write(dev_priv, TRANS_VTOTAL(cpu_transcoder),
		       VACTIVE(crtc_vdisplay - 1) |
		       VTOTAL(crtc_vtotal - 1));
	intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder),
		       VBLANK_START(crtc_vblank_start - 1) |
		       VBLANK_END(crtc_vblank_end - 1));
	intel_de_write(dev_priv, TRANS_VSYNC(cpu_transcoder),
		       VSYNC_START(adjusted_mode->crtc_vsync_start - 1) |
		       VSYNC_END(adjusted_mode->crtc_vsync_end - 1));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		intel_de_write(dev_priv, TRANS_VTOTAL(pipe),
			       VACTIVE(crtc_vdisplay - 1) |
			       VTOTAL(crtc_vtotal - 1));
}
2754
2755 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
2756 {
2757         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2758         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2759         int width = drm_rect_width(&crtc_state->pipe_src);
2760         int height = drm_rect_height(&crtc_state->pipe_src);
2761         enum pipe pipe = crtc->pipe;
2762
2763         /* pipesrc controls the size that is scaled from, which should
2764          * always be the user's requested size.
2765          */
2766         intel_de_write(dev_priv, PIPESRC(pipe),
2767                        PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
2768 }
2769
2770 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
2771 {
2772         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2773         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2774
2775         if (DISPLAY_VER(dev_priv) == 2)
2776                 return false;
2777
2778         if (DISPLAY_VER(dev_priv) >= 9 ||
2779             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
2780                 return intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)) & TRANSCONF_INTERLACE_MASK_HSW;
2781         else
2782                 return intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)) & TRANSCONF_INTERLACE_MASK;
2783 }
2784
/*
 * Read the transcoder H/V timing registers back into the
 * adjusted_mode crtc timings, undoing the interlace halfline
 * adjustment and reconstructing vblank_start on ADL+ from
 * TRANS_SET_CONTEXT_LATENCY.
 */
static void intel_get_transcoder_timings(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	u32 tmp;

	/* Hardware stores each value minus one. */
	tmp = intel_de_read(dev_priv, TRANS_HTOTAL(cpu_transcoder));
	adjusted_mode->crtc_hdisplay = REG_FIELD_GET(HACTIVE_MASK, tmp) + 1;
	adjusted_mode->crtc_htotal = REG_FIELD_GET(HTOTAL_MASK, tmp) + 1;

	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, TRANS_HBLANK(cpu_transcoder));
		adjusted_mode->crtc_hblank_start = REG_FIELD_GET(HBLANK_START_MASK, tmp) + 1;
		adjusted_mode->crtc_hblank_end = REG_FIELD_GET(HBLANK_END_MASK, tmp) + 1;
	}

	tmp = intel_de_read(dev_priv, TRANS_HSYNC(cpu_transcoder));
	adjusted_mode->crtc_hsync_start = REG_FIELD_GET(HSYNC_START_MASK, tmp) + 1;
	adjusted_mode->crtc_hsync_end = REG_FIELD_GET(HSYNC_END_MASK, tmp) + 1;

	tmp = intel_de_read(dev_priv, TRANS_VTOTAL(cpu_transcoder));
	adjusted_mode->crtc_vdisplay = REG_FIELD_GET(VACTIVE_MASK, tmp) + 1;
	adjusted_mode->crtc_vtotal = REG_FIELD_GET(VTOTAL_MASK, tmp) + 1;

	/* FIXME TGL+ DSI transcoders have this! */
	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, TRANS_VBLANK(cpu_transcoder));
		adjusted_mode->crtc_vblank_start = REG_FIELD_GET(VBLANK_START_MASK, tmp) + 1;
		adjusted_mode->crtc_vblank_end = REG_FIELD_GET(VBLANK_END_MASK, tmp) + 1;
	}
	tmp = intel_de_read(dev_priv, TRANS_VSYNC(cpu_transcoder));
	adjusted_mode->crtc_vsync_start = REG_FIELD_GET(VSYNC_START_MASK, tmp) + 1;
	adjusted_mode->crtc_vsync_end = REG_FIELD_GET(VSYNC_END_MASK, tmp) + 1;

	/* Undo the halflines the chip adds automatically for interlace. */
	if (intel_pipe_is_interlaced(pipe_config)) {
		adjusted_mode->flags |= DRM_MODE_FLAG_INTERLACE;
		adjusted_mode->crtc_vtotal += 1;
		adjusted_mode->crtc_vblank_end += 1;
	}

	/* ADL+ keeps vblank start in TRANS_SET_CONTEXT_LATENCY instead. */
	if (DISPLAY_VER(dev_priv) >= 13 && !transcoder_is_dsi(cpu_transcoder))
		adjusted_mode->crtc_vblank_start =
			adjusted_mode->crtc_vdisplay +
			intel_de_read(dev_priv, TRANS_SET_CONTEXT_LATENCY(cpu_transcoder));
}
2833
2834 static void intel_bigjoiner_adjust_pipe_src(struct intel_crtc_state *crtc_state)
2835 {
2836         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2837         int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
2838         enum pipe master_pipe, pipe = crtc->pipe;
2839         int width;
2840
2841         if (num_pipes < 2)
2842                 return;
2843
2844         master_pipe = bigjoiner_master_pipe(crtc_state);
2845         width = drm_rect_width(&crtc_state->pipe_src);
2846
2847         drm_rect_translate_to(&crtc_state->pipe_src,
2848                               (pipe - master_pipe) * width, 0);
2849 }
2850
2851 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
2852                                     struct intel_crtc_state *pipe_config)
2853 {
2854         struct drm_device *dev = crtc->base.dev;
2855         struct drm_i915_private *dev_priv = to_i915(dev);
2856         u32 tmp;
2857
2858         tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
2859
2860         drm_rect_init(&pipe_config->pipe_src, 0, 0,
2861                       REG_FIELD_GET(PIPESRC_WIDTH_MASK, tmp) + 1,
2862                       REG_FIELD_GET(PIPESRC_HEIGHT_MASK, tmp) + 1);
2863
2864         intel_bigjoiner_adjust_pipe_src(pipe_config);
2865 }
2866
/*
 * Assemble and write the TRANSCONF (pipeconf) register for pre-DDI
 * platforms: enable state, double wide, bpc/dither, interlace mode,
 * color range, gamma mode and frame start delay.
 */
void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 val = 0;

	/*
	 * - We keep both pipes enabled on 830
	 * - During modeset the pipe is still disabled and must remain so
	 * - During fastset the pipe is already enabled and must remain so
	 */
	if (IS_I830(dev_priv) || !intel_crtc_needs_modeset(crtc_state))
		val |= TRANSCONF_ENABLE;

	if (crtc_state->double_wide)
		val |= TRANSCONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
			val |= TRANSCONF_DITHER_EN |
				TRANSCONF_DITHER_TYPE_SP;

		switch (crtc_state->pipe_bpp) {
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			MISSING_CASE(crtc_state->pipe_bpp);
			fallthrough;
		case 18:
			val |= TRANSCONF_BPC_6;
			break;
		case 24:
			val |= TRANSCONF_BPC_8;
			break;
		case 30:
			val |= TRANSCONF_BPC_10;
			break;
		}
	}

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		if (DISPLAY_VER(dev_priv) < 4 ||
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANSCONF_INTERLACE_W_FIELD_INDICATION;
		else
			val |= TRANSCONF_INTERLACE_W_SYNC_SHIFT;
	} else {
		val |= TRANSCONF_INTERLACE_PROGRESSIVE;
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	     crtc_state->limited_color_range)
		val |= TRANSCONF_COLOR_RANGE_SELECT;

	val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode);

	/* framestart_delay is stored minus one in the register */
	val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);

	intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);
	intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
}
2931
2932 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
2933 {
2934         if (IS_I830(dev_priv))
2935                 return false;
2936
2937         return DISPLAY_VER(dev_priv) >= 4 ||
2938                 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
2939 }
2940
/*
 * Read out the GMCH panel fitter state, but only when the pfit
 * exists, is enabled, and is attached to this crtc's pipe.
 */
static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	if (!i9xx_has_pfit(dev_priv))
		return;

	tmp = intel_de_read(dev_priv, PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe. */
	if (DISPLAY_VER(dev_priv) < 4) {
		/* pre-gen4: the single pfit is hardwired to pipe B */
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	crtc_state->gmch_pfit.control = tmp;
	crtc_state->gmch_pfit.pgm_ratios =
		intel_de_read(dev_priv, PFIT_PGM_RATIOS);
}
2967
/*
 * Read back the VLV DPLL dividers over DPIO sideband and compute
 * port_clock from them. Skipped for DSI where the DPLL is unused.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	struct dpll clock;
	u32 mdiv;
	int refclk = 100000;	/* 100 MHz reference */

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	vlv_dpio_put(dev_priv);

	/* Unpack the divider fields from PLL_DW3 */
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
2994
/*
 * Read back the CHV DPLL dividers over DPIO sideband and compute
 * port_clock from them. Skipped for DSI where the DPLL is unused.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;	/* 100 MHz reference */

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	vlv_dpio_put(dev_priv);

	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	/* integer part of M2 in the top bits, fraction (if enabled) below */
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
3028
3029 static enum intel_output_format
3030 bdw_get_pipe_misc_output_format(struct intel_crtc *crtc)
3031 {
3032         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3033         u32 tmp;
3034
3035         tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe));
3036
3037         if (tmp & PIPE_MISC_YUV420_ENABLE) {
3038                 /* We support 4:2:0 in full blend mode only */
3039                 drm_WARN_ON(&dev_priv->drm,
3040                             (tmp & PIPE_MISC_YUV420_MODE_FULL_BLEND) == 0);
3041
3042                 return INTEL_OUTPUT_FORMAT_YCBCR420;
3043         } else if (tmp & PIPE_MISC_OUTPUT_COLORSPACE_YUV) {
3044                 return INTEL_OUTPUT_FORMAT_YCBCR444;
3045         } else {
3046                 return INTEL_OUTPUT_FORMAT_RGB;
3047         }
3048 }
3049
/*
 * Read out the pipe gamma/CSC enable state from the primary plane's
 * control register (these bits live in DSPCNTR on these platforms).
 */
static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 tmp;

	tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));

	if (tmp & DISP_PIPE_GAMMA_ENABLE)
		crtc_state->gamma_enable = true;

	/* GMCH platforms have no pipe CSC */
	if (!HAS_GMCH(dev_priv) &&
	    tmp & DISP_PIPE_CSC_ENABLE)
		crtc_state->csc_enable = true;
}
3067
/*
 * Read out the hardware state of a GMCH-era pipe into @pipe_config.
 *
 * Returns true if the pipe is enabled and its state was read out,
 * false if the pipe is disabled or its power well is down.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	/* Pipe registers can only be read while the pipe's power well is up. */
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	/* On these platforms the transcoder is always tied 1:1 to the pipe. */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder));
	if (!(tmp & TRANSCONF_ENABLE))
		goto out;

	/* pipe bpp is read out only on G4X/VLV/CHV */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & TRANSCONF_BPC_MASK) {
		case TRANSCONF_BPC_6:
			pipe_config->pipe_bpp = 18;
			break;
		case TRANSCONF_BPC_8:
			pipe_config->pipe_bpp = 24;
			break;
		case TRANSCONF_BPC_10:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			MISSING_CASE(tmp);
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & TRANSCONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_I9XX, tmp);

	/* Hardware stores the frame start delay as value - 1 */
	pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1;

	if (IS_CHERRYVIEW(dev_priv))
		pipe_config->cgm_mode = intel_de_read(dev_priv,
						      CGM_PIPE_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	if (DISPLAY_VER(dev_priv) < 4)
		pipe_config->double_wide = tmp & TRANSCONF_DOUBLE_WIDE;

	intel_get_transcoder_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(pipe_config);

	if (DISPLAY_VER(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->display.state.chv_dpll_md[crtc->pipe];
		else
			tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
		/* Hardware stores the multiplier as value - 1 */
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
							DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
							       FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
							       FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
3191
/*
 * Program TRANSCONF for ILK-style (PCH) platforms from the committed
 * crtc state. Does not enable the pipe during a modeset; only keeps it
 * enabled during a fastset.
 */
void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 val = 0;

	/*
	 * - During modeset the pipe is still disabled and must remain so
	 * - During fastset the pipe is already enabled and must remain so
	 */
	if (!intel_crtc_needs_modeset(crtc_state))
		val |= TRANSCONF_ENABLE;

	switch (crtc_state->pipe_bpp) {
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		MISSING_CASE(crtc_state->pipe_bpp);
		fallthrough;
	case 18:
		val |= TRANSCONF_BPC_6;
		break;
	case 24:
		val |= TRANSCONF_BPC_8;
		break;
	case 30:
		val |= TRANSCONF_BPC_10;
		break;
	case 36:
		val |= TRANSCONF_BPC_12;
		break;
	}

	if (crtc_state->dither)
		val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP;

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= TRANSCONF_INTERLACE_IF_ID_ILK;
	else
		val |= TRANSCONF_INTERLACE_PF_PD_ILK;

	/*
	 * This would end up with an odd purple hue over
	 * the entire display. Make sure we don't do it.
	 */
	drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
		    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);

	/*
	 * NOTE(review): SDVO outputs are excluded from the pipe-level
	 * limited range bit, presumably because the SDVO encoder handles
	 * the range conversion itself -- confirm against the encoder code.
	 */
	if (crtc_state->limited_color_range &&
	    !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
		val |= TRANSCONF_COLOR_RANGE_SELECT;

	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= TRANSCONF_OUTPUT_COLORSPACE_YUV709;

	val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode);

	/* Hardware stores the frame start delay as value - 1 */
	val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
	val |= TRANSCONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay);

	intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);
	intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
}
3255
3256 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
3257 {
3258         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3259         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3260         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3261         u32 val = 0;
3262
3263         /*
3264          * - During modeset the pipe is still disabled and must remain so
3265          * - During fastset the pipe is already enabled and must remain so
3266          */
3267         if (!intel_crtc_needs_modeset(crtc_state))
3268                 val |= TRANSCONF_ENABLE;
3269
3270         if (IS_HASWELL(dev_priv) && crtc_state->dither)
3271                 val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP;
3272
3273         if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3274                 val |= TRANSCONF_INTERLACE_IF_ID_ILK;
3275         else
3276                 val |= TRANSCONF_INTERLACE_PF_PD_ILK;
3277
3278         if (IS_HASWELL(dev_priv) &&
3279             crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
3280                 val |= TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW;
3281
3282         intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);
3283         intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
3284 }
3285
3286 static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state)
3287 {
3288         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3289         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3290         u32 val = 0;
3291
3292         switch (crtc_state->pipe_bpp) {
3293         case 18:
3294                 val |= PIPE_MISC_BPC_6;
3295                 break;
3296         case 24:
3297                 val |= PIPE_MISC_BPC_8;
3298                 break;
3299         case 30:
3300                 val |= PIPE_MISC_BPC_10;
3301                 break;
3302         case 36:
3303                 /* Port output 12BPC defined for ADLP+ */
3304                 if (DISPLAY_VER(dev_priv) > 12)
3305                         val |= PIPE_MISC_BPC_12_ADLP;
3306                 break;
3307         default:
3308                 MISSING_CASE(crtc_state->pipe_bpp);
3309                 break;
3310         }
3311
3312         if (crtc_state->dither)
3313                 val |= PIPE_MISC_DITHER_ENABLE | PIPE_MISC_DITHER_TYPE_SP;
3314
3315         if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
3316             crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
3317                 val |= PIPE_MISC_OUTPUT_COLORSPACE_YUV;
3318
3319         if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
3320                 val |= PIPE_MISC_YUV420_ENABLE |
3321                         PIPE_MISC_YUV420_MODE_FULL_BLEND;
3322
3323         if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state))
3324                 val |= PIPE_MISC_HDR_MODE_PRECISION;
3325
3326         if (DISPLAY_VER(dev_priv) >= 12)
3327                 val |= PIPE_MISC_PIXEL_ROUNDING_TRUNC;
3328
3329         intel_de_write(dev_priv, PIPE_MISC(crtc->pipe), val);
3330 }
3331
3332 int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc)
3333 {
3334         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3335         u32 tmp;
3336
3337         tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe));
3338
3339         switch (tmp & PIPE_MISC_BPC_MASK) {
3340         case PIPE_MISC_BPC_6:
3341                 return 18;
3342         case PIPE_MISC_BPC_8:
3343                 return 24;
3344         case PIPE_MISC_BPC_10:
3345                 return 30;
3346         /*
3347          * PORT OUTPUT 12 BPC defined for ADLP+.
3348          *
3349          * TODO:
3350          * For previous platforms with DSI interface, bits 5:7
3351          * are used for storing pipe_bpp irrespective of dithering.
3352          * Since the value of 12 BPC is not defined for these bits
3353          * on older platforms, need to find a workaround for 12 BPC
3354          * MIPI DSI HW readout.
3355          */
3356         case PIPE_MISC_BPC_12_ADLP:
3357                 if (DISPLAY_VER(dev_priv) > 12)
3358                         return 36;
3359                 fallthrough;
3360         default:
3361                 MISSING_CASE(tmp);
3362                 return 0;
3363         }
3364 }
3365
/*
 * Compute the number of FDI lanes needed to carry @target_clock (kHz)
 * at @bpp bits per pixel over a link running at @link_bw (kHz).
 */
int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Account for spread spectrum to avoid
	 * oversubscribing the link. Max center spread
	 * is 2.5%; use 5% for safety's sake.
	 */
	unsigned int bps = target_clock * bpp * 21 / 20;
	unsigned int link_bps = link_bw * 8;

	/* lanes = ceil(bps / (link_bw * 8)) */
	return (bps + link_bps - 1) / link_bps;
}
3376
/*
 * Read out a data/link M/N register quad into @m_n.
 * The TU size is stored by the hardware as size - 1 in the
 * upper field of the data M register.
 */
void intel_get_m_n(struct drm_i915_private *i915,
		   struct intel_link_m_n *m_n,
		   i915_reg_t data_m_reg, i915_reg_t data_n_reg,
		   i915_reg_t link_m_reg, i915_reg_t link_n_reg)
{
	m_n->link_m = intel_de_read(i915, link_m_reg) & DATA_LINK_M_N_MASK;
	m_n->link_n = intel_de_read(i915, link_n_reg) & DATA_LINK_M_N_MASK;
	m_n->data_m = intel_de_read(i915, data_m_reg) & DATA_LINK_M_N_MASK;
	m_n->data_n = intel_de_read(i915, data_n_reg) & DATA_LINK_M_N_MASK;
	/* stored as TU size - 1 */
	m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(i915, data_m_reg)) + 1;
}
3388
3389 void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc,
3390                                     enum transcoder transcoder,
3391                                     struct intel_link_m_n *m_n)
3392 {
3393         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3394         enum pipe pipe = crtc->pipe;
3395
3396         if (DISPLAY_VER(dev_priv) >= 5)
3397                 intel_get_m_n(dev_priv, m_n,
3398                               PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder),
3399                               PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder));
3400         else
3401                 intel_get_m_n(dev_priv, m_n,
3402                               PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
3403                               PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
3404 }
3405
3406 void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc,
3407                                     enum transcoder transcoder,
3408                                     struct intel_link_m_n *m_n)
3409 {
3410         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3411
3412         if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
3413                 return;
3414
3415         intel_get_m_n(dev_priv, m_n,
3416                       PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder),
3417                       PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder));
3418 }
3419
3420 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
3421                                   u32 pos, u32 size)
3422 {
3423         drm_rect_init(&crtc_state->pch_pfit.dst,
3424                       pos >> 16, pos & 0xffff,
3425                       size >> 16, size & 0xffff);
3426 }
3427
/*
 * Read out the SKL+ pipe scaler (panel fitter) state: find the first
 * enabled scaler bound to the pipe (not to a plane), record its window,
 * and update the scaler bookkeeping in @crtc_state accordingly.
 */
static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
	int id = -1;
	int i;

	/* find scaler attached to this pipe */
	for (i = 0; i < crtc->num_scalers; i++) {
		u32 ctl, pos, size;

		/* enabled, and plane select field zero -> bound to the pipe */
		ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
		if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
			continue;

		id = i;
		crtc_state->pch_pfit.enabled = true;

		pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
		size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));

		ilk_get_pfit_pos_size(crtc_state, pos, size);

		scaler_state->scalers[i].in_use = true;
		/* only the first matching scaler is recorded */
		break;
	}

	scaler_state->scaler_id = id;
	if (id >= 0)
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	else
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
}
3462
3463 static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
3464 {
3465         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3466         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3467         u32 ctl, pos, size;
3468
3469         ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
3470         if ((ctl & PF_ENABLE) == 0)
3471                 return;
3472
3473         crtc_state->pch_pfit.enabled = true;
3474
3475         pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
3476         size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));
3477
3478         ilk_get_pfit_pos_size(crtc_state, pos, size);
3479
3480         /*
3481          * We currently do not free assignements of panel fitters on
3482          * ivb/hsw (since we don't use the higher upscaling modes which
3483          * differentiates them) so just WARN about this case for now.
3484          */
3485         drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 7 &&
3486                     (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
3487 }
3488
/*
 * Read out the hardware state of an ILK-style (PCH) pipe into @pipe_config.
 *
 * Returns true if the pipe is enabled and its state was read out,
 * false if the pipe is disabled or its power well is down.
 */
static bool ilk_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	/* Pipe registers can only be read while the pipe's power well is up. */
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	/* On ILK the transcoder is always tied 1:1 to the pipe. */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;
	tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder));
	if (!(tmp & TRANSCONF_ENABLE))
		goto out;

	switch (tmp & TRANSCONF_BPC_MASK) {
	case TRANSCONF_BPC_6:
		pipe_config->pipe_bpp = 18;
		break;
	case TRANSCONF_BPC_8:
		pipe_config->pipe_bpp = 24;
		break;
	case TRANSCONF_BPC_10:
		pipe_config->pipe_bpp = 30;
		break;
	case TRANSCONF_BPC_12:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & TRANSCONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	/* both YUV601 and YUV709 read back as the YCBCR444 output format */
	switch (tmp & TRANSCONF_OUTPUT_COLORSPACE_MASK) {
	case TRANSCONF_OUTPUT_COLORSPACE_YUV601:
	case TRANSCONF_OUTPUT_COLORSPACE_YUV709:
		pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		break;
	default:
		pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
		break;
	}

	pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_ILK, tmp);

	/* Hardware stores the frame start delay as value - 1 */
	pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1;

	pipe_config->msa_timing_delay = REG_FIELD_GET(TRANSCONF_MSA_TIMING_DELAY_MASK, tmp);

	pipe_config->csc_mode = intel_de_read(dev_priv,
					      PIPE_CSC_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	pipe_config->pixel_multiplier = 1;

	/* read out the PCH side of the config as well */
	ilk_pch_get_config(pipe_config);

	intel_get_transcoder_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	ilk_get_pfit_config(pipe_config);

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
3570
3571 static u8 bigjoiner_pipes(struct drm_i915_private *i915)
3572 {
3573         u8 pipes;
3574
3575         if (DISPLAY_VER(i915) >= 12)
3576                 pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D);
3577         else if (DISPLAY_VER(i915) >= 11)
3578                 pipes = BIT(PIPE_B) | BIT(PIPE_C);
3579         else
3580                 pipes = 0;
3581
3582         return pipes & RUNTIME_INFO(i915)->pipe_mask;
3583 }
3584
/*
 * Check whether the DDI function of @cpu_transcoder is enabled.
 * If the transcoder's power domain is down, tmp stays 0 and the
 * function is reported as disabled.
 */
static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
					   enum transcoder cpu_transcoder)
{
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp = 0;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);

	with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
		tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));

	return tmp & TRANS_DDI_FUNC_ENABLE;
}
3599
/*
 * Read out which pipes currently act as bigjoiner masters and slaves,
 * covering both the compressed (DSC) case and, on DISPLAY_VER >= 13,
 * the uncompressed joiner case.
 */
static void enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv,
				    u8 *master_pipes, u8 *slave_pipes)
{
	struct intel_crtc *crtc;

	*master_pipes = 0;
	*slave_pipes = 0;

	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc,
					 bigjoiner_pipes(dev_priv)) {
		enum intel_display_power_domain power_domain;
		enum pipe pipe = crtc->pipe;
		intel_wakeref_t wakeref;

		/* compressed (DSC) joiner state lives behind the DSC power domain */
		power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe);
		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
			u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));

			/*
			 * NOTE(review): with_intel_display_power_if_enabled()
			 * appears to be a loop macro, so this continue only
			 * exits the power-domain scope and still falls through
			 * to the uncompressed joiner check below -- confirm.
			 */
			if (!(tmp & BIG_JOINER_ENABLE))
				continue;

			if (tmp & MASTER_BIG_JOINER_ENABLE)
				*master_pipes |= BIT(pipe);
			else
				*slave_pipes |= BIT(pipe);
		}

		/* uncompressed joiner readout only done on DISPLAY_VER >= 13 */
		if (DISPLAY_VER(dev_priv) < 13)
			continue;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
			u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));

			if (tmp & UNCOMPRESSED_JOINER_MASTER)
				*master_pipes |= BIT(pipe);
			if (tmp & UNCOMPRESSED_JOINER_SLAVE)
				*slave_pipes |= BIT(pipe);
		}
	}

	/* Bigjoiner pipes should always be consecutive master and slave */
	drm_WARN(&dev_priv->drm, *slave_pipes != *master_pipes << 1,
		 "Bigjoiner misconfigured (master pipes 0x%x, slave pipes 0x%x)\n",
		 *master_pipes, *slave_pipes);
}
3646
/*
 * Return the bigjoiner master pipe for @pipe. A pipe that is not a
 * slave is its own master.
 */
static enum pipe get_bigjoiner_master_pipe(enum pipe pipe, u8 master_pipes, u8 slave_pipes)
{
	if ((slave_pipes & BIT(pipe)) == 0)
		return pipe;

	/* ignore everything above our pipe */
	master_pipes &= ~GENMASK(7, pipe);

	/* highest remaining bit should be our master pipe */
	return fls(master_pipes) - 1;
}
3658
/*
 * Return the mask of slave pipes belonging to the same bigjoiner
 * configuration as @pipe, i.e. the slaves between our master pipe and
 * the next master pipe. Returns 0 if @pipe is not part of a bigjoiner
 * configuration.
 */
static u8 get_bigjoiner_slave_pipes(enum pipe pipe, u8 master_pipes, u8 slave_pipes)
{
	enum pipe master_pipe, next_master_pipe;

	master_pipe = get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes);

	/* no enabled master -> no slaves */
	if ((master_pipes & BIT(master_pipe)) == 0)
		return 0;

	/* ignore our master pipe and everything below it */
	master_pipes &= ~GENMASK(master_pipe, 0);
	/* make sure a high bit is set for the ffs() */
	master_pipes |= BIT(7);
	/* lowest remaining bit should be the next master pipe */
	next_master_pipe = ffs(master_pipes) - 1;

	return slave_pipes & GENMASK(next_master_pipe - 1, master_pipe);
}
3677
3678 static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
3679 {
3680         u8 panel_transcoder_mask = BIT(TRANSCODER_EDP);
3681
3682         if (DISPLAY_VER(i915) >= 11)
3683                 panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
3684
3685         return panel_transcoder_mask;
3686 }
3687
/*
 * Determine the set of enabled transcoders currently feeding @crtc's pipe.
 *
 * Three sources are checked: the panel transcoders (eDP, and DSI on
 * ICL+) routed to this pipe, the transcoder with the same index as the
 * pipe, and - for a bigjoiner slave pipe - the master pipe's transcoder.
 * Returns a bitmask of enabled transcoders.
 */
static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv);
	enum transcoder cpu_transcoder;
	u8 master_pipes, slave_pipes;
	u8 enabled_transcoders = 0;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder,
				       panel_transcoder_mask) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;
		enum pipe trans_pipe;
		u32 tmp = 0;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
			tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));

		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			continue;

		/* decode which pipe this panel transcoder is routed to */
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			drm_WARN(dev, 1,
				 "unknown pipe linked to transcoder %s\n",
				 transcoder_name(cpu_transcoder));
			fallthrough;
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_pipe = PIPE_C;
			break;
		case TRANS_DDI_EDP_INPUT_D_ONOFF:
			trans_pipe = PIPE_D;
			break;
		}

		if (trans_pipe == crtc->pipe)
			enabled_transcoders |= BIT(cpu_transcoder);
	}

	/* single pipe or bigjoiner master */
	cpu_transcoder = (enum transcoder) crtc->pipe;
	if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
		enabled_transcoders |= BIT(cpu_transcoder);

	/* bigjoiner slave -> consider the master pipe's transcoder as well */
	enabled_bigjoiner_pipes(dev_priv, &master_pipes, &slave_pipes);
	if (slave_pipes & BIT(crtc->pipe)) {
		cpu_transcoder = (enum transcoder)
			get_bigjoiner_master_pipe(crtc->pipe, master_pipes, slave_pipes);
		if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
			enabled_transcoders |= BIT(cpu_transcoder);
	}

	return enabled_transcoders;
}
3756
3757 static bool has_edp_transcoders(u8 enabled_transcoders)
3758 {
3759         return enabled_transcoders & BIT(TRANSCODER_EDP);
3760 }
3761
3762 static bool has_dsi_transcoders(u8 enabled_transcoders)
3763 {
3764         return enabled_transcoders & (BIT(TRANSCODER_DSI_0) |
3765                                       BIT(TRANSCODER_DSI_1));
3766 }
3767
3768 static bool has_pipe_transcoders(u8 enabled_transcoders)
3769 {
3770         return enabled_transcoders & ~(BIT(TRANSCODER_EDP) |
3771                                        BIT(TRANSCODER_DSI_0) |
3772                                        BIT(TRANSCODER_DSI_1));
3773 }
3774
3775 static void assert_enabled_transcoders(struct drm_i915_private *i915,
3776                                        u8 enabled_transcoders)
3777 {
3778         /* Only one type of transcoder please */
3779         drm_WARN_ON(&i915->drm,
3780                     has_edp_transcoders(enabled_transcoders) +
3781                     has_dsi_transcoders(enabled_transcoders) +
3782                     has_pipe_transcoders(enabled_transcoders) > 1);
3783
3784         /* Only DSI transcoders can be ganged */
3785         drm_WARN_ON(&i915->drm,
3786                     !has_dsi_transcoders(enabled_transcoders) &&
3787                     !is_power_of_2(enabled_transcoders));
3788 }
3789
/*
 * Find the transcoder feeding @crtc and read out whether it is enabled.
 *
 * On success, pipe_config->cpu_transcoder is set and the transcoder's
 * power domain reference is stashed in @power_domain_set (released by
 * the caller). Returns true if TRANSCONF has the enable bit set.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long enabled_transcoders;
	u32 tmp;

	enabled_transcoders = hsw_enabled_transcoders(crtc);
	if (!enabled_transcoders)
		return false;

	assert_enabled_transcoders(dev_priv, enabled_transcoders);

	/*
	 * With the exception of DSI we should only ever have
	 * a single enabled transcoder. With DSI let's just
	 * pick the first one.
	 */
	pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1;

	if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
						       POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
		return false;

	if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

		/*
		 * NOTE(review): the "A on/off" eDP input mode looks like it
		 * implies the pipe A force-thru workaround -- confirm against
		 * the pfit/force_thru handling elsewhere.
		 */
		if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF)
			pipe_config->pch_pfit.force_thru = true;
	}

	tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder));

	return tmp & TRANSCONF_ENABLE;
}
3827
/*
 * BXT/GLK DSI readout: figure out whether one of the DSI transcoders
 * (DSI A on port A, DSI C on port C) is driving @crtc.
 *
 * Power references for any probed transcoder are recorded in
 * @power_domain_set for the caller to release. Returns true if a DSI
 * transcoder was found attached to this pipe.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
                                         struct intel_crtc_state *pipe_config,
                                         struct intel_display_power_domain_set *power_domain_set)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum transcoder cpu_transcoder;
        enum port port;
        u32 tmp;

        for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
                if (port == PORT_A)
                        cpu_transcoder = TRANSCODER_DSI_A;
                else
                        cpu_transcoder = TRANSCODER_DSI_C;

                if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
                                                               POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
                        continue;

                /*
                 * The PLL needs to be enabled with a valid divider
                 * configuration, otherwise accessing DSI registers will hang
                 * the machine. See BSpec North Display Engine
                 * registers/MIPI[BXT]. We can break out here early, since we
                 * need the same DSI PLL to be enabled for both DSI ports.
                 */
                if (!bxt_dsi_pll_is_enabled(dev_priv))
                        break;

                /* XXX: this works for video mode only */
                tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
                if (!(tmp & DPI_ENABLE))
                        continue;

                /* Skip ports routed to a different pipe */
                tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
                if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
                        continue;

                pipe_config->cpu_transcoder = cpu_transcoder;
                break;
        }

        return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
3873
3874 static void intel_bigjoiner_get_config(struct intel_crtc_state *crtc_state)
3875 {
3876         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3877         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3878         u8 master_pipes, slave_pipes;
3879         enum pipe pipe = crtc->pipe;
3880
3881         enabled_bigjoiner_pipes(i915, &master_pipes, &slave_pipes);
3882
3883         if (((master_pipes | slave_pipes) & BIT(pipe)) == 0)
3884                 return;
3885
3886         crtc_state->bigjoiner_pipes =
3887                 BIT(get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes)) |
3888                 get_bigjoiner_slave_pipes(pipe, master_pipes, slave_pipes);
3889 }
3890
/*
 * HSW+ pipe hardware state readout.
 *
 * Acquires power domain references as needed (recorded in
 * crtc->hw_readout_power_domains and released before returning) and fills
 * @pipe_config from the hardware registers. Returns true if the pipe is
 * active.
 */
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
                                struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        bool active;
        u32 tmp;

        /* Without pipe power there is nothing to read out */
        if (!intel_display_power_get_in_set_if_enabled(dev_priv, &crtc->hw_readout_power_domains,
                                                       POWER_DOMAIN_PIPE(crtc->pipe)))
                return false;

        pipe_config->shared_dpll = NULL;

        active = hsw_get_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains);

        /* DSI transcoders (BXT/GLK) are mutually exclusive with the above */
        if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
            bxt_get_dsi_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains)) {
                drm_WARN_ON(&dev_priv->drm, active);
                active = true;
        }

        if (!active)
                goto out;

        intel_dsc_get_config(pipe_config);
        intel_bigjoiner_get_config(pipe_config);

        if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
            DISPLAY_VER(dev_priv) >= 11)
                intel_get_transcoder_timings(crtc, pipe_config);

        if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
                intel_vrr_get_config(pipe_config);

        intel_get_pipe_src_size(crtc, pipe_config);

        /* Output colorspace: HSW uses TRANSCONF, BDW+ uses PIPE_MISC */
        if (IS_HASWELL(dev_priv)) {
                u32 tmp = intel_de_read(dev_priv,
                                        TRANSCONF(pipe_config->cpu_transcoder));

                if (tmp & TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW)
                        pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
                else
                        pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
        } else {
                pipe_config->output_format =
                        bdw_get_pipe_misc_output_format(crtc);
        }

        pipe_config->gamma_mode = intel_de_read(dev_priv,
                                                GAMMA_MODE(crtc->pipe));

        pipe_config->csc_mode = intel_de_read(dev_priv,
                                              PIPE_CSC_MODE(crtc->pipe));

        /* Gamma/CSC enable bits live in SKL_BOTTOM_COLOR on gen9+ */
        if (DISPLAY_VER(dev_priv) >= 9) {
                tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));

                if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
                        pipe_config->gamma_enable = true;

                if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
                        pipe_config->csc_enable = true;
        } else {
                i9xx_get_pipe_color_config(pipe_config);
        }

        intel_color_get_config(pipe_config);

        tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
        pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
        if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
                pipe_config->ips_linetime =
                        REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);

        /* The panel fitter has its own power domain */
        if (intel_display_power_get_in_set_if_enabled(dev_priv, &crtc->hw_readout_power_domains,
                                                      POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
                if (DISPLAY_VER(dev_priv) >= 9)
                        skl_get_pfit_config(pipe_config);
                else
                        ilk_get_pfit_config(pipe_config);
        }

        hsw_ips_get_config(pipe_config);

        if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
            !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
                pipe_config->pixel_multiplier =
                        intel_de_read(dev_priv,
                                      TRANS_MULT(pipe_config->cpu_transcoder)) + 1;
        } else {
                pipe_config->pixel_multiplier = 1;
        }

        if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
                tmp = intel_de_read(dev_priv, DISPLAY_VER(dev_priv) >= 14 ?
                                    MTL_CHICKEN_TRANS(pipe_config->cpu_transcoder) :
                                    CHICKEN_TRANS(pipe_config->cpu_transcoder));

                pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1;
        } else {
                /* no idea if this is correct */
                pipe_config->framestart_delay = 1;
        }

out:
        intel_display_power_put_all_in_set(dev_priv, &crtc->hw_readout_power_domains);

        return active;
}
4001
4002 bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
4003 {
4004         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4005         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
4006
4007         if (!i915->display.funcs.display->get_pipe_config(crtc, crtc_state))
4008                 return false;
4009
4010         crtc_state->hw.active = true;
4011
4012         intel_crtc_readout_derived_state(crtc_state);
4013
4014         return true;
4015 }
4016
/* VESA 640x480x72Hz mode to set on the pipe during load detection */
static const struct drm_display_mode load_detect_mode = {
        DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
                 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
4022
4023 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
4024                                         struct drm_crtc *crtc)
4025 {
4026         struct drm_plane *plane;
4027         struct drm_plane_state *plane_state;
4028         int ret, i;
4029
4030         ret = drm_atomic_add_affected_planes(state, crtc);
4031         if (ret)
4032                 return ret;
4033
4034         for_each_new_plane_in_state(state, plane, plane_state, i) {
4035                 if (plane_state->crtc != crtc)
4036                         continue;
4037
4038                 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
4039                 if (ret)
4040                         return ret;
4041
4042                 drm_atomic_set_fb_for_plane(plane_state, NULL);
4043         }
4044
4045         return 0;
4046 }
4047
/*
 * Light up a pipe with a fixed VESA mode so @connector's output can be
 * probed by load detection.
 *
 * On success the state needed to undo the modeset is stashed in
 * old->restore_state for intel_release_load_detect_pipe() to commit later.
 *
 * NOTE(review): despite the int return type this returns true/false, or
 * -EDEADLK when the caller must back off and retry the locking — callers
 * apparently treat it accordingly; confirm before changing the contract.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
                               struct intel_load_detect_pipe *old,
                               struct drm_modeset_acquire_ctx *ctx)
{
        struct intel_encoder *encoder =
                intel_attached_encoder(to_intel_connector(connector));
        struct intel_crtc *possible_crtc;
        struct intel_crtc *crtc = NULL;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_mode_config *config = &dev->mode_config;
        struct drm_atomic_state *state = NULL, *restore_state = NULL;
        struct drm_connector_state *connector_state;
        struct intel_crtc_state *crtc_state;
        int ret;

        drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
                    connector->base.id, connector->name,
                    encoder->base.base.id, encoder->base.name);

        old->restore_state = NULL;

        drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));

        /*
         * Algorithm gets a little messy:
         *
         *   - if the connector already has an assigned crtc, use it (but make
         *     sure it's on first)
         *
         *   - try to find the first unused crtc that can drive this connector,
         *     and use that if we find one
         */

        /* See if we already have a CRTC for this connector */
        if (connector->state->crtc) {
                crtc = to_intel_crtc(connector->state->crtc);

                ret = drm_modeset_lock(&crtc->base.mutex, ctx);
                if (ret)
                        goto fail;

                /* Make sure the crtc and connector are running */
                goto found;
        }

        /* Find an unused one (if possible) */
        for_each_intel_crtc(dev, possible_crtc) {
                if (!(encoder->base.possible_crtcs &
                      drm_crtc_mask(&possible_crtc->base)))
                        continue;

                ret = drm_modeset_lock(&possible_crtc->base.mutex, ctx);
                if (ret)
                        goto fail;

                /* Already in use; drop the lock and keep looking */
                if (possible_crtc->base.state->enable) {
                        drm_modeset_unlock(&possible_crtc->base.mutex);
                        continue;
                }

                crtc = possible_crtc;
                break;
        }

        /*
         * If we didn't find an unused CRTC, don't use any.
         */
        if (!crtc) {
                drm_dbg_kms(&dev_priv->drm,
                            "no pipe available for load-detect\n");
                ret = -ENODEV;
                goto fail;
        }

found:
        /* One state to perform the modeset, one to restore afterwards */
        state = drm_atomic_state_alloc(dev);
        restore_state = drm_atomic_state_alloc(dev);
        if (!state || !restore_state) {
                ret = -ENOMEM;
                goto fail;
        }

        state->acquire_ctx = ctx;
        to_intel_atomic_state(state)->internal = true;

        restore_state->acquire_ctx = ctx;
        to_intel_atomic_state(restore_state)->internal = true;

        connector_state = drm_atomic_get_connector_state(state, connector);
        if (IS_ERR(connector_state)) {
                ret = PTR_ERR(connector_state);
                goto fail;
        }

        ret = drm_atomic_set_crtc_for_connector(connector_state, &crtc->base);
        if (ret)
                goto fail;

        crtc_state = intel_atomic_get_crtc_state(state, crtc);
        if (IS_ERR(crtc_state)) {
                ret = PTR_ERR(crtc_state);
                goto fail;
        }

        crtc_state->uapi.active = true;

        ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
                                           &load_detect_mode);
        if (ret)
                goto fail;

        /* No planes should be enabled while probing */
        ret = intel_modeset_disable_planes(state, &crtc->base);
        if (ret)
                goto fail;

        /* Snapshot the current connector/crtc/plane state for restore */
        ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
        if (!ret)
                ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, &crtc->base));
        if (!ret)
                ret = drm_atomic_add_affected_planes(restore_state, &crtc->base);
        if (ret) {
                drm_dbg_kms(&dev_priv->drm,
                            "Failed to create a copy of old state to restore: %i\n",
                            ret);
                goto fail;
        }

        ret = drm_atomic_commit(state);
        if (ret) {
                drm_dbg_kms(&dev_priv->drm,
                            "failed to set mode on load-detect pipe\n");
                goto fail;
        }

        old->restore_state = restore_state;
        drm_atomic_state_put(state);

        /* let the connector get through one full cycle before testing */
        intel_crtc_wait_for_next_vblank(crtc);

        return true;

fail:
        if (state) {
                drm_atomic_state_put(state);
                state = NULL;
        }
        if (restore_state) {
                drm_atomic_state_put(restore_state);
                restore_state = NULL;
        }

        /* Deadlock means the caller must drop locks and retry */
        if (ret == -EDEADLK)
                return ret;

        return false;
}
4206
/*
 * Undo a previous intel_get_load_detect_pipe() by committing the saved
 * restore state. No-op if load detection never took over the pipe
 * (old->restore_state is NULL).
 */
void intel_release_load_detect_pipe(struct drm_connector *connector,
                                    struct intel_load_detect_pipe *old,
                                    struct drm_modeset_acquire_ctx *ctx)
{
        struct intel_encoder *intel_encoder =
                intel_attached_encoder(to_intel_connector(connector));
        struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_atomic_state *state = old->restore_state;
        int ret;

        drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
                    connector->base.id, connector->name,
                    encoder->base.id, encoder->name);

        if (!state)
                return;

        /* Best effort: log but don't propagate a failed restore */
        ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
        if (ret)
                drm_dbg_kms(&i915->drm,
                            "Couldn't release load detect pipe: %i\n", ret);
        drm_atomic_state_put(state);
}
4231
4232 static int i9xx_pll_refclk(struct drm_device *dev,
4233                            const struct intel_crtc_state *pipe_config)
4234 {
4235         struct drm_i915_private *dev_priv = to_i915(dev);
4236         u32 dpll = pipe_config->dpll_hw_state.dpll;
4237
4238         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
4239                 return dev_priv->display.vbt.lvds_ssc_freq;
4240         else if (HAS_PCH_SPLIT(dev_priv))
4241                 return 120000;
4242         else if (DISPLAY_VER(dev_priv) != 2)
4243                 return 96000;
4244         else
4245                 return 48000;
4246 }
4247
/*
 * Returns the clock of the currently programmed mode of the given pipe.
 *
 * Decodes the DPLL/FP register values stashed in pipe_config->dpll_hw_state
 * back into m/n/p dividers and computes port_clock from the reference clock.
 */
void i9xx_crtc_clock_get(struct intel_crtc *crtc,
                         struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        u32 dpll = pipe_config->dpll_hw_state.dpll;
        u32 fp;
        struct dpll clock;
        int port_clock;
        int refclk = i9xx_pll_refclk(dev, pipe_config);

        /* FP0 or FP1 is selected by the rate select bit */
        if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
                fp = pipe_config->dpll_hw_state.fp0;
        else
                fp = pipe_config->dpll_hw_state.fp1;

        clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
        if (IS_PINEVIEW(dev_priv)) {
                clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
                clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
        } else {
                clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
                clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
        }

        if (DISPLAY_VER(dev_priv) != 2) {
                /* P1 is encoded as a one-hot bitfield */
                if (IS_PINEVIEW(dev_priv))
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
                                DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
                else
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
                               DPLL_FPA01_P1_POST_DIV_SHIFT);

                switch (dpll & DPLL_MODE_MASK) {
                case DPLLB_MODE_DAC_SERIAL:
                        clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
                                5 : 10;
                        break;
                case DPLLB_MODE_LVDS:
                        clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
                                7 : 14;
                        break;
                default:
                        drm_dbg_kms(&dev_priv->drm,
                                    "Unknown DPLL mode %08x in programmed "
                                    "mode\n", (int)(dpll & DPLL_MODE_MASK));
                        return;
                }

                if (IS_PINEVIEW(dev_priv))
                        port_clock = pnv_calc_dpll_params(refclk, &clock);
                else
                        port_clock = i9xx_calc_dpll_params(refclk, &clock);
        } else {
                /* Gen2 encodes the dividers differently */
                enum pipe lvds_pipe;

                if (IS_I85X(dev_priv) &&
                    intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
                    lvds_pipe == crtc->pipe) {
                        u32 lvds = intel_de_read(dev_priv, LVDS);

                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
                                       DPLL_FPA01_P1_POST_DIV_SHIFT);

                        if (lvds & LVDS_CLKB_POWER_UP)
                                clock.p2 = 7;
                        else
                                clock.p2 = 14;
                } else {
                        if (dpll & PLL_P1_DIVIDE_BY_TWO)
                                clock.p1 = 2;
                        else {
                                clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
                                            DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
                        }
                        if (dpll & PLL_P2_DIVIDE_BY_4)
                                clock.p2 = 4;
                        else
                                clock.p2 = 2;
                }

                port_clock = i9xx_calc_dpll_params(refclk, &clock);
        }

        /*
         * This value includes pixel_multiplier. We will use
         * port_clock to compute adjusted_mode.crtc_clock in the
         * encoder's get_config() function.
         */
        pipe_config->port_clock = port_clock;
}
4340
4341 int intel_dotclock_calculate(int link_freq,
4342                              const struct intel_link_m_n *m_n)
4343 {
4344         /*
4345          * The calculation for the data clock is:
4346          * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
4347          * But we want to avoid losing precison if possible, so:
4348          * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
4349          *
4350          * and the link clock is simpler:
4351          * link_clock = (m * link_clock) / n
4352          */
4353
4354         if (!m_n->link_n)
4355                 return 0;
4356
4357         return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq),
4358                                 m_n->link_n);
4359 }
4360
4361 int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config)
4362 {
4363         int dotclock;
4364
4365         if (intel_crtc_has_dp_encoder(pipe_config))
4366                 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
4367                                                     &pipe_config->dp_m_n);
4368         else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24)
4369                 dotclock = DIV_ROUND_CLOSEST(pipe_config->port_clock * 24,
4370                                              pipe_config->pipe_bpp);
4371         else
4372                 dotclock = pipe_config->port_clock;
4373
4374         if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
4375             !intel_crtc_has_dp_encoder(pipe_config))
4376                 dotclock *= 2;
4377
4378         if (pipe_config->pixel_multiplier)
4379                 dotclock /= pipe_config->pixel_multiplier;
4380
4381         return dotclock;
4382 }
4383
/*
 * Returns the currently programmed mode of the given encoder.
 *
 * Reads the hardware state of the pipe the encoder is attached to and
 * converts it into a freshly allocated drm_display_mode. Returns NULL if
 * the encoder is disabled or on allocation/readout failure. The caller
 * owns (and must free) the returned mode.
 */
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crtc_state *crtc_state;
        struct drm_display_mode *mode;
        struct intel_crtc *crtc;
        enum pipe pipe;

        if (!encoder->get_hw_state(encoder, &pipe))
                return NULL;

        crtc = intel_crtc_for_pipe(dev_priv, pipe);

        mode = kzalloc(sizeof(*mode), GFP_KERNEL);
        if (!mode)
                return NULL;

        /* Scratch crtc state just for the duration of the readout */
        crtc_state = intel_crtc_state_alloc(crtc);
        if (!crtc_state) {
                kfree(mode);
                return NULL;
        }

        if (!intel_crtc_get_pipe_config(crtc_state)) {
                kfree(crtc_state);
                kfree(mode);
                return NULL;
        }

        intel_encoder_get_config(encoder, crtc_state);

        intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);

        kfree(crtc_state);

        return mode;
}
4423
4424 static bool encoders_cloneable(const struct intel_encoder *a,
4425                                const struct intel_encoder *b)
4426 {
4427         /* masks could be asymmetric, so check both ways */
4428         return a == b || (a->cloneable & BIT(b->type) &&
4429                           b->cloneable & BIT(a->type));
4430 }
4431
4432 static bool check_single_encoder_cloning(struct intel_atomic_state *state,
4433                                          struct intel_crtc *crtc,
4434                                          struct intel_encoder *encoder)
4435 {
4436         struct intel_encoder *source_encoder;
4437         struct drm_connector *connector;
4438         struct drm_connector_state *connector_state;
4439         int i;
4440
4441         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
4442                 if (connector_state->crtc != &crtc->base)
4443                         continue;
4444
4445                 source_encoder =
4446                         to_intel_encoder(connector_state->best_encoder);
4447                 if (!encoders_cloneable(encoder, source_encoder))
4448                         return false;
4449         }
4450
4451         return true;
4452 }
4453
4454 static int icl_add_linked_planes(struct intel_atomic_state *state)
4455 {
4456         struct intel_plane *plane, *linked;
4457         struct intel_plane_state *plane_state, *linked_plane_state;
4458         int i;
4459
4460         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
4461                 linked = plane_state->planar_linked_plane;
4462
4463                 if (!linked)
4464                         continue;
4465
4466                 linked_plane_state = intel_atomic_get_plane_state(state, linked);
4467                 if (IS_ERR(linked_plane_state))
4468                         return PTR_ERR(linked_plane_state);
4469
4470                 drm_WARN_ON(state->base.dev,
4471                             linked_plane_state->planar_linked_plane != plane);
4472                 drm_WARN_ON(state->base.dev,
4473                             linked_plane_state->planar_slave == plane_state->planar_slave);
4474         }
4475
4476         return 0;
4477 }
4478
/*
 * ICL+ planar YUV (NV12) support: each UV plane needs a dedicated Y plane.
 *
 * Tears down all stale plane links on this crtc, then pairs every NV12
 * plane with a free Y-capable plane, copying the relevant hw state to the
 * slave. Returns -EINVAL when not enough Y planes are available.
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
        struct intel_plane *plane, *linked;
        struct intel_plane_state *plane_state;
        int i;

        if (DISPLAY_VER(dev_priv) < 11)
                return 0;

        /*
         * Destroy all old plane links and make the slave plane invisible
         * in the crtc_state->active_planes mask.
         */
        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
                if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
                        continue;

                plane_state->planar_linked_plane = NULL;
                if (plane_state->planar_slave && !plane_state->uapi.visible) {
                        crtc_state->enabled_planes &= ~BIT(plane->id);
                        crtc_state->active_planes &= ~BIT(plane->id);
                        crtc_state->update_planes |= BIT(plane->id);
                        crtc_state->data_rate[plane->id] = 0;
                        crtc_state->rel_data_rate[plane->id] = 0;
                }

                plane_state->planar_slave = false;
        }

        /* No NV12 planes on this crtc -> nothing left to do */
        if (!crtc_state->nv12_planes)
                return 0;

        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
                struct intel_plane_state *linked_state = NULL;

                if (plane->pipe != crtc->pipe ||
                    !(crtc_state->nv12_planes & BIT(plane->id)))
                        continue;

                /* Find a free Y-capable plane on this crtc */
                for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
                        if (!icl_is_nv12_y_plane(dev_priv, linked->id))
                                continue;

                        if (crtc_state->active_planes & BIT(linked->id))
                                continue;

                        linked_state = intel_atomic_get_plane_state(state, linked);
                        if (IS_ERR(linked_state))
                                return PTR_ERR(linked_state);

                        break;
                }

                if (!linked_state) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "Need %d free Y planes for planar YUV\n",
                                    hweight8(crtc_state->nv12_planes));

                        return -EINVAL;
                }

                plane_state->planar_linked_plane = linked;

                linked_state->planar_slave = true;
                linked_state->planar_linked_plane = plane;
                crtc_state->enabled_planes |= BIT(linked->id);
                crtc_state->active_planes |= BIT(linked->id);
                crtc_state->update_planes |= BIT(linked->id);
                crtc_state->data_rate[linked->id] =
                        crtc_state->data_rate_y[plane->id];
                crtc_state->rel_data_rate[linked->id] =
                        crtc_state->rel_data_rate_y[plane->id];
                drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
                            linked->base.name, plane->base.name);

                /* Copy parameters to slave plane */
                linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
                linked_state->color_ctl = plane_state->color_ctl;
                linked_state->view = plane_state->view;
                linked_state->decrypt = plane_state->decrypt;

                intel_plane_copy_hw_state(linked_state, plane_state);
                linked_state->uapi.src = plane_state->uapi.src;
                linked_state->uapi.dst = plane_state->uapi.dst;

                /* HDR planes select their Y plane via PLANE_CUS_CTL */
                if (icl_is_hdr_plane(dev_priv, plane->id)) {
                        if (linked->id == PLANE_SPRITE5)
                                plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_7_ICL;
                        else if (linked->id == PLANE_SPRITE4)
                                plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_6_ICL;
                        else if (linked->id == PLANE_SPRITE3)
                                plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_5_RKL;
                        else if (linked->id == PLANE_SPRITE2)
                                plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_4_RKL;
                        else
                                MISSING_CASE(linked->id);
                }
        }

        return 0;
}
4583
4584 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
4585 {
4586         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
4587         struct intel_atomic_state *state =
4588                 to_intel_atomic_state(new_crtc_state->uapi.state);
4589         const struct intel_crtc_state *old_crtc_state =
4590                 intel_atomic_get_old_crtc_state(state, crtc);
4591
4592         return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
4593 }
4594
4595 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
4596 {
4597         const struct drm_display_mode *pipe_mode =
4598                 &crtc_state->hw.pipe_mode;
4599         int linetime_wm;
4600
4601         if (!crtc_state->hw.enable)
4602                 return 0;
4603
4604         linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
4605                                         pipe_mode->crtc_clock);
4606
4607         return min(linetime_wm, 0x1ff);
4608 }
4609
4610 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
4611                                const struct intel_cdclk_state *cdclk_state)
4612 {
4613         const struct drm_display_mode *pipe_mode =
4614                 &crtc_state->hw.pipe_mode;
4615         int linetime_wm;
4616
4617         if (!crtc_state->hw.enable)
4618                 return 0;
4619
4620         linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
4621                                         cdclk_state->logical.cdclk);
4622
4623         return min(linetime_wm, 0x1ff);
4624 }
4625
4626 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
4627 {
4628         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4629         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4630         const struct drm_display_mode *pipe_mode =
4631                 &crtc_state->hw.pipe_mode;
4632         int linetime_wm;
4633
4634         if (!crtc_state->hw.enable)
4635                 return 0;
4636
4637         linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
4638                                    crtc_state->pixel_rate);
4639
4640         /* Display WA #1135: BXT:ALL GLK:ALL */
4641         if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
4642             skl_watermark_ipc_enabled(dev_priv))
4643                 linetime_wm /= 2;
4644
4645         return min(linetime_wm, 0x1ff);
4646 }
4647
4648 static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
4649                                    struct intel_crtc *crtc)
4650 {
4651         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4652         struct intel_crtc_state *crtc_state =
4653                 intel_atomic_get_new_crtc_state(state, crtc);
4654         const struct intel_cdclk_state *cdclk_state;
4655
4656         if (DISPLAY_VER(dev_priv) >= 9)
4657                 crtc_state->linetime = skl_linetime_wm(crtc_state);
4658         else
4659                 crtc_state->linetime = hsw_linetime_wm(crtc_state);
4660
4661         if (!hsw_crtc_supports_ips(crtc))
4662                 return 0;
4663
4664         cdclk_state = intel_atomic_get_cdclk_state(state);
4665         if (IS_ERR(cdclk_state))
4666                 return PTR_ERR(cdclk_state);
4667
4668         crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
4669                                                        cdclk_state);
4670
4671         return 0;
4672 }
4673
/*
 * Per-CRTC atomic check: validate and derive the new CRTC state.
 *
 * Runs the ordered sequence of per-pipe checks: shared DPLL assignment
 * (on modeset), color management, target and intermediate watermarks,
 * scalers (SKL+), IPS config, linetime watermarks (HSW+), and PSR2
 * selective fetch. Returns 0 on success or a negative error code from
 * the first failing check.
 */
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
                                   struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_crtc_state *crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        int ret;

        /*
         * Pre-ILK (except G4X): flag a post-commit watermark update when
         * a modeset is turning the pipe off.
         */
        if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
            intel_crtc_needs_modeset(crtc_state) &&
            !crtc_state->hw.active)
                crtc_state->update_wm_post = true;

        if (intel_crtc_needs_modeset(crtc_state)) {
                ret = intel_dpll_crtc_get_shared_dpll(state, crtc);
                if (ret)
                        return ret;
        }

        /*
         * May need to update pipe gamma enable bits
         * when C8 planes are getting enabled/disabled.
         */
        if (c8_planes_changed(crtc_state))
                crtc_state->uapi.color_mgmt_changed = true;

        if (intel_crtc_needs_color_update(crtc_state)) {
                ret = intel_color_check(crtc_state);
                if (ret)
                        return ret;
        }

        ret = intel_compute_pipe_wm(state, crtc);
        if (ret) {
                drm_dbg_kms(&dev_priv->drm,
                            "Target pipe watermarks are invalid\n");
                return ret;
        }

        /*
         * Calculate 'intermediate' watermarks that satisfy both the
         * old state and the new state.  We can program these
         * immediately.
         */
        ret = intel_compute_intermediate_wm(state, crtc);
        if (ret) {
                drm_dbg_kms(&dev_priv->drm,
                            "No valid intermediate pipe watermarks are possible\n");
                return ret;
        }

        /* SKL+: pipe scaler state must be recomputed on modeset/fastset */
        if (DISPLAY_VER(dev_priv) >= 9) {
                if (intel_crtc_needs_modeset(crtc_state) ||
                    intel_crtc_needs_fastset(crtc_state)) {
                        ret = skl_update_scaler_crtc(crtc_state);
                        if (ret)
                                return ret;
                }

                ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
                if (ret)
                        return ret;
        }

        if (HAS_IPS(dev_priv)) {
                ret = hsw_ips_compute_config(state, crtc);
                if (ret)
                        return ret;
        }

        /* linetime watermarks exist on HSW/BDW and SKL+ */
        if (DISPLAY_VER(dev_priv) >= 9 ||
            IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
                ret = hsw_compute_linetime_wm(state, crtc);
                if (ret)
                        return ret;

        }

        ret = intel_psr2_sel_fetch_update(state, crtc);
        if (ret)
                return ret;

        return 0;
}
4758
4759 static int
4760 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
4761                       struct intel_crtc_state *crtc_state)
4762 {
4763         struct drm_connector *connector = conn_state->connector;
4764         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
4765         const struct drm_display_info *info = &connector->display_info;
4766         int bpp;
4767
4768         switch (conn_state->max_bpc) {
4769         case 6 ... 7:
4770                 bpp = 6 * 3;
4771                 break;
4772         case 8 ... 9:
4773                 bpp = 8 * 3;
4774                 break;
4775         case 10 ... 11:
4776                 bpp = 10 * 3;
4777                 break;
4778         case 12 ... 16:
4779                 bpp = 12 * 3;
4780                 break;
4781         default:
4782                 MISSING_CASE(conn_state->max_bpc);
4783                 return -EINVAL;
4784         }
4785
4786         if (bpp < crtc_state->pipe_bpp) {
4787                 drm_dbg_kms(&i915->drm,
4788                             "[CONNECTOR:%d:%s] Limiting display bpp to %d "
4789                             "(EDID bpp %d, max requested bpp %d, max platform bpp %d)\n",
4790                             connector->base.id, connector->name,
4791                             bpp, 3 * info->bpc,
4792                             3 * conn_state->max_requested_bpc,
4793                             crtc_state->pipe_bpp);
4794
4795                 crtc_state->pipe_bpp = bpp;
4796         }
4797
4798         return 0;
4799 }
4800
4801 static int
4802 compute_baseline_pipe_bpp(struct intel_atomic_state *state,
4803                           struct intel_crtc *crtc)
4804 {
4805         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4806         struct intel_crtc_state *crtc_state =
4807                 intel_atomic_get_new_crtc_state(state, crtc);
4808         struct drm_connector *connector;
4809         struct drm_connector_state *connector_state;
4810         int bpp, i;
4811
4812         if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
4813             IS_CHERRYVIEW(dev_priv)))
4814                 bpp = 10*3;
4815         else if (DISPLAY_VER(dev_priv) >= 5)
4816                 bpp = 12*3;
4817         else
4818                 bpp = 8*3;
4819
4820         crtc_state->pipe_bpp = bpp;
4821
4822         /* Clamp display bpp to connector max bpp */
4823         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
4824                 int ret;
4825
4826                 if (connector_state->crtc != &crtc->base)
4827                         continue;
4828
4829                 ret = compute_sink_pipe_bpp(connector_state, crtc_state);
4830                 if (ret)
4831                         return ret;
4832         }
4833
4834         return 0;
4835 }
4836
/*
 * Verify that no digital port is claimed by more than one encoder, and
 * that MST and SST/HDMI usage is not mixed on the same port.
 *
 * Returns true when the configuration is conflict-free, false otherwise.
 * Requires connection_mutex since it peeks at connector->state for
 * connectors not part of this atomic state.
 */
static bool check_digital_port_conflicts(struct intel_atomic_state *state)
{
        struct drm_device *dev = state->base.dev;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        unsigned int used_ports = 0;
        unsigned int used_mst_ports = 0;
        bool ret = true;

        /*
         * We're going to peek into connector->state,
         * hence connection_mutex must be held.
         */
        drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);

        /*
         * Walk the connector list instead of the encoder
         * list to detect the problem on ddi platforms
         * where there's just one encoder per digital port.
         */
        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                struct drm_connector_state *connector_state;
                struct intel_encoder *encoder;

                /* prefer the state from this commit, else the current state */
                connector_state =
                        drm_atomic_get_new_connector_state(&state->base,
                                                           connector);
                if (!connector_state)
                        connector_state = connector->state;

                if (!connector_state->best_encoder)
                        continue;

                encoder = to_intel_encoder(connector_state->best_encoder);

                drm_WARN_ON(dev, !connector_state->crtc);

                switch (encoder->type) {
                case INTEL_OUTPUT_DDI:
                        if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
                                break;
                        fallthrough;
                case INTEL_OUTPUT_DP:
                case INTEL_OUTPUT_HDMI:
                case INTEL_OUTPUT_EDP:
                        /* the same port mustn't appear more than once */
                        if (used_ports & BIT(encoder->port))
                                ret = false;

                        used_ports |= BIT(encoder->port);
                        break;
                case INTEL_OUTPUT_DP_MST:
                        used_mst_ports |=
                                1 << encoder->port;
                        break;
                default:
                        break;
                }
        }
        drm_connector_list_iter_end(&conn_iter);

        /* can't mix MST and SST/HDMI on the same port */
        if (used_ports & used_mst_ports)
                return false;

        return ret;
}
4905
/*
 * Copy the color management blobs (degamma/gamma LUTs and CTM) from the
 * uapi CRTC state into the hw state. Used for the non-modeset (fastset)
 * path; must not be called on a bigjoiner slave, whose hw state comes
 * from its master instead.
 */
static void
intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
                                           struct intel_crtc *crtc)
{
        struct intel_crtc_state *crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);

        WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));

        /* drm_property_replace_blob() manages the blob refcounts */
        drm_property_replace_blob(&crtc_state->hw.degamma_lut,
                                  crtc_state->uapi.degamma_lut);
        drm_property_replace_blob(&crtc_state->hw.gamma_lut,
                                  crtc_state->uapi.gamma_lut);
        drm_property_replace_blob(&crtc_state->hw.ctm,
                                  crtc_state->uapi.ctm);
}
4922
/*
 * Copy the full uapi CRTC state (enable/active, modes, scaling filter,
 * plus the color management blobs) into the hw state for a modeset.
 * Must not be called on a bigjoiner slave, whose hw state comes from
 * its master instead.
 */
static void
intel_crtc_copy_uapi_to_hw_state_modeset(struct intel_atomic_state *state,
                                         struct intel_crtc *crtc)
{
        struct intel_crtc_state *crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);

        WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));

        crtc_state->hw.enable = crtc_state->uapi.enable;
        crtc_state->hw.active = crtc_state->uapi.active;
        drm_mode_copy(&crtc_state->hw.mode,
                      &crtc_state->uapi.mode);
        drm_mode_copy(&crtc_state->hw.adjusted_mode,
                      &crtc_state->uapi.adjusted_mode);
        crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;

        /* the color management blobs are copied by the nomodeset helper */
        intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
}
4942
/*
 * Propagate the master CRTC's color management hw state (LUT/CTM blobs)
 * and its color_mgmt_changed flag to a bigjoiner slave CRTC on the
 * non-modeset path.
 */
static void
copy_bigjoiner_crtc_state_nomodeset(struct intel_atomic_state *state,
                                    struct intel_crtc *slave_crtc)
{
        struct intel_crtc_state *slave_crtc_state =
                intel_atomic_get_new_crtc_state(state, slave_crtc);
        struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state);
        const struct intel_crtc_state *master_crtc_state =
                intel_atomic_get_new_crtc_state(state, master_crtc);

        /* drm_property_replace_blob() manages the blob refcounts */
        drm_property_replace_blob(&slave_crtc_state->hw.degamma_lut,
                                  master_crtc_state->hw.degamma_lut);
        drm_property_replace_blob(&slave_crtc_state->hw.gamma_lut,
                                  master_crtc_state->hw.gamma_lut);
        drm_property_replace_blob(&slave_crtc_state->hw.ctm,
                                  master_crtc_state->hw.ctm);

        slave_crtc_state->uapi.color_mgmt_changed = master_crtc_state->uapi.color_mgmt_changed;
}
4962
/*
 * Replace a bigjoiner slave's CRTC state wholesale with a copy of its
 * master's state, preserving only the slave's own uapi state, scaler
 * state, DPLL assignment and CRC enable flag, then re-deriving the hw
 * members from the master. Used on the modeset path.
 *
 * Returns 0 on success, -ENOMEM if the temporary copy cannot be
 * allocated.
 */
static int
copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state,
                                  struct intel_crtc *slave_crtc)
{
        struct intel_crtc_state *slave_crtc_state =
                intel_atomic_get_new_crtc_state(state, slave_crtc);
        struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state);
        const struct intel_crtc_state *master_crtc_state =
                intel_atomic_get_new_crtc_state(state, master_crtc);
        struct intel_crtc_state *saved_state;

        WARN_ON(master_crtc_state->bigjoiner_pipes !=
                slave_crtc_state->bigjoiner_pipes);

        /* work on a copy so the master state is never partially clobbered */
        saved_state = kmemdup(master_crtc_state, sizeof(*saved_state), GFP_KERNEL);
        if (!saved_state)
                return -ENOMEM;

        /* preserve some things from the slave's original crtc state */
        saved_state->uapi = slave_crtc_state->uapi;
        saved_state->scaler_state = slave_crtc_state->scaler_state;
        saved_state->shared_dpll = slave_crtc_state->shared_dpll;
        saved_state->dpll_hw_state = slave_crtc_state->dpll_hw_state;
        saved_state->crc_enabled = slave_crtc_state->crc_enabled;

        /* free old blob references before the memcpy overwrites them */
        intel_crtc_free_hw_state(slave_crtc_state);
        memcpy(slave_crtc_state, saved_state, sizeof(*slave_crtc_state));
        kfree(saved_state);

        /* Re-init hw state */
        memset(&slave_crtc_state->hw, 0, sizeof(slave_crtc_state->hw));
        slave_crtc_state->hw.enable = master_crtc_state->hw.enable;
        slave_crtc_state->hw.active = master_crtc_state->hw.active;
        drm_mode_copy(&slave_crtc_state->hw.mode,
                      &master_crtc_state->hw.mode);
        drm_mode_copy(&slave_crtc_state->hw.pipe_mode,
                      &master_crtc_state->hw.pipe_mode);
        drm_mode_copy(&slave_crtc_state->hw.adjusted_mode,
                      &master_crtc_state->hw.adjusted_mode);
        slave_crtc_state->hw.scaling_filter = master_crtc_state->hw.scaling_filter;

        copy_bigjoiner_crtc_state_nomodeset(state, slave_crtc);

        /* mirror the master's change flags so the commit paths agree */
        slave_crtc_state->uapi.mode_changed = master_crtc_state->uapi.mode_changed;
        slave_crtc_state->uapi.connectors_changed = master_crtc_state->uapi.connectors_changed;
        slave_crtc_state->uapi.active_changed = master_crtc_state->uapi.active_changed;

        WARN_ON(master_crtc_state->bigjoiner_pipes !=
                slave_crtc_state->bigjoiner_pipes);

        return 0;
}
5015
/*
 * Reset the new CRTC state to a freshly-allocated (cleared) state before
 * a modeset recomputes it, preserving only the fields that must survive:
 * uapi state, inherited flag, scaler state, DPLL bookkeeping, CRC enable
 * and (on G4X/VLV/CHV) the watermark state. Finishes by re-deriving the
 * hw members from the uapi state.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int
intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
                                 struct intel_crtc *crtc)
{
        struct intel_crtc_state *crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_crtc_state *saved_state;

        saved_state = intel_crtc_state_alloc(crtc);
        if (!saved_state)
                return -ENOMEM;

        /* free the old crtc_state->hw members */
        intel_crtc_free_hw_state(crtc_state);

        /* FIXME: before the switch to atomic started, a new pipe_config was
         * kzalloc'd. Code that depends on any field being zero should be
         * fixed, so that the crtc_state can be safely duplicated. For now,
         * only fields that are know to not cause problems are preserved. */

        saved_state->uapi = crtc_state->uapi;
        saved_state->inherited = crtc_state->inherited;
        saved_state->scaler_state = crtc_state->scaler_state;
        saved_state->shared_dpll = crtc_state->shared_dpll;
        saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
        memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
               sizeof(saved_state->icl_port_dplls));
        saved_state->crc_enabled = crtc_state->crc_enabled;
        if (IS_G4X(dev_priv) ||
            IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                saved_state->wm = crtc_state->wm;

        memcpy(crtc_state, saved_state, sizeof(*crtc_state));
        kfree(saved_state);

        /* re-derive crtc_state->hw from crtc_state->uapi */
        intel_crtc_copy_uapi_to_hw_state_modeset(state, crtc);

        return 0;
}
5056
/*
 * Compute the full pipe configuration for a modeset: sanitize sync
 * flags, pick the baseline pipe bpp, derive the pipe source size, then
 * run the encoder .compute_config() hooks and the CRTC config
 * computation. The encoder/CRTC step is retried once if the CRTC check
 * returns -EAGAIN (e.g. bandwidth constrained).
 *
 * Returns 0 on success, -EDEADLK for lock contention (caller must back
 * off and retry the whole commit), or another negative error code.
 */
static int
intel_modeset_pipe_config(struct intel_atomic_state *state,
                          struct intel_crtc *crtc)
{
        struct drm_i915_private *i915 = to_i915(crtc->base.dev);
        struct intel_crtc_state *crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        struct drm_connector *connector;
        struct drm_connector_state *connector_state;
        int pipe_src_w, pipe_src_h;
        int base_bpp, ret, i;
        bool retry = true;

        /* default transcoder is the one matching the pipe */
        crtc_state->cpu_transcoder = (enum transcoder) crtc->pipe;

        crtc_state->framestart_delay = 1;

        /*
         * Sanitize sync polarity flags based on requested ones. If neither
         * positive or negative polarity is requested, treat this as meaning
         * negative polarity.
         */
        if (!(crtc_state->hw.adjusted_mode.flags &
              (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
                crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

        if (!(crtc_state->hw.adjusted_mode.flags &
              (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
                crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

        ret = compute_baseline_pipe_bpp(state, crtc);
        if (ret)
                return ret;

        /* remember the pre-encoder bpp for the final debug message */
        base_bpp = crtc_state->pipe_bpp;

        /*
         * Determine the real pipe dimensions. Note that stereo modes can
         * increase the actual pipe size due to the frame doubling and
         * insertion of additional space for blanks between the frame. This
         * is stored in the crtc timings. We use the requested mode to do this
         * computation to clearly distinguish it from the adjusted mode, which
         * can be changed by the connectors in the below retry loop.
         */
        drm_mode_get_hv_timing(&crtc_state->hw.mode,
                               &pipe_src_w, &pipe_src_h);
        drm_rect_init(&crtc_state->pipe_src, 0, 0,
                      pipe_src_w, pipe_src_h);

        for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
                struct intel_encoder *encoder =
                        to_intel_encoder(connector_state->best_encoder);

                if (connector_state->crtc != &crtc->base)
                        continue;

                if (!check_single_encoder_cloning(state, crtc, encoder)) {
                        drm_dbg_kms(&i915->drm,
                                    "[ENCODER:%d:%s] rejecting invalid cloning configuration\n",
                                    encoder->base.base.id, encoder->base.name);
                        return -EINVAL;
                }

                /*
                 * Determine output_types before calling the .compute_config()
                 * hooks so that the hooks can use this information safely.
                 */
                if (encoder->compute_output_type)
                        crtc_state->output_types |=
                                BIT(encoder->compute_output_type(encoder, crtc_state,
                                                                 connector_state));
                else
                        crtc_state->output_types |= BIT(encoder->type);
        }

encoder_retry:
        /* Ensure the port clock defaults are reset when retrying. */
        crtc_state->port_clock = 0;
        crtc_state->pixel_multiplier = 1;

        /* Fill in default crtc timings, allow encoders to overwrite them. */
        drm_mode_set_crtcinfo(&crtc_state->hw.adjusted_mode,
                              CRTC_STEREO_DOUBLE);

        /* Pass our mode to the connectors and the CRTC to give them a chance to
         * adjust it according to limitations or connector properties, and also
         * a chance to reject the mode entirely.
         */
        for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
                struct intel_encoder *encoder =
                        to_intel_encoder(connector_state->best_encoder);

                if (connector_state->crtc != &crtc->base)
                        continue;

                ret = encoder->compute_config(encoder, crtc_state,
                                              connector_state);
                if (ret == -EDEADLK)
                        return ret;
                if (ret < 0) {
                        drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] config failure: %d\n",
                                    encoder->base.base.id, encoder->base.name, ret);
                        return ret;
                }
        }

        /* Set default port clock if not overwritten by the encoder. Needs to be
         * done afterwards in case the encoder adjusts the mode. */
        if (!crtc_state->port_clock)
                crtc_state->port_clock = crtc_state->hw.adjusted_mode.crtc_clock
                        * crtc_state->pixel_multiplier;

        ret = intel_crtc_compute_config(state, crtc);
        if (ret == -EDEADLK)
                return ret;
        if (ret == -EAGAIN) {
                /* a single retry is allowed; a second -EAGAIN is a bug */
                if (drm_WARN(&i915->drm, !retry,
                             "[CRTC:%d:%s] loop in pipe configuration computation\n",
                             crtc->base.base.id, crtc->base.name))
                        return -EINVAL;

                drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] bw constrained, retrying\n",
                            crtc->base.base.id, crtc->base.name);
                retry = false;
                goto encoder_retry;
        }
        if (ret < 0) {
                drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] config failure: %d\n",
                            crtc->base.base.id, crtc->base.name, ret);
                return ret;
        }

        /* Dithering seems to not pass-through bits correctly when it should, so
         * only enable it on 6bpc panels and when its not a compliance
         * test requesting 6bpc video pattern.
         */
        crtc_state->dither = (crtc_state->pipe_bpp == 6*3) &&
                !crtc_state->dither_force_disable;
        drm_dbg_kms(&i915->drm,
                    "[CRTC:%d:%s] hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
                    crtc->base.base.id, crtc->base.name,
                    base_bpp, crtc_state->pipe_bpp, crtc_state->dither);

        return 0;
}
5202
5203 static int
5204 intel_modeset_pipe_config_late(struct intel_atomic_state *state,
5205                                struct intel_crtc *crtc)
5206 {
5207         struct intel_crtc_state *crtc_state =
5208                 intel_atomic_get_new_crtc_state(state, crtc);
5209         struct drm_connector_state *conn_state;
5210         struct drm_connector *connector;
5211         int i;
5212
5213         intel_bigjoiner_adjust_pipe_src(crtc_state);
5214
5215         for_each_new_connector_in_state(&state->base, connector,
5216                                         conn_state, i) {
5217                 struct intel_encoder *encoder =
5218                         to_intel_encoder(conn_state->best_encoder);
5219                 int ret;
5220
5221                 if (conn_state->crtc != &crtc->base ||
5222                     !encoder->compute_config_late)
5223                         continue;
5224
5225                 ret = encoder->compute_config_late(encoder, crtc_state,
5226                                                    conn_state);
5227                 if (ret)
5228                         return ret;
5229         }
5230
5231         return 0;
5232 }
5233
/*
 * Fuzzy comparison of two clock frequencies: true when they match
 * exactly or their difference is below ~5% of their sum (integer
 * arithmetic). A zero clock never matches a non-zero one.
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
        int sum, diff;

        if (clock1 == clock2)
                return true;

        if (clock1 == 0 || clock2 == 0)
                return false;

        sum = clock1 + clock2;
        diff = abs(clock1 - clock2);

        /* (diff + sum) * 100 / sum < 105  <=>  diff < 5% of sum */
        return (diff + sum) * 100 / sum < 105;
}
5251
5252 static bool
5253 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
5254                        const struct intel_link_m_n *m2_n2)
5255 {
5256         return m_n->tu == m2_n2->tu &&
5257                 m_n->data_m == m2_n2->data_m &&
5258                 m_n->data_n == m2_n2->data_n &&
5259                 m_n->link_m == m2_n2->link_m &&
5260                 m_n->link_n == m2_n2->link_n;
5261 }
5262
5263 static bool
5264 intel_compare_infoframe(const union hdmi_infoframe *a,
5265                         const union hdmi_infoframe *b)
5266 {
5267         return memcmp(a, b, sizeof(*a)) == 0;
5268 }
5269
5270 static bool
5271 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
5272                          const struct drm_dp_vsc_sdp *b)
5273 {
5274         return memcmp(a, b, sizeof(*a)) == 0;
5275 }
5276
5277 static bool
5278 intel_compare_buffer(const u8 *a, const u8 *b, size_t len)
5279 {
5280         return memcmp(a, b, len) == 0;
5281 }
5282
/*
 * Log an infoframe mismatch between expected (a) and found (b) state.
 * On the fastset path this is only a debug message (and is skipped
 * entirely when KMS debugging is off); otherwise it is an error.
 */
static void
pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
                               bool fastset, const char *name,
                               const union hdmi_infoframe *a,
                               const union hdmi_infoframe *b)
{
        if (fastset) {
                if (!drm_debug_enabled(DRM_UT_KMS))
                        return;

                drm_dbg_kms(&dev_priv->drm,
                            "fastset mismatch in %s infoframe\n", name);
                drm_dbg_kms(&dev_priv->drm, "expected:\n");
                hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
                drm_dbg_kms(&dev_priv->drm, "found:\n");
                hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
        } else {
                drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
                drm_err(&dev_priv->drm, "expected:\n");
                hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
                drm_err(&dev_priv->drm, "found:\n");
                hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
        }
}
5307
/*
 * Log a DP VSC SDP mismatch between expected (a) and found (b) state.
 * On the fastset path this is only a debug message (and is skipped
 * entirely when KMS debugging is off); otherwise it is an error.
 */
static void
pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
                                bool fastset, const char *name,
                                const struct drm_dp_vsc_sdp *a,
                                const struct drm_dp_vsc_sdp *b)
{
        if (fastset) {
                if (!drm_debug_enabled(DRM_UT_KMS))
                        return;

                drm_dbg_kms(&dev_priv->drm,
                            "fastset mismatch in %s dp sdp\n", name);
                drm_dbg_kms(&dev_priv->drm, "expected:\n");
                drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
                drm_dbg_kms(&dev_priv->drm, "found:\n");
                drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
        } else {
                drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
                drm_err(&dev_priv->drm, "expected:\n");
                drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
                drm_err(&dev_priv->drm, "found:\n");
                drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
        }
}
5332
5333 /* Returns the length up to and including the last differing byte */
5334 static size_t
5335 memcmp_diff_len(const u8 *a, const u8 *b, size_t len)
5336 {
5337         int i;
5338
5339         for (i = len - 1; i >= 0; i--) {
5340                 if (a[i] != b[i])
5341                         return i + 1;
5342         }
5343
5344         return 0;
5345 }
5346
5347 static void
5348 pipe_config_buffer_mismatch(struct drm_i915_private *dev_priv,
5349                             bool fastset, const char *name,
5350                             const u8 *a, const u8 *b, size_t len)
5351 {
5352         if (fastset) {
5353                 if (!drm_debug_enabled(DRM_UT_KMS))
5354                         return;
5355
5356                 /* only dump up to the last difference */
5357                 len = memcmp_diff_len(a, b, len);
5358
5359                 drm_dbg_kms(&dev_priv->drm,
5360                             "fastset mismatch in %s buffer\n", name);
5361                 print_hex_dump(KERN_DEBUG, "expected: ", DUMP_PREFIX_NONE,
5362                                16, 0, a, len, false);
5363                 print_hex_dump(KERN_DEBUG, "found: ", DUMP_PREFIX_NONE,
5364                                16, 0, b, len, false);
5365         } else {
5366                 /* only dump up to the last difference */
5367                 len = memcmp_diff_len(a, b, len);
5368
5369                 drm_err(&dev_priv->drm, "mismatch in %s buffer\n", name);
5370                 print_hex_dump(KERN_ERR, "expected: ", DUMP_PREFIX_NONE,
5371                                16, 0, a, len, false);
5372                 print_hex_dump(KERN_ERR, "found: ", DUMP_PREFIX_NONE,
5373                                16, 0, b, len, false);
5374         }
5375 }
5376
/*
 * Report a pipe config mismatch for field @name on @crtc.  Fastset
 * checks log at KMS debug level (the caller then falls back to a full
 * modeset); non-fastset mismatches are logged as errors.  @format and
 * the varargs describe the expected/found values.
 */
static void __printf(4, 5)
pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
		     const char *name, const char *format, ...)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct va_format vaf;
	va_list args;

	/* %pV defers formatting of the (format, args) pair to the printk core */
	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	if (fastset)
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
			    crtc->base.base.id, crtc->base.name, name, &vaf);
	else
		drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
			crtc->base.base.id, crtc->base.name, name, &vaf);

	va_end(args);
}
5399
5400 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
5401 {
5402         if (dev_priv->params.fastboot != -1)
5403                 return dev_priv->params.fastboot;
5404
5405         /* Enable fastboot by default on Skylake and newer */
5406         if (DISPLAY_VER(dev_priv) >= 9)
5407                 return true;
5408
5409         /* Enable fastboot by default on VLV and CHV */
5410         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5411                 return true;
5412
5413         /* Disabled by default on all others */
5414         return false;
5415 }
5416
5417 bool
5418 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
5419                           const struct intel_crtc_state *pipe_config,
5420                           bool fastset)
5421 {
5422         struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
5423         struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
5424         bool ret = true;
5425         bool fixup_inherited = fastset &&
5426                 current_config->inherited && !pipe_config->inherited;
5427
5428         if (fixup_inherited && !fastboot_enabled(dev_priv)) {
5429                 drm_dbg_kms(&dev_priv->drm,
5430                             "initial modeset and fastboot not set\n");
5431                 ret = false;
5432         }
5433
5434 #define PIPE_CONF_CHECK_X(name) do { \
5435         if (current_config->name != pipe_config->name) { \
5436                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
5437                                      "(expected 0x%08x, found 0x%08x)", \
5438                                      current_config->name, \
5439                                      pipe_config->name); \
5440                 ret = false; \
5441         } \
5442 } while (0)
5443
5444 #define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \
5445         if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
5446                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
5447                                      "(expected 0x%08x, found 0x%08x)", \
5448                                      current_config->name & (mask), \
5449                                      pipe_config->name & (mask)); \
5450                 ret = false; \
5451         } \
5452 } while (0)
5453
5454 #define PIPE_CONF_CHECK_I(name) do { \
5455         if (current_config->name != pipe_config->name) { \
5456                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
5457                                      "(expected %i, found %i)", \
5458                                      current_config->name, \
5459                                      pipe_config->name); \
5460                 ret = false; \
5461         } \
5462 } while (0)
5463
5464 #define PIPE_CONF_CHECK_BOOL(name) do { \
5465         if (current_config->name != pipe_config->name) { \
5466                 pipe_config_mismatch(fastset, crtc,  __stringify(name), \
5467                                      "(expected %s, found %s)", \
5468                                      str_yes_no(current_config->name), \
5469                                      str_yes_no(pipe_config->name)); \
5470                 ret = false; \
5471         } \
5472 } while (0)
5473
5474 /*
5475  * Checks state where we only read out the enabling, but not the entire
5476  * state itself (like full infoframes or ELD for audio). These states
5477  * require a full modeset on bootup to fix up.
5478  */
5479 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
5480         if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
5481                 PIPE_CONF_CHECK_BOOL(name); \
5482         } else { \
5483                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
5484                                      "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
5485                                      str_yes_no(current_config->name), \
5486                                      str_yes_no(pipe_config->name)); \
5487                 ret = false; \
5488         } \
5489 } while (0)
5490
5491 #define PIPE_CONF_CHECK_P(name) do { \
5492         if (current_config->name != pipe_config->name) { \
5493                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
5494                                      "(expected %p, found %p)", \
5495                                      current_config->name, \
5496                                      pipe_config->name); \
5497                 ret = false; \
5498         } \
5499 } while (0)
5500
5501 #define PIPE_CONF_CHECK_M_N(name) do { \
5502         if (!intel_compare_link_m_n(&current_config->name, \
5503                                     &pipe_config->name)) { \
5504                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
5505                                      "(expected tu %i data %i/%i link %i/%i, " \
5506                                      "found tu %i, data %i/%i link %i/%i)", \
5507                                      current_config->name.tu, \
5508                                      current_config->name.data_m, \
5509                                      current_config->name.data_n, \
5510                                      current_config->name.link_m, \
5511                                      current_config->name.link_n, \
5512                                      pipe_config->name.tu, \
5513                                      pipe_config->name.data_m, \
5514                                      pipe_config->name.data_n, \
5515                                      pipe_config->name.link_m, \
5516                                      pipe_config->name.link_n); \
5517                 ret = false; \
5518         } \
5519 } while (0)
5520
5521 #define PIPE_CONF_CHECK_TIMINGS(name) do { \
5522         PIPE_CONF_CHECK_I(name.crtc_hdisplay); \
5523         PIPE_CONF_CHECK_I(name.crtc_htotal); \
5524         PIPE_CONF_CHECK_I(name.crtc_hblank_start); \
5525         PIPE_CONF_CHECK_I(name.crtc_hblank_end); \
5526         PIPE_CONF_CHECK_I(name.crtc_hsync_start); \
5527         PIPE_CONF_CHECK_I(name.crtc_hsync_end); \
5528         PIPE_CONF_CHECK_I(name.crtc_vdisplay); \
5529         PIPE_CONF_CHECK_I(name.crtc_vtotal); \
5530         PIPE_CONF_CHECK_I(name.crtc_vblank_start); \
5531         PIPE_CONF_CHECK_I(name.crtc_vblank_end); \
5532         PIPE_CONF_CHECK_I(name.crtc_vsync_start); \
5533         PIPE_CONF_CHECK_I(name.crtc_vsync_end); \
5534 } while (0)
5535
5536 #define PIPE_CONF_CHECK_RECT(name) do { \
5537         PIPE_CONF_CHECK_I(name.x1); \
5538         PIPE_CONF_CHECK_I(name.x2); \
5539         PIPE_CONF_CHECK_I(name.y1); \
5540         PIPE_CONF_CHECK_I(name.y2); \
5541 } while (0)
5542
5543 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
5544         if ((current_config->name ^ pipe_config->name) & (mask)) { \
5545                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
5546                                      "(%x) (expected %i, found %i)", \
5547                                      (mask), \
5548                                      current_config->name & (mask), \
5549                                      pipe_config->name & (mask)); \
5550                 ret = false; \
5551         } \
5552 } while (0)
5553
5554 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
5555         if (!intel_compare_infoframe(&current_config->infoframes.name, \
5556                                      &pipe_config->infoframes.name)) { \
5557                 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
5558                                                &current_config->infoframes.name, \
5559                                                &pipe_config->infoframes.name); \
5560                 ret = false; \
5561         } \
5562 } while (0)
5563
5564 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
5565         if (!current_config->has_psr && !pipe_config->has_psr && \
5566             !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
5567                                       &pipe_config->infoframes.name)) { \
5568                 pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
5569                                                 &current_config->infoframes.name, \
5570                                                 &pipe_config->infoframes.name); \
5571                 ret = false; \
5572         } \
5573 } while (0)
5574
5575 #define PIPE_CONF_CHECK_BUFFER(name, len) do { \
5576         BUILD_BUG_ON(sizeof(current_config->name) != (len)); \
5577         BUILD_BUG_ON(sizeof(pipe_config->name) != (len)); \
5578         if (!intel_compare_buffer(current_config->name, pipe_config->name, (len))) { \
5579                 pipe_config_buffer_mismatch(dev_priv, fastset, __stringify(name), \
5580                                             current_config->name, \
5581                                             pipe_config->name, \
5582                                             (len)); \
5583                 ret = false; \
5584         } \
5585 } while (0)
5586
5587 #define PIPE_CONF_CHECK_COLOR_LUT(lut, is_pre_csc_lut) do { \
5588         if (current_config->gamma_mode == pipe_config->gamma_mode && \
5589             !intel_color_lut_equal(current_config, \
5590                                    current_config->lut, pipe_config->lut, \
5591                                    is_pre_csc_lut)) {   \
5592                 pipe_config_mismatch(fastset, crtc, __stringify(lut), \
5593                                      "hw_state doesn't match sw_state"); \
5594                 ret = false; \
5595         } \
5596 } while (0)
5597
5598 #define PIPE_CONF_CHECK_CSC(name) do { \
5599         PIPE_CONF_CHECK_X(name.preoff[0]); \
5600         PIPE_CONF_CHECK_X(name.preoff[1]); \
5601         PIPE_CONF_CHECK_X(name.preoff[2]); \
5602         PIPE_CONF_CHECK_X(name.coeff[0]); \
5603         PIPE_CONF_CHECK_X(name.coeff[1]); \
5604         PIPE_CONF_CHECK_X(name.coeff[2]); \
5605         PIPE_CONF_CHECK_X(name.coeff[3]); \
5606         PIPE_CONF_CHECK_X(name.coeff[4]); \
5607         PIPE_CONF_CHECK_X(name.coeff[5]); \
5608         PIPE_CONF_CHECK_X(name.coeff[6]); \
5609         PIPE_CONF_CHECK_X(name.coeff[7]); \
5610         PIPE_CONF_CHECK_X(name.coeff[8]); \
5611         PIPE_CONF_CHECK_X(name.postoff[0]); \
5612         PIPE_CONF_CHECK_X(name.postoff[1]); \
5613         PIPE_CONF_CHECK_X(name.postoff[2]); \
5614 } while (0)
5615
5616 #define PIPE_CONF_QUIRK(quirk) \
5617         ((current_config->quirks | pipe_config->quirks) & (quirk))
5618
5619         PIPE_CONF_CHECK_I(hw.enable);
5620         PIPE_CONF_CHECK_I(hw.active);
5621
5622         PIPE_CONF_CHECK_I(cpu_transcoder);
5623         PIPE_CONF_CHECK_I(mst_master_transcoder);
5624
5625         PIPE_CONF_CHECK_BOOL(has_pch_encoder);
5626         PIPE_CONF_CHECK_I(fdi_lanes);
5627         PIPE_CONF_CHECK_M_N(fdi_m_n);
5628
5629         PIPE_CONF_CHECK_I(lane_count);
5630         PIPE_CONF_CHECK_X(lane_lat_optim_mask);
5631
5632         if (HAS_DOUBLE_BUFFERED_M_N(dev_priv)) {
5633                 if (!fastset || !pipe_config->seamless_m_n)
5634                         PIPE_CONF_CHECK_M_N(dp_m_n);
5635         } else {
5636                 PIPE_CONF_CHECK_M_N(dp_m_n);
5637                 PIPE_CONF_CHECK_M_N(dp_m2_n2);
5638         }
5639
5640         PIPE_CONF_CHECK_X(output_types);
5641
5642         PIPE_CONF_CHECK_I(framestart_delay);
5643         PIPE_CONF_CHECK_I(msa_timing_delay);
5644
5645         PIPE_CONF_CHECK_TIMINGS(hw.pipe_mode);
5646         PIPE_CONF_CHECK_TIMINGS(hw.adjusted_mode);
5647
5648         PIPE_CONF_CHECK_I(pixel_multiplier);
5649
5650         PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
5651                               DRM_MODE_FLAG_INTERLACE);
5652
5653         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
5654                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
5655                                       DRM_MODE_FLAG_PHSYNC);
5656                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
5657                                       DRM_MODE_FLAG_NHSYNC);
5658                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
5659                                       DRM_MODE_FLAG_PVSYNC);
5660                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
5661                                       DRM_MODE_FLAG_NVSYNC);
5662         }
5663
5664         PIPE_CONF_CHECK_I(output_format);
5665         PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
5666         if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
5667             IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5668                 PIPE_CONF_CHECK_BOOL(limited_color_range);
5669
5670         PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
5671         PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
5672         PIPE_CONF_CHECK_BOOL(has_infoframe);
5673         PIPE_CONF_CHECK_BOOL(fec_enable);
5674
5675         PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
5676         PIPE_CONF_CHECK_BUFFER(eld, MAX_ELD_BYTES);
5677
5678         PIPE_CONF_CHECK_X(gmch_pfit.control);
5679         /* pfit ratios are autocomputed by the hw on gen4+ */
5680         if (DISPLAY_VER(dev_priv) < 4)
5681                 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
5682         PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
5683
5684         /*
5685          * Changing the EDP transcoder input mux
5686          * (A_ONOFF vs. A_ON) requires a full modeset.
5687          */
5688         PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
5689
5690         if (!fastset) {
5691                 PIPE_CONF_CHECK_RECT(pipe_src);
5692
5693                 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
5694                 PIPE_CONF_CHECK_RECT(pch_pfit.dst);
5695
5696                 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
5697                 PIPE_CONF_CHECK_I(pixel_rate);
5698
5699                 PIPE_CONF_CHECK_X(gamma_mode);
5700                 if (IS_CHERRYVIEW(dev_priv))
5701                         PIPE_CONF_CHECK_X(cgm_mode);
5702                 else
5703                         PIPE_CONF_CHECK_X(csc_mode);
5704                 PIPE_CONF_CHECK_BOOL(gamma_enable);
5705                 PIPE_CONF_CHECK_BOOL(csc_enable);
5706
5707                 PIPE_CONF_CHECK_I(linetime);
5708                 PIPE_CONF_CHECK_I(ips_linetime);
5709
5710                 PIPE_CONF_CHECK_COLOR_LUT(pre_csc_lut, true);
5711                 PIPE_CONF_CHECK_COLOR_LUT(post_csc_lut, false);
5712
5713                 PIPE_CONF_CHECK_CSC(csc);
5714                 PIPE_CONF_CHECK_CSC(output_csc);
5715
5716                 if (current_config->active_planes) {
5717                         PIPE_CONF_CHECK_BOOL(has_psr);
5718                         PIPE_CONF_CHECK_BOOL(has_psr2);
5719                         PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
5720                         PIPE_CONF_CHECK_I(dc3co_exitline);
5721                 }
5722         }
5723
5724         PIPE_CONF_CHECK_BOOL(double_wide);
5725
5726         if (dev_priv->display.dpll.mgr) {
5727                 PIPE_CONF_CHECK_P(shared_dpll);
5728
5729                 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
5730                 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
5731                 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
5732                 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
5733                 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
5734                 PIPE_CONF_CHECK_X(dpll_hw_state.spll);
5735                 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
5736                 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
5737                 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
5738                 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
5739                 PIPE_CONF_CHECK_X(dpll_hw_state.div0);
5740                 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
5741                 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
5742                 PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
5743                 PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
5744                 PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
5745                 PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
5746                 PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
5747                 PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
5748                 PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
5749                 PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
5750                 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
5751                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
5752                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
5753                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
5754                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
5755                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
5756                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
5757                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
5758                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
5759                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
5760                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
5761         }
5762
5763         PIPE_CONF_CHECK_X(dsi_pll.ctrl);
5764         PIPE_CONF_CHECK_X(dsi_pll.div);
5765
5766         if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
5767                 PIPE_CONF_CHECK_I(pipe_bpp);
5768
5769         if (!fastset || !pipe_config->seamless_m_n) {
5770                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_clock);
5771                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_clock);
5772         }
5773         PIPE_CONF_CHECK_I(port_clock);
5774
5775         PIPE_CONF_CHECK_I(min_voltage_level);
5776
5777         if (current_config->has_psr || pipe_config->has_psr)
5778                 PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
5779                                             ~intel_hdmi_infoframe_enable(DP_SDP_VSC));
5780         else
5781                 PIPE_CONF_CHECK_X(infoframes.enable);
5782
5783         PIPE_CONF_CHECK_X(infoframes.gcp);
5784         PIPE_CONF_CHECK_INFOFRAME(avi);
5785         PIPE_CONF_CHECK_INFOFRAME(spd);
5786         PIPE_CONF_CHECK_INFOFRAME(hdmi);
5787         PIPE_CONF_CHECK_INFOFRAME(drm);
5788         PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
5789
5790         PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
5791         PIPE_CONF_CHECK_I(master_transcoder);
5792         PIPE_CONF_CHECK_X(bigjoiner_pipes);
5793
5794         PIPE_CONF_CHECK_I(dsc.compression_enable);
5795         PIPE_CONF_CHECK_I(dsc.dsc_split);
5796         PIPE_CONF_CHECK_I(dsc.compressed_bpp);
5797
5798         PIPE_CONF_CHECK_BOOL(splitter.enable);
5799         PIPE_CONF_CHECK_I(splitter.link_count);
5800         PIPE_CONF_CHECK_I(splitter.pixel_overlap);
5801
5802         if (!fastset)
5803                 PIPE_CONF_CHECK_BOOL(vrr.enable);
5804         PIPE_CONF_CHECK_I(vrr.vmin);
5805         PIPE_CONF_CHECK_I(vrr.vmax);
5806         PIPE_CONF_CHECK_I(vrr.flipline);
5807         PIPE_CONF_CHECK_I(vrr.pipeline_full);
5808         PIPE_CONF_CHECK_I(vrr.guardband);
5809
5810 #undef PIPE_CONF_CHECK_X
5811 #undef PIPE_CONF_CHECK_I
5812 #undef PIPE_CONF_CHECK_BOOL
5813 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
5814 #undef PIPE_CONF_CHECK_P
5815 #undef PIPE_CONF_CHECK_FLAGS
5816 #undef PIPE_CONF_CHECK_COLOR_LUT
5817 #undef PIPE_CONF_CHECK_TIMINGS
5818 #undef PIPE_CONF_CHECK_RECT
5819 #undef PIPE_CONF_QUIRK
5820
5821         return ret;
5822 }
5823
/*
 * Assert that every plane in @state is either visible or acting as a
 * planar slave plane.
 */
static void
intel_verify_planes(struct intel_atomic_state *state)
{
	struct intel_plane *plane;
	const struct intel_plane_state *plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane,
					  plane_state, i)
		assert_plane(plane, plane_state->planar_slave ||
			     plane_state->uapi.visible);
}
5836
/**
 * intel_modeset_all_pipes - force a full modeset on all pipes
 * @state: the atomic state
 * @reason: string logged as the cause of each forced modeset
 *
 * Adds every crtc to @state and flags each active crtc that is not
 * already undergoing a modeset for a full modeset, pulling its
 * connectors, DP MST topology state and planes into @state as well.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int intel_modeset_all_pipes(struct intel_atomic_state *state,
			    const char *reason)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	/*
	 * Add all pipes to the state, and force
	 * a modeset on all the active ones.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		/* Inactive or already flagged for a modeset: nothing to do */
		if (!crtc_state->hw.active ||
		    intel_crtc_needs_modeset(crtc_state))
			continue;

		drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] Full modeset due to %s\n",
			    crtc->base.base.id, crtc->base.name, reason);

		/* Request a full modeset rather than a fastset pipe update */
		crtc_state->uapi.mode_changed = true;
		crtc_state->update_pipe = false;

		ret = drm_atomic_add_affected_connectors(&state->base,
							 &crtc->base);
		if (ret)
			return ret;

		ret = intel_dp_mst_add_topology_state_for_crtc(state, crtc);
		if (ret)
			return ret;

		ret = intel_atomic_add_affected_planes(state, crtc);
		if (ret)
			return ret;

		/* Update all active planes; drop any pending async flips */
		crtc_state->update_planes |= crtc_state->active_planes;
		crtc_state->async_flip_planes = 0;
		crtc_state->do_async_flip = false;
	}

	return 0;
}
5885
5886 /*
5887  * This implements the workaround described in the "notes" section of the mode
5888  * set sequence documentation. When going from no pipes or single pipe to
5889  * multiple pipes, and planes are enabled after the pipe, we need to wait at
5890  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
5891  */
static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled in during modeset */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->hw.active ||
		    !intel_crtc_needs_modeset(crtc_state))
			continue;

		/* Remember the first two crtcs being enabled by this modeset */
		if (first_crtc_state) {
			other_crtc_state = crtc_state;
			break;
		} else {
			first_crtc_state = crtc_state;
			first_pipe = crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->base.dev, crtc) {
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		/* Clear any stale workaround pipe from a previous commit */
		crtc_state->hsw_workaround_pipe = INVALID_PIPE;

		if (!crtc_state->hw.active ||
		    intel_crtc_needs_modeset(crtc_state))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = crtc->pipe;
	}

	/*
	 * Record which pipe the newly-enabled crtc must wait on: either
	 * the single pipe that is already enabled, or - when going from
	 * zero pipes to two - the first pipe enabled by this modeset.
	 */
	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
5946
5947 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
5948                            u8 active_pipes)
5949 {
5950         const struct intel_crtc_state *crtc_state;
5951         struct intel_crtc *crtc;
5952         int i;
5953
5954         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
5955                 if (crtc_state->hw.active)
5956                         active_pipes |= BIT(crtc->pipe);
5957                 else
5958                         active_pipes &= ~BIT(crtc->pipe);
5959         }
5960
5961         return active_pipes;
5962 }
5963
5964 static int intel_modeset_checks(struct intel_atomic_state *state)
5965 {
5966         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5967
5968         state->modeset = true;
5969
5970         if (IS_HASWELL(dev_priv))
5971                 return hsw_mode_set_planes_workaround(state);
5972
5973         return 0;
5974 }
5975
5976 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
5977                                      struct intel_crtc_state *new_crtc_state)
5978 {
5979         if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
5980                 return;
5981
5982         new_crtc_state->uapi.mode_changed = false;
5983         if (!intel_crtc_needs_modeset(new_crtc_state))
5984                 new_crtc_state->update_pipe = true;
5985 }
5986
5987 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
5988                                           struct intel_crtc *crtc,
5989                                           u8 plane_ids_mask)
5990 {
5991         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5992         struct intel_plane *plane;
5993
5994         for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
5995                 struct intel_plane_state *plane_state;
5996
5997                 if ((plane_ids_mask & BIT(plane->id)) == 0)
5998                         continue;
5999
6000                 plane_state = intel_atomic_get_plane_state(state, plane);
6001                 if (IS_ERR(plane_state))
6002                         return PTR_ERR(plane_state);
6003         }
6004
6005         return 0;
6006 }
6007
6008 int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
6009                                      struct intel_crtc *crtc)
6010 {
6011         const struct intel_crtc_state *old_crtc_state =
6012                 intel_atomic_get_old_crtc_state(state, crtc);
6013         const struct intel_crtc_state *new_crtc_state =
6014                 intel_atomic_get_new_crtc_state(state, crtc);
6015
6016         return intel_crtc_add_planes_to_state(state, crtc,
6017                                               old_crtc_state->enabled_planes |
6018                                               new_crtc_state->enabled_planes);
6019 }
6020
6021 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
6022 {
6023         /* See {hsw,vlv,ivb}_plane_ratio() */
6024         return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
6025                 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
6026                 IS_IVYBRIDGE(dev_priv);
6027 }
6028
6029 static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
6030                                            struct intel_crtc *crtc,
6031                                            struct intel_crtc *other)
6032 {
6033         const struct intel_plane_state *plane_state;
6034         struct intel_plane *plane;
6035         u8 plane_ids = 0;
6036         int i;
6037
6038         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
6039                 if (plane->pipe == crtc->pipe)
6040                         plane_ids |= BIT(plane->id);
6041         }
6042
6043         return intel_crtc_add_planes_to_state(state, other, plane_ids);
6044 }
6045
/*
 * For every crtc in @state that is part of a bigjoiner configuration,
 * add the corresponding planes of the other pipe(s) in its bigjoiner
 * set to the state as well.
 */
static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc *other;

		for_each_intel_crtc_in_pipe_mask(&i915->drm, other,
						 crtc_state->bigjoiner_pipes) {
			int ret;

			/* Don't mirror a crtc's planes onto itself */
			if (crtc == other)
				continue;

			ret = intel_crtc_add_bigjoiner_planes(state, crtc, other);
			if (ret)
				return ret;
		}
	}

	return 0;
}
6071
/*
 * Run the per-plane atomic checks and pull any additional planes into the
 * state that later global computations (min cdclk) depend on.
 *
 * Ordering matters: linked/bigjoiner planes must be added to the state
 * before the per-plane checks run, and the active-plane-count planes must
 * be added before the min cdclk is computed by a later stage.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_atomic_check_planes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i, ret;

	/* Add the Y (NV12 luma) planes linked to their UV planes on icl+. */
	ret = icl_add_linked_planes(state);
	if (ret)
		return ret;

	/* Add matching planes on the other pipe(s) of a bigjoiner pair. */
	ret = intel_bigjoiner_add_affected_planes(state);
	if (ret)
		return ret;

	/* Per-plane driver checks for every plane now in the state. */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_atomic_check(state, plane);
		if (ret) {
			drm_dbg_atomic(&dev_priv->drm,
				       "[PLANE:%d:%s] atomic driver check failed\n",
				       plane->base.base.id, plane->base.name);
			return ret;
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		u8 old_active_planes, new_active_planes;

		ret = icl_check_nv12_planes(new_crtc_state);
		if (ret)
			return ret;

		/*
		 * On some platforms the number of active planes affects
		 * the planes' minimum cdclk calculation. Add such planes
		 * to the state before we compute the minimum cdclk.
		 */
		if (!active_planes_affects_min_cdclk(dev_priv))
			continue;

		/* The cursor plane is excluded from the active-plane count. */
		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

		/* Only the number of active planes matters here, not which. */
		if (hweight8(old_active_planes) == hweight8(new_active_planes))
			continue;

		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
		if (ret)
			return ret;
	}

	return 0;
}
6128
6129 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
6130 {
6131         struct intel_crtc_state *crtc_state;
6132         struct intel_crtc *crtc;
6133         int i;
6134
6135         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
6136                 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
6137                 int ret;
6138
6139                 ret = intel_crtc_atomic_check(state, crtc);
6140                 if (ret) {
6141                         drm_dbg_atomic(&i915->drm,
6142                                        "[CRTC:%d:%s] atomic driver check failed\n",
6143                                        crtc->base.base.id, crtc->base.name);
6144                         return ret;
6145                 }
6146         }
6147
6148         return 0;
6149 }
6150
6151 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
6152                                                u8 transcoders)
6153 {
6154         const struct intel_crtc_state *new_crtc_state;
6155         struct intel_crtc *crtc;
6156         int i;
6157
6158         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6159                 if (new_crtc_state->hw.enable &&
6160                     transcoders & BIT(new_crtc_state->cpu_transcoder) &&
6161                     intel_crtc_needs_modeset(new_crtc_state))
6162                         return true;
6163         }
6164
6165         return false;
6166 }
6167
6168 static bool intel_pipes_need_modeset(struct intel_atomic_state *state,
6169                                      u8 pipes)
6170 {
6171         const struct intel_crtc_state *new_crtc_state;
6172         struct intel_crtc *crtc;
6173         int i;
6174
6175         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6176                 if (new_crtc_state->hw.enable &&
6177                     pipes & BIT(crtc->pipe) &&
6178                     intel_crtc_needs_modeset(new_crtc_state))
6179                         return true;
6180         }
6181
6182         return false;
6183 }
6184
6185 static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
6186                                         struct intel_crtc *master_crtc)
6187 {
6188         struct drm_i915_private *i915 = to_i915(state->base.dev);
6189         struct intel_crtc_state *master_crtc_state =
6190                 intel_atomic_get_new_crtc_state(state, master_crtc);
6191         struct intel_crtc *slave_crtc;
6192
6193         if (!master_crtc_state->bigjoiner_pipes)
6194                 return 0;
6195
6196         /* sanity check */
6197         if (drm_WARN_ON(&i915->drm,
6198                         master_crtc->pipe != bigjoiner_master_pipe(master_crtc_state)))
6199                 return -EINVAL;
6200
6201         if (master_crtc_state->bigjoiner_pipes & ~bigjoiner_pipes(i915)) {
6202                 drm_dbg_kms(&i915->drm,
6203                             "[CRTC:%d:%s] Cannot act as big joiner master "
6204                             "(need 0x%x as pipes, only 0x%x possible)\n",
6205                             master_crtc->base.base.id, master_crtc->base.name,
6206                             master_crtc_state->bigjoiner_pipes, bigjoiner_pipes(i915));
6207                 return -EINVAL;
6208         }
6209
6210         for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
6211                                          intel_crtc_bigjoiner_slave_pipes(master_crtc_state)) {
6212                 struct intel_crtc_state *slave_crtc_state;
6213                 int ret;
6214
6215                 slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave_crtc);
6216                 if (IS_ERR(slave_crtc_state))
6217                         return PTR_ERR(slave_crtc_state);
6218
6219                 /* master being enabled, slave was already configured? */
6220                 if (slave_crtc_state->uapi.enable) {
6221                         drm_dbg_kms(&i915->drm,
6222                                     "[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
6223                                     "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
6224                                     slave_crtc->base.base.id, slave_crtc->base.name,
6225                                     master_crtc->base.base.id, master_crtc->base.name);
6226                         return -EINVAL;
6227                 }
6228
6229                 /*
6230                  * The state copy logic assumes the master crtc gets processed
6231                  * before the slave crtc during the main compute_config loop.
6232                  * This works because the crtcs are created in pipe order,
6233                  * and the hardware requires master pipe < slave pipe as well.
6234                  * Should that change we need to rethink the logic.
6235                  */
6236                 if (WARN_ON(drm_crtc_index(&master_crtc->base) >
6237                             drm_crtc_index(&slave_crtc->base)))
6238                         return -EINVAL;
6239
6240                 drm_dbg_kms(&i915->drm,
6241                             "[CRTC:%d:%s] Used as slave for big joiner master [CRTC:%d:%s]\n",
6242                             slave_crtc->base.base.id, slave_crtc->base.name,
6243                             master_crtc->base.base.id, master_crtc->base.name);
6244
6245                 slave_crtc_state->bigjoiner_pipes =
6246                         master_crtc_state->bigjoiner_pipes;
6247
6248                 ret = copy_bigjoiner_crtc_state_modeset(state, slave_crtc);
6249                 if (ret)
6250                         return ret;
6251         }
6252
6253         return 0;
6254 }
6255
/*
 * Tear down the bigjoiner link rooted at @master_crtc: clear the
 * bigjoiner pipe mask on each slave crtc state (re-syncing the slave's
 * hw state from its uapi state) and finally on the master itself.
 *
 * Note: the master's bigjoiner_pipes mask must be cleared only after
 * the loop, because intel_crtc_bigjoiner_slave_pipes() reads it to
 * enumerate the slaves.
 */
static void kill_bigjoiner_slave(struct intel_atomic_state *state,
				 struct intel_crtc *master_crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *master_crtc_state =
		intel_atomic_get_new_crtc_state(state, master_crtc);
	struct intel_crtc *slave_crtc;

	for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
					 intel_crtc_bigjoiner_slave_pipes(master_crtc_state)) {
		struct intel_crtc_state *slave_crtc_state =
			intel_atomic_get_new_crtc_state(state, slave_crtc);

		slave_crtc_state->bigjoiner_pipes = 0;

		intel_crtc_copy_uapi_to_hw_state_modeset(state, slave_crtc);
	}

	master_crtc_state->bigjoiner_pipes = 0;
}
6276
6277 /**
6278  * DOC: asynchronous flip implementation
6279  *
6280  * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
6281  * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
6282  * Correspondingly, support is currently added for primary plane only.
6283  *
6284  * Async flip can only change the plane surface address, so anything else
6285  * changing is rejected from the intel_async_flip_check_hw() function.
6286  * Once this check is cleared, flip done interrupt is enabled using
6287  * the intel_crtc_enable_flip_done() function.
6288  *
6289  * As soon as the surface address register is written, flip done interrupt is
 * generated and the requested events are sent to the userspace in the interrupt
6291  * handler itself. The timestamp and sequence sent during the flip done event
6292  * correspond to the last vblank and have no relation to the actual time when
6293  * the flip done event was sent.
6294  */
/*
 * Validate the uapi-level constraints for an async flip request on
 * @crtc: the crtc must be active, must not require a modeset, and
 * every affected plane must support async flips and have both an old
 * and a new framebuffer.
 *
 * Returns 0 if the request is acceptable (or no async flip was
 * requested), -EINVAL otherwise.
 */
static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
				       struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *old_plane_state;
	struct intel_plane_state *new_plane_state;
	struct intel_plane *plane;
	int i;

	if (!new_crtc_state->uapi.async_flip)
		return 0;

	if (!new_crtc_state->uapi.active) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] not active\n",
			    crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	if (intel_crtc_needs_modeset(new_crtc_state)) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] modeset required\n",
			    crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		/* only planes on this crtc's pipe are relevant */
		if (plane->pipe != crtc->pipe)
			continue;

		/*
		 * TODO: Async flip is only supported through the page flip
		 * IOCTL as of now, so support is currently added for the
		 * primary plane only. Support for the other planes on
		 * platforms that support this (vlv/chv and icl+) should be
		 * added when async flip is enabled in the atomic IOCTL path.
		 */
		if (!plane->async_flip) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] async flip not supported\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (!old_plane_state->uapi.fb || !new_plane_state->uapi.fb) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] no old or new framebuffer\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}
	}

	return 0;
}
6352
6353 static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct intel_crtc *crtc)
6354 {
6355         struct drm_i915_private *i915 = to_i915(state->base.dev);
6356         const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
6357         const struct intel_plane_state *new_plane_state, *old_plane_state;
6358         struct intel_plane *plane;
6359         int i;
6360
6361         old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
6362         new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
6363
6364         if (!new_crtc_state->uapi.async_flip)
6365                 return 0;
6366
6367         if (!new_crtc_state->hw.active) {
6368                 drm_dbg_kms(&i915->drm,
6369                             "[CRTC:%d:%s] not active\n",
6370                             crtc->base.base.id, crtc->base.name);
6371                 return -EINVAL;
6372         }
6373
6374         if (intel_crtc_needs_modeset(new_crtc_state)) {
6375                 drm_dbg_kms(&i915->drm,
6376                             "[CRTC:%d:%s] modeset required\n",
6377                             crtc->base.base.id, crtc->base.name);
6378                 return -EINVAL;
6379         }
6380
6381         if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
6382                 drm_dbg_kms(&i915->drm,
6383                             "[CRTC:%d:%s] Active planes cannot be in async flip\n",
6384                             crtc->base.base.id, crtc->base.name);
6385                 return -EINVAL;
6386         }
6387
6388         for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
6389                                              new_plane_state, i) {
6390                 if (plane->pipe != crtc->pipe)
6391                         continue;
6392
6393                 /*
6394                  * Only async flip capable planes should be in the state
6395                  * if we're really about to ask the hardware to perform
6396                  * an async flip. We should never get this far otherwise.
6397                  */
6398                 if (drm_WARN_ON(&i915->drm,
6399                                 new_crtc_state->do_async_flip && !plane->async_flip))
6400                         return -EINVAL;
6401
6402                 /*
6403                  * Only check async flip capable planes other planes
6404                  * may be involved in the initial commit due to
6405                  * the wm0/ddb optimization.
6406                  *
6407                  * TODO maybe should track which planes actually
6408                  * were requested to do the async flip...
6409                  */
6410                 if (!plane->async_flip)
6411                         continue;
6412
6413                 /*
6414                  * FIXME: This check is kept generic for all platforms.
6415                  * Need to verify this for all gen9 platforms to enable
6416                  * this selectively if required.
6417                  */
6418                 switch (new_plane_state->hw.fb->modifier) {
6419                 case I915_FORMAT_MOD_X_TILED:
6420                 case I915_FORMAT_MOD_Y_TILED:
6421                 case I915_FORMAT_MOD_Yf_TILED:
6422                 case I915_FORMAT_MOD_4_TILED:
6423                         break;
6424                 default:
6425                         drm_dbg_kms(&i915->drm,
6426                                     "[PLANE:%d:%s] Modifier does not support async flips\n",
6427                                     plane->base.base.id, plane->base.name);
6428                         return -EINVAL;
6429                 }
6430
6431                 if (new_plane_state->hw.fb->format->num_planes > 1) {
6432                         drm_dbg_kms(&i915->drm,
6433                                     "[PLANE:%d:%s] Planar formats do not support async flips\n",
6434                                     plane->base.base.id, plane->base.name);
6435                         return -EINVAL;
6436                 }
6437
6438                 if (old_plane_state->view.color_plane[0].mapping_stride !=
6439                     new_plane_state->view.color_plane[0].mapping_stride) {
6440                         drm_dbg_kms(&i915->drm,
6441                                     "[PLANE:%d:%s] Stride cannot be changed in async flip\n",
6442                                     plane->base.base.id, plane->base.name);
6443                         return -EINVAL;
6444                 }
6445
6446                 if (old_plane_state->hw.fb->modifier !=
6447                     new_plane_state->hw.fb->modifier) {
6448                         drm_dbg_kms(&i915->drm,
6449                                     "[PLANE:%d:%s] Modifier cannot be changed in async flip\n",
6450                                     plane->base.base.id, plane->base.name);
6451                         return -EINVAL;
6452                 }
6453
6454                 if (old_plane_state->hw.fb->format !=
6455                     new_plane_state->hw.fb->format) {
6456                         drm_dbg_kms(&i915->drm,
6457                                     "[PLANE:%d:%s] Pixel format cannot be changed in async flip\n",
6458                                     plane->base.base.id, plane->base.name);
6459                         return -EINVAL;
6460                 }
6461
6462                 if (old_plane_state->hw.rotation !=
6463                     new_plane_state->hw.rotation) {
6464                         drm_dbg_kms(&i915->drm,
6465                                     "[PLANE:%d:%s] Rotation cannot be changed in async flip\n",
6466                                     plane->base.base.id, plane->base.name);
6467                         return -EINVAL;
6468                 }
6469
6470                 if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
6471                     !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
6472                         drm_dbg_kms(&i915->drm,
6473                                     "[PLANE:%d:%s] Size/co-ordinates cannot be changed in async flip\n",
6474                                     plane->base.base.id, plane->base.name);
6475                         return -EINVAL;
6476                 }
6477
6478                 if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
6479                         drm_dbg_kms(&i915->drm,
6480                                     "[PLANES:%d:%s] Alpha value cannot be changed in async flip\n",
6481                                     plane->base.base.id, plane->base.name);
6482                         return -EINVAL;
6483                 }
6484
6485                 if (old_plane_state->hw.pixel_blend_mode !=
6486                     new_plane_state->hw.pixel_blend_mode) {
6487                         drm_dbg_kms(&i915->drm,
6488                                     "[PLANE:%d:%s] Pixel blend mode cannot be changed in async flip\n",
6489                                     plane->base.base.id, plane->base.name);
6490                         return -EINVAL;
6491                 }
6492
6493                 if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
6494                         drm_dbg_kms(&i915->drm,
6495                                     "[PLANE:%d:%s] Color encoding cannot be changed in async flip\n",
6496                                     plane->base.base.id, plane->base.name);
6497                         return -EINVAL;
6498                 }
6499
6500                 if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
6501                         drm_dbg_kms(&i915->drm,
6502                                     "[PLANE:%d:%s] Color range cannot be changed in async flip\n",
6503                                     plane->base.base.id, plane->base.name);
6504                         return -EINVAL;
6505                 }
6506
6507                 /* plane decryption is allow to change only in synchronous flips */
6508                 if (old_plane_state->decrypt != new_plane_state->decrypt) {
6509                         drm_dbg_kms(&i915->drm,
6510                                     "[PLANE:%d:%s] Decryption cannot be changed in async flip\n",
6511                                     plane->base.base.id, plane->base.name);
6512                         return -EINVAL;
6513                 }
6514         }
6515
6516         return 0;
6517 }
6518
/*
 * Pull every crtc affected by a bigjoiner configuration into @state,
 * force a modeset on pipes linked to a crtc that itself needs a
 * modeset, and tear down any pre-existing bigjoiner link on crtcs
 * undergoing a modeset (it may be re-established later).
 *
 * Returns 0 on success or a negative error code (e.g. -EDEADLK from
 * state acquisition).
 */
static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	u8 affected_pipes = 0;
	u8 modeset_pipes = 0;
	int i;

	/* Phase 1: collect all linked pipes, and those needing a modeset. */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		affected_pipes |= crtc_state->bigjoiner_pipes;
		if (intel_crtc_needs_modeset(crtc_state))
			modeset_pipes |= crtc_state->bigjoiner_pipes;
	}

	/* Phase 2: acquire the state of every linked crtc. */
	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, affected_pipes) {
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);
	}

	/* Phase 3: propagate the modeset to the linked pipes. */
	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, modeset_pipes) {
		int ret;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
		if (ret)
			return ret;

		ret = intel_atomic_add_affected_planes(state, crtc);
		if (ret)
			return ret;
	}

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		/* Kill old bigjoiner link, we may re-establish afterwards */
		if (intel_crtc_needs_modeset(crtc_state) &&
		    intel_crtc_is_bigjoiner_master(crtc_state))
			kill_bigjoiner_slave(state, crtc);
	}

	return 0;
}
6565
6566 /**
6567  * intel_atomic_check - validate state object
6568  * @dev: drm device
6569  * @_state: state to validate
6570  */
6571 int intel_atomic_check(struct drm_device *dev,
6572                        struct drm_atomic_state *_state)
6573 {
6574         struct drm_i915_private *dev_priv = to_i915(dev);
6575         struct intel_atomic_state *state = to_intel_atomic_state(_state);
6576         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
6577         struct intel_crtc *crtc;
6578         int ret, i;
6579         bool any_ms = false;
6580
6581         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6582                                             new_crtc_state, i) {
6583                 /*
6584                  * crtc's state no longer considered to be inherited
6585                  * after the first userspace/client initiated commit.
6586                  */
6587                 if (!state->internal)
6588                         new_crtc_state->inherited = false;
6589
6590                 if (new_crtc_state->inherited != old_crtc_state->inherited)
6591                         new_crtc_state->uapi.mode_changed = true;
6592
6593                 if (new_crtc_state->uapi.scaling_filter !=
6594                     old_crtc_state->uapi.scaling_filter)
6595                         new_crtc_state->uapi.mode_changed = true;
6596         }
6597
6598         intel_vrr_check_modeset(state);
6599
6600         ret = drm_atomic_helper_check_modeset(dev, &state->base);
6601         if (ret)
6602                 goto fail;
6603
6604         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6605                 ret = intel_async_flip_check_uapi(state, crtc);
6606                 if (ret)
6607                         return ret;
6608         }
6609
6610         ret = intel_bigjoiner_add_affected_crtcs(state);
6611         if (ret)
6612                 goto fail;
6613
6614         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6615                                             new_crtc_state, i) {
6616                 if (!intel_crtc_needs_modeset(new_crtc_state)) {
6617                         if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
6618                                 copy_bigjoiner_crtc_state_nomodeset(state, crtc);
6619                         else
6620                                 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
6621                         continue;
6622                 }
6623
6624                 if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) {
6625                         drm_WARN_ON(&dev_priv->drm, new_crtc_state->uapi.enable);
6626                         continue;
6627                 }
6628
6629                 ret = intel_crtc_prepare_cleared_state(state, crtc);
6630                 if (ret)
6631                         goto fail;
6632
6633                 if (!new_crtc_state->hw.enable)
6634                         continue;
6635
6636                 ret = intel_modeset_pipe_config(state, crtc);
6637                 if (ret)
6638                         goto fail;
6639
6640                 ret = intel_atomic_check_bigjoiner(state, crtc);
6641                 if (ret)
6642                         goto fail;
6643         }
6644
6645         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6646                                             new_crtc_state, i) {
6647                 if (!intel_crtc_needs_modeset(new_crtc_state))
6648                         continue;
6649
6650                 if (new_crtc_state->hw.enable) {
6651                         ret = intel_modeset_pipe_config_late(state, crtc);
6652                         if (ret)
6653                                 goto fail;
6654                 }
6655
6656                 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
6657         }
6658
6659         /**
6660          * Check if fastset is allowed by external dependencies like other
6661          * pipes and transcoders.
6662          *
6663          * Right now it only forces a fullmodeset when the MST master
6664          * transcoder did not changed but the pipe of the master transcoder
6665          * needs a fullmodeset so all slaves also needs to do a fullmodeset or
6666          * in case of port synced crtcs, if one of the synced crtcs
6667          * needs a full modeset, all other synced crtcs should be
6668          * forced a full modeset.
6669          */
6670         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6671                 if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
6672                         continue;
6673
6674                 if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
6675                         enum transcoder master = new_crtc_state->mst_master_transcoder;
6676
6677                         if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
6678                                 new_crtc_state->uapi.mode_changed = true;
6679                                 new_crtc_state->update_pipe = false;
6680                         }
6681                 }
6682
6683                 if (is_trans_port_sync_mode(new_crtc_state)) {
6684                         u8 trans = new_crtc_state->sync_mode_slaves_mask;
6685
6686                         if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
6687                                 trans |= BIT(new_crtc_state->master_transcoder);
6688
6689                         if (intel_cpu_transcoders_need_modeset(state, trans)) {
6690                                 new_crtc_state->uapi.mode_changed = true;
6691                                 new_crtc_state->update_pipe = false;
6692                         }
6693                 }
6694
6695                 if (new_crtc_state->bigjoiner_pipes) {
6696                         if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes)) {
6697                                 new_crtc_state->uapi.mode_changed = true;
6698                                 new_crtc_state->update_pipe = false;
6699                         }
6700                 }
6701         }
6702
6703         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6704                                             new_crtc_state, i) {
6705                 if (!intel_crtc_needs_modeset(new_crtc_state))
6706                         continue;
6707
6708                 any_ms = true;
6709
6710                 intel_release_shared_dplls(state, crtc);
6711         }
6712
6713         if (any_ms && !check_digital_port_conflicts(state)) {
6714                 drm_dbg_kms(&dev_priv->drm,
6715                             "rejecting conflicting digital port configuration\n");
6716                 ret = -EINVAL;
6717                 goto fail;
6718         }
6719
6720         ret = drm_dp_mst_atomic_check(&state->base);
6721         if (ret)
6722                 goto fail;
6723
6724         ret = intel_atomic_check_planes(state);
6725         if (ret)
6726                 goto fail;
6727
6728         ret = intel_compute_global_watermarks(state);
6729         if (ret)
6730                 goto fail;
6731
6732         ret = intel_bw_atomic_check(state);
6733         if (ret)
6734                 goto fail;
6735
6736         ret = intel_cdclk_atomic_check(state, &any_ms);
6737         if (ret)
6738                 goto fail;
6739
6740         if (intel_any_crtc_needs_modeset(state))
6741                 any_ms = true;
6742
6743         if (any_ms) {
6744                 ret = intel_modeset_checks(state);
6745                 if (ret)
6746                         goto fail;
6747
6748                 ret = intel_modeset_calc_cdclk(state);
6749                 if (ret)
6750                         return ret;
6751         }
6752
6753         ret = intel_atomic_check_crtcs(state);
6754         if (ret)
6755                 goto fail;
6756
6757         ret = intel_fbc_atomic_check(state);
6758         if (ret)
6759                 goto fail;
6760
6761         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6762                                             new_crtc_state, i) {
6763                 intel_color_assert_luts(new_crtc_state);
6764
6765                 ret = intel_async_flip_check_hw(state, crtc);
6766                 if (ret)
6767                         goto fail;
6768
6769                 /* Either full modeset or fastset (or neither), never both */
6770                 drm_WARN_ON(&dev_priv->drm,
6771                             intel_crtc_needs_modeset(new_crtc_state) &&
6772                             intel_crtc_needs_fastset(new_crtc_state));
6773
6774                 if (!intel_crtc_needs_modeset(new_crtc_state) &&
6775                     !intel_crtc_needs_fastset(new_crtc_state))
6776                         continue;
6777
6778                 intel_crtc_state_dump(new_crtc_state, state,
6779                                       intel_crtc_needs_modeset(new_crtc_state) ?
6780                                       "modeset" : "fastset");
6781         }
6782
6783         return 0;
6784
6785  fail:
6786         if (ret == -EDEADLK)
6787                 return ret;
6788
6789         /*
6790          * FIXME would probably be nice to know which crtc specifically
6791          * caused the failure, in cases where we can pinpoint it.
6792          */
6793         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6794                                             new_crtc_state, i)
6795                 intel_crtc_state_dump(new_crtc_state, state, "failed");
6796
6797         return ret;
6798 }
6799
6800 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
6801 {
6802         struct intel_crtc_state *crtc_state;
6803         struct intel_crtc *crtc;
6804         int i, ret;
6805
6806         ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
6807         if (ret < 0)
6808                 return ret;
6809
6810         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
6811                 if (intel_crtc_needs_color_update(crtc_state))
6812                         intel_color_prepare_commit(crtc_state);
6813         }
6814
6815         return 0;
6816 }
6817
6818 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
6819                                   struct intel_crtc_state *crtc_state)
6820 {
6821         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6822
6823         if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes)
6824                 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
6825
6826         if (crtc_state->has_pch_encoder) {
6827                 enum pipe pch_transcoder =
6828                         intel_crtc_pch_transcoder(crtc);
6829
6830                 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
6831         }
6832 }
6833
/*
 * Apply the pipe-level updates that are allowed during a fastset
 * (i.e. without a full modeset): pipe source size, panel fitter,
 * linetime watermark, and seamless M/N values.
 *
 * Called from commit_pipe_pre_planes() between
 * intel_pipe_update_start()/intel_pipe_update_end(), i.e. under
 * vblank evasion.
 */
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */
	intel_set_pipe_src_size(new_crtc_state);

	/* on skylake this is done by detaching scalers */
	if (DISPLAY_VER(dev_priv) >= 9) {
		if (new_crtc_state->pch_pfit.enabled)
			skl_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		/* pre-skl PCH platforms: enable, reprogram, or disable the fitter */
		if (new_crtc_state->pch_pfit.enabled)
			ilk_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ilk_pfit_disable(old_crtc_state);
	}

	/*
	 * The register is supposedly single buffered so perhaps
	 * not 100% correct to do this here. But SKL+ calculate
	 * this based on the adjust pixel rate so pfit changes do
	 * affect it and so it must be updated for fastsets.
	 * HSW/BDW only really need this here for fastboot, after
	 * that the value should not change without a full modeset.
	 */
	if (DISPLAY_VER(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		hsw_set_linetime_wm(new_crtc_state);

	/* reprogram transcoder M1/N1 for seamless refresh rate changes */
	if (new_crtc_state->seamless_m_n)
		intel_cpu_transcoder_set_m1_n1(crtc, new_crtc_state->cpu_transcoder,
					       &new_crtc_state->dp_m_n);
}
6877
/*
 * Program the pipe-level state that must land before the planes are
 * armed: color management arming, PIPE_MISC, fastset pipe updates,
 * PSR2 manual tracking, and watermarks.
 *
 * For full modesets the pipe configuration was already programmed when
 * the CRTC was enabled, so only the non-modeset pieces run here.
 * Called under vblank evasion from intel_update_crtc().
 */
static void commit_pipe_pre_planes(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	/*
	 * During modesets pipe configuration was programmed as the
	 * CRTC was enabled.
	 */
	if (!modeset) {
		if (intel_crtc_needs_color_update(new_crtc_state))
			intel_color_commit_arm(new_crtc_state);

		if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
			bdw_set_pipe_misc(new_crtc_state);

		if (intel_crtc_needs_fastset(new_crtc_state))
			intel_pipe_fastset(old_crtc_state, new_crtc_state);
	}

	intel_psr2_program_trans_man_trk_ctl(new_crtc_state);

	intel_atomic_update_watermarks(state, crtc);
}
6907
6908 static void commit_pipe_post_planes(struct intel_atomic_state *state,
6909                                     struct intel_crtc *crtc)
6910 {
6911         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6912         const struct intel_crtc_state *new_crtc_state =
6913                 intel_atomic_get_new_crtc_state(state, crtc);
6914
6915         /*
6916          * Disable the scaler(s) after the plane(s) so that we don't
6917          * get a catastrophic underrun even if the two operations
6918          * end up happening in two different frames.
6919          */
6920         if (DISPLAY_VER(dev_priv) >= 9 &&
6921             !intel_crtc_needs_modeset(new_crtc_state))
6922                 skl_detach_scalers(new_crtc_state);
6923 }
6924
6925 static void intel_enable_crtc(struct intel_atomic_state *state,
6926                               struct intel_crtc *crtc)
6927 {
6928         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6929         const struct intel_crtc_state *new_crtc_state =
6930                 intel_atomic_get_new_crtc_state(state, crtc);
6931
6932         if (!intel_crtc_needs_modeset(new_crtc_state))
6933                 return;
6934
6935         /* VRR will be enable later, if required */
6936         intel_crtc_update_active_timings(new_crtc_state, false);
6937
6938         dev_priv->display.funcs.display->crtc_enable(state, crtc);
6939
6940         if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
6941                 return;
6942
6943         /* vblanks work again, re-enable pipe CRC. */
6944         intel_crtc_enable_pipe_crc(crtc);
6945 }
6946
/*
 * Commit the plane/pipe updates for one CRTC.
 *
 * The sequence is:
 *  1. preparatory work outside the vblank-evasion critical section:
 *     DPT configuration, VRR enabling, fastset encoder/pipe-chicken
 *     updates, FBC update, and the _noarm() color/plane register
 *     writes
 *  2. intel_pipe_update_start() to begin vblank evasion
 *  3. the arming writes: pipe state (commit_pipe_pre_planes()), plane
 *     _arm() writes, then scaler detach (commit_pipe_post_planes())
 *  4. intel_pipe_update_end() to close the critical section
 *
 * Requires the DC_OFF power domain to be held (asserted below).
 */
static void intel_update_crtc(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	/* (re)configure the DPT on modesets and BIOS-inherited states */
	if (old_crtc_state->inherited ||
	    intel_crtc_needs_modeset(new_crtc_state)) {
		if (HAS_DPT(i915))
			intel_dpt_configure(crtc);
	}

	if (vrr_enabling(old_crtc_state, new_crtc_state)) {
		intel_vrr_enable(new_crtc_state);
		intel_crtc_update_active_timings(new_crtc_state,
						 new_crtc_state->vrr.enable);
	}

	if (!modeset) {
		if (new_crtc_state->preload_luts &&
		    intel_crtc_needs_color_update(new_crtc_state))
			intel_color_load_luts(new_crtc_state);

		intel_pre_plane_update(state, crtc);

		if (intel_crtc_needs_fastset(new_crtc_state))
			intel_encoders_update_pipe(state, crtc);

		if (DISPLAY_VER(i915) >= 11 &&
		    intel_crtc_needs_fastset(new_crtc_state))
			icl_set_pipe_chicken(new_crtc_state);
	}

	intel_fbc_update(state, crtc);

	drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF));

	/* _noarm() writes happen before the vblank-evasion window */
	if (!modeset &&
	    intel_crtc_needs_color_update(new_crtc_state))
		intel_color_commit_noarm(new_crtc_state);

	intel_crtc_planes_update_noarm(state, crtc);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(new_crtc_state);

	commit_pipe_pre_planes(state, crtc);

	intel_crtc_planes_update_arm(state, crtc);

	commit_pipe_post_planes(state, crtc);

	intel_pipe_update_end(new_crtc_state);

	/*
	 * We usually enable FIFO underrun interrupts as part of the
	 * CRTC enable sequence during modesets.  But when we inherit a
	 * valid pipe configuration from the BIOS we need to take care
	 * of enabling them on the CRTC's first fastset.
	 */
	if (intel_crtc_needs_fastset(new_crtc_state) && !modeset &&
	    old_crtc_state->inherited)
		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
7015
/*
 * Disable one CRTC as part of a modeset.
 *
 * Tears down pipe CRC first, then disables the CRTC and FBC, and
 * finally programs initial watermarks for a pipe that will remain
 * disabled.
 *
 * NOTE(review): @old_crtc_state is currently unused by this helper;
 * @new_crtc_state is only consulted for hw.active.
 */
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
					  struct intel_crtc_state *old_crtc_state,
					  struct intel_crtc_state *new_crtc_state,
					  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	/*
	 * We need to disable pipe CRC before disabling the pipe,
	 * or we race against vblank off.
	 */
	intel_crtc_disable_pipe_crc(crtc);

	dev_priv->display.funcs.display->crtc_disable(state, crtc);
	crtc->active = false;
	intel_fbc_disable(crtc);

	/* pipe is staying off: program its initial watermarks now */
	if (!new_crtc_state->hw.active)
		intel_initial_watermarks(state, crtc);
}
7036
/*
 * Disable every CRTC that needs a full modeset, in an order that
 * respects master/slave dependencies:
 *  1. disable the planes on all affected CRTCs
 *  2. disable port sync slaves, MST slave transcoders and bigjoiner
 *     slaves
 *  3. disable everything else (including the corresponding masters)
 * The @handled bitmask tracks pipes disabled in step 2 so they are
 * not disabled twice in step 3.
 */
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u32 handled = 0;
	int i;

	/* Step 1: disable planes on all CRTCs being modeset */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		intel_pre_plane_update(state, crtc);
		intel_crtc_disable_planes(state, crtc);
	}

	/* Only disable port sync and MST slaves */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		/* In case of Transcoder port Sync master slave CRTCs can be
		 * assigned in any order and we need to make sure that
		 * slave CRTCs are disabled first and then master CRTC since
		 * Slave vblanks are masked till Master Vblanks.
		 */
		if (!is_trans_port_sync_slave(old_crtc_state) &&
		    !intel_dp_mst_is_slave_trans(old_crtc_state) &&
		    !intel_crtc_is_bigjoiner_slave(old_crtc_state))
			continue;

		intel_old_crtc_state_disables(state, old_crtc_state,
					      new_crtc_state, crtc);
		handled |= BIT(crtc->pipe);
	}

	/* Disable everything else left on */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state) ||
		    (handled & BIT(crtc->pipe)))
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		intel_old_crtc_state_disables(state, old_crtc_state,
					      new_crtc_state, crtc);
	}
}
7094
7095 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
7096 {
7097         struct intel_crtc_state *new_crtc_state;
7098         struct intel_crtc *crtc;
7099         int i;
7100
7101         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7102                 if (!new_crtc_state->hw.active)
7103                         continue;
7104
7105                 intel_enable_crtc(state, crtc);
7106                 intel_update_crtc(state, crtc);
7107         }
7108 }
7109
/*
 * skl+ modeset enable step: pipes must be updated in an order that
 * guarantees their DDB allocations never overlap at any point during
 * the commit, otherwise we risk pipe underruns.
 *
 * @entries tracks the DDB allocation currently in effect for each
 * pipe; a pipe is only committed once its new allocation no longer
 * overlaps any entry. Non-modeset pipes are updated first, then
 * modeset pipes are enabled (dependency-free ones before MST slaves /
 * port sync masters / bigjoiner masters), and finally the plane
 * updates for the newly enabled pipes are flushed with the same
 * non-overlap constraint.
 */
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
	u8 update_pipes = 0, modeset_pipes = 0;
	int i;

	/* classify pipes and seed @entries with the current allocations */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if (!new_crtc_state->hw.active)
			continue;

		/* ignore allocations for crtc's that have been turned off. */
		if (!intel_crtc_needs_modeset(new_crtc_state)) {
			entries[pipe] = old_crtc_state->wm.skl.ddb;
			update_pipes |= BIT(pipe);
		} else {
			modeset_pipes |= BIT(pipe);
		}
	}

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other between CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 *
	 * So first lets enable all pipes that do not need a fullmodeset as
	 * those don't have any external dependency.
	 */
	while (update_pipes) {
		/*
		 * Each pass commits every pipe whose new allocation is
		 * currently conflict-free; repeated passes resolve chains
		 * of DDB moves.
		 */
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
						    new_crtc_state, i) {
			enum pipe pipe = crtc->pipe;

			if ((update_pipes & BIT(pipe)) == 0)
				continue;

			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
							entries, I915_MAX_PIPES, pipe))
				continue;

			entries[pipe] = new_crtc_state->wm.skl.ddb;
			update_pipes &= ~BIT(pipe);

			intel_update_crtc(state, crtc);

			/*
			 * If this is an already active pipe, it's DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
						 &old_crtc_state->wm.skl.ddb) &&
			    (update_pipes | modeset_pipes))
				intel_crtc_wait_for_next_vblank(crtc);
		}
	}

	update_pipes = modeset_pipes;

	/*
	 * Enable all pipes that needs a modeset and do not depends on other
	 * pipes
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
		    is_trans_port_sync_master(new_crtc_state) ||
		    intel_crtc_is_bigjoiner_master(new_crtc_state))
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Then we enable all remaining pipes that depend on other
	 * pipes: MST slaves and port sync masters, big joiner master
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Finally we do the plane updates/etc. for all pipes that got enabled.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((update_pipes & BIT(pipe)) == 0)
			continue;

		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
									entries, I915_MAX_PIPES, pipe));

		entries[pipe] = new_crtc_state->wm.skl.ddb;
		update_pipes &= ~BIT(pipe);

		intel_update_crtc(state, crtc);
	}

	/* every pipe must have been committed by now */
	drm_WARN_ON(&dev_priv->drm, modeset_pipes);
	drm_WARN_ON(&dev_priv->drm, update_pipes);
}
7231
7232 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
7233 {
7234         struct intel_atomic_state *state, *next;
7235         struct llist_node *freed;
7236
7237         freed = llist_del_all(&dev_priv->display.atomic_helper.free_list);
7238         llist_for_each_entry_safe(state, next, freed, freed)
7239                 drm_atomic_state_put(&state->base);
7240 }
7241
7242 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
7243 {
7244         struct drm_i915_private *dev_priv =
7245                 container_of(work, typeof(*dev_priv), display.atomic_helper.free_work);
7246
7247         intel_atomic_helper_free_state(dev_priv);
7248 }
7249
/*
 * Wait (uninterruptibly) for the commit_ready fence to signal.
 *
 * The wait is registered on two queues at once: the fence's own wait
 * queue and the bit waitqueue for I915_RESET_MODESET, so the loop also
 * wakes and exits when a modeset-affecting GPU reset begins.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);
	for (;;) {
		/* queue ourselves on both waitqueues before checking the conditions */
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
					      I915_RESET_MODESET),
				&wait_reset, TASK_UNINTERRUPTIBLE);


		/* done when the fence signals or a modeset reset is pending */
		if (i915_sw_fence_done(&intel_state->commit_ready) ||
		    test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
	finish_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
				  I915_RESET_MODESET),
		    &wait_reset);
}
7276
7277 static void intel_atomic_cleanup_work(struct work_struct *work)
7278 {
7279         struct intel_atomic_state *state =
7280                 container_of(work, struct intel_atomic_state, base.commit_work);
7281         struct drm_i915_private *i915 = to_i915(state->base.dev);
7282         struct intel_crtc_state *old_crtc_state;
7283         struct intel_crtc *crtc;
7284         int i;
7285
7286         for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i)
7287                 intel_color_cleanup_commit(old_crtc_state);
7288
7289         drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
7290         drm_atomic_helper_commit_cleanup_done(&state->base);
7291         drm_atomic_state_put(&state->base);
7292
7293         intel_atomic_helper_free_state(i915);
7294 }
7295
7296 static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
7297 {
7298         struct drm_i915_private *i915 = to_i915(state->base.dev);
7299         struct intel_plane *plane;
7300         struct intel_plane_state *plane_state;
7301         int i;
7302
7303         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7304                 struct drm_framebuffer *fb = plane_state->hw.fb;
7305                 int cc_plane;
7306                 int ret;
7307
7308                 if (!fb)
7309                         continue;
7310
7311                 cc_plane = intel_fb_rc_ccs_cc_plane(fb);
7312                 if (cc_plane < 0)
7313                         continue;
7314
7315                 /*
7316                  * The layout of the fast clear color value expected by HW
7317                  * (the DRM ABI requiring this value to be located in fb at
7318                  * offset 0 of cc plane, plane #2 previous generations or
7319                  * plane #1 for flat ccs):
7320                  * - 4 x 4 bytes per-channel value
7321                  *   (in surface type specific float/int format provided by the fb user)
7322                  * - 8 bytes native color value used by the display
7323                  *   (converted/written by GPU during a fast clear operation using the
7324                  *    above per-channel values)
7325                  *
7326                  * The commit's FB prepare hook already ensured that FB obj is pinned and the
7327                  * caller made sure that the object is synced wrt. the related color clear value
7328                  * GPU write on it.
7329                  */
7330                 ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
7331                                                      fb->offsets[cc_plane] + 16,
7332                                                      &plane_state->ccval,
7333                                                      sizeof(plane_state->ccval));
7334                 /* The above could only fail if the FB obj has an unexpected backing store type. */
7335                 drm_WARN_ON(&i915->drm, ret);
7336         }
7337 }
7338
7339 static void intel_atomic_commit_tail(struct intel_atomic_state *state)
7340 {
7341         struct drm_device *dev = state->base.dev;
7342         struct drm_i915_private *dev_priv = to_i915(dev);
7343         struct intel_crtc_state *new_crtc_state, *old_crtc_state;
7344         struct intel_crtc *crtc;
7345         struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {};
7346         intel_wakeref_t wakeref = 0;
7347         int i;
7348
7349         intel_atomic_commit_fence_wait(state);
7350
7351         drm_atomic_helper_wait_for_dependencies(&state->base);
7352         drm_dp_mst_atomic_wait_for_dependencies(&state->base);
7353
7354         /*
7355          * During full modesets we write a lot of registers, wait
7356          * for PLLs, etc. Doing that while DC states are enabled
7357          * is not a good idea.
7358          *
7359          * During fastsets and other updates we also need to
7360          * disable DC states due to the following scenario:
7361          * 1. DC5 exit and PSR exit happen
7362          * 2. Some or all _noarm() registers are written
7363          * 3. Due to some long delay PSR is re-entered
7364          * 4. DC5 entry -> DMC saves the already written new
7365          *    _noarm() registers and the old not yet written
7366          *    _arm() registers
7367          * 5. DC5 exit -> DMC restores a mixture of old and
7368          *    new register values and arms the update
7369          * 6. PSR exit -> hardware latches a mixture of old and
7370          *    new register values -> corrupted frame, or worse
7371          * 7. New _arm() registers are finally written
7372          * 8. Hardware finally latches a complete set of new
7373          *    register values, and subsequent frames will be OK again
7374          *
7375          * Also note that due to the pipe CSC hardware issues on
7376          * SKL/GLK DC states must remain off until the pipe CSC
7377          * state readout has happened. Otherwise we risk corrupting
7378          * the CSC latched register values with the readout (see
7379          * skl_read_csc() and skl_color_commit_noarm()).
7380          */
7381         wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF);
7382
7383         intel_atomic_prepare_plane_clear_colors(state);
7384
7385         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7386                                             new_crtc_state, i) {
7387                 if (intel_crtc_needs_modeset(new_crtc_state) ||
7388                     intel_crtc_needs_fastset(new_crtc_state))
7389                         intel_modeset_get_crtc_power_domains(new_crtc_state, &put_domains[crtc->pipe]);
7390         }
7391
7392         intel_commit_modeset_disables(state);
7393
7394         /* FIXME: Eventually get rid of our crtc->config pointer */
7395         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
7396                 crtc->config = new_crtc_state;
7397
7398         if (state->modeset) {
7399                 drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
7400
7401                 intel_set_cdclk_pre_plane_update(state);
7402
7403                 intel_modeset_verify_disabled(dev_priv, state);
7404         }
7405
7406         intel_sagv_pre_plane_update(state);
7407
7408         /* Complete the events for pipes that have now been disabled */
7409         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7410                 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
7411
7412                 /* Complete events for now disable pipes here. */
7413                 if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
7414                         spin_lock_irq(&dev->event_lock);
7415                         drm_crtc_send_vblank_event(&crtc->base,
7416                                                    new_crtc_state->uapi.event);
7417                         spin_unlock_irq(&dev->event_lock);
7418
7419                         new_crtc_state->uapi.event = NULL;
7420                 }
7421         }
7422
7423         intel_encoders_update_prepare(state);
7424
7425         intel_dbuf_pre_plane_update(state);
7426         intel_mbus_dbox_update(state);
7427
7428         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7429                 if (new_crtc_state->do_async_flip)
7430                         intel_crtc_enable_flip_done(state, crtc);
7431         }
7432
7433         /* Now enable the clocks, plane, pipe, and connectors that we set up. */
7434         dev_priv->display.funcs.display->commit_modeset_enables(state);
7435
7436         if (state->modeset)
7437                 intel_set_cdclk_post_plane_update(state);
7438
7439         intel_wait_for_vblank_workers(state);
7440
7441         /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
7442          * already, but still need the state for the delayed optimization. To
7443          * fix this:
7444          * - wrap the optimization/post_plane_update stuff into a per-crtc work.
7445          * - schedule that vblank worker _before_ calling hw_done
7446          * - at the start of commit_tail, cancel it _synchrously
7447          * - switch over to the vblank wait helper in the core after that since
7448          *   we don't need out special handling any more.
7449          */
7450         drm_atomic_helper_wait_for_flip_done(dev, &state->base);
7451
7452         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7453                 if (new_crtc_state->do_async_flip)
7454                         intel_crtc_disable_flip_done(state, crtc);
7455         }
7456
7457         /*
7458          * Now that the vblank has passed, we can go ahead and program the
7459          * optimal watermarks on platforms that need two-step watermark
7460          * programming.
7461          *
7462          * TODO: Move this (and other cleanup) to an async worker eventually.
7463          */
7464         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7465                                             new_crtc_state, i) {
7466                 /*
7467                  * Gen2 reports pipe underruns whenever all planes are disabled.
7468                  * So re-enable underrun reporting after some planes get enabled.
7469                  *
7470                  * We do this before .optimize_watermarks() so that we have a
7471                  * chance of catching underruns with the intermediate watermarks
7472                  * vs. the new plane configuration.
7473                  */
7474                 if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
7475                         intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
7476
7477                 intel_optimize_watermarks(state, crtc);
7478         }
7479
7480         intel_dbuf_post_plane_update(state);
7481         intel_psr_post_plane_update(state);
7482
7483         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7484                 intel_post_plane_update(state, crtc);
7485
7486                 intel_modeset_put_crtc_power_domains(crtc, &put_domains[crtc->pipe]);
7487
7488                 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
7489
7490                 /* Must be done after gamma readout due to HSW split gamma vs. IPS w/a */
7491                 hsw_ips_post_update(state, crtc);
7492
7493                 /*
7494                  * Activate DRRS after state readout to avoid
7495                  * dp_m_n vs. dp_m2_n2 confusion on BDW+.
7496                  */
7497                 intel_drrs_activate(new_crtc_state);
7498
7499                 /*
7500                  * DSB cleanup is done in cleanup_work aligning with framebuffer
7501                  * cleanup. So copy and reset the dsb structure to sync with
7502                  * commit_done and later do dsb cleanup in cleanup_work.
7503                  *
7504                  * FIXME get rid of this funny new->old swapping
7505                  */
7506                 old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
7507         }
7508
7509         /* Underruns don't always raise interrupts, so check manually */
7510         intel_check_cpu_fifo_underruns(dev_priv);
7511         intel_check_pch_fifo_underruns(dev_priv);
7512
7513         if (state->modeset)
7514                 intel_verify_planes(state);
7515
7516         intel_sagv_post_plane_update(state);
7517
7518         drm_atomic_helper_commit_hw_done(&state->base);
7519
7520         if (state->modeset) {
7521                 /* As one of the primary mmio accessors, KMS has a high
7522                  * likelihood of triggering bugs in unclaimed access. After we
7523                  * finish modesetting, see if an error has been flagged, and if
7524                  * so enable debugging for the next modeset - and hope we catch
7525                  * the culprit.
7526                  */
7527                 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
7528         }
7529         intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF, wakeref);
7530         intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
7531
7532         /*
7533          * Defer the cleanup of the old state to a separate worker to not
7534          * impede the current task (userspace for blocking modesets) that
7535          * are executed inline. For out-of-line asynchronous modesets/flips,
7536          * deferring to a new worker seems overkill, but we would place a
7537          * schedule point (cond_resched()) here anyway to keep latencies
7538          * down.
7539          */
7540         INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
7541         queue_work(system_highpri_wq, &state->base.commit_work);
7542 }
7543
7544 static void intel_atomic_commit_work(struct work_struct *work)
7545 {
7546         struct intel_atomic_state *state =
7547                 container_of(work, struct intel_atomic_state, base.commit_work);
7548
7549         intel_atomic_commit_tail(state);
7550 }
7551
7552 static int
7553 intel_atomic_commit_ready(struct i915_sw_fence *fence,
7554                           enum i915_sw_fence_notify notify)
7555 {
7556         struct intel_atomic_state *state =
7557                 container_of(fence, struct intel_atomic_state, commit_ready);
7558
7559         switch (notify) {
7560         case FENCE_COMPLETE:
7561                 /* we do blocking waits in the worker, nothing to do here */
7562                 break;
7563         case FENCE_FREE:
7564                 {
7565                         struct intel_atomic_helper *helper =
7566                                 &to_i915(state->base.dev)->display.atomic_helper;
7567
7568                         if (llist_add(&state->freed, &helper->free_list))
7569                                 schedule_work(&helper->free_work);
7570                         break;
7571                 }
7572         }
7573
7574         return NOTIFY_DONE;
7575 }
7576
7577 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
7578 {
7579         struct intel_plane_state *old_plane_state, *new_plane_state;
7580         struct intel_plane *plane;
7581         int i;
7582
7583         for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
7584                                              new_plane_state, i)
7585                 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
7586                                         to_intel_frontbuffer(new_plane_state->hw.fb),
7587                                         plane->frontbuffer_bit);
7588 }
7589
/*
 * i915 implementation of drm_mode_config_funcs.atomic_commit: prepare the
 * state, swap it into place, and either run the commit tail inline
 * (blocking) or queue it on the appropriate workqueue (nonblocking).
 * Returns 0 on success or a negative error code.
 */
static int intel_atomic_commit(struct drm_device *dev,
                               struct drm_atomic_state *_state,
                               bool nonblock)
{
        struct intel_atomic_state *state = to_intel_atomic_state(_state);
        struct drm_i915_private *dev_priv = to_i915(dev);
        int ret = 0;

        /* Released by intel_atomic_commit_tail() (or below on error). */
        state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

        drm_atomic_state_get(&state->base);
        i915_sw_fence_init(&state->commit_ready,
                           intel_atomic_commit_ready);

        /*
         * The intel_legacy_cursor_update() fast path takes care
         * of avoiding the vblank waits for simple cursor
         * movement and flips. For cursor on/off and size changes,
         * we want to perform the vblank waits so that watermark
         * updates happen during the correct frames. Gen9+ have
         * double buffered watermarks and so shouldn't need this.
         *
         * Unset state->legacy_cursor_update before the call to
         * drm_atomic_helper_setup_commit() because otherwise
         * drm_atomic_helper_wait_for_flip_done() is a noop and
         * we get FIFO underruns because we didn't wait
         * for vblank.
         *
         * FIXME doing watermarks and fb cleanup from a vblank worker
         * (assuming we had any) would solve these problems.
         */
        if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
                struct intel_crtc_state *new_crtc_state;
                struct intel_crtc *crtc;
                int i;

                for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
                        if (new_crtc_state->wm.need_postvbl_update ||
                            new_crtc_state->update_wm_post)
                                state->base.legacy_cursor_update = false;
        }

        ret = intel_atomic_prepare_commit(state);
        if (ret) {
                /* Commit the fence so FENCE_FREE can release the state. */
                drm_dbg_atomic(&dev_priv->drm,
                               "Preparing state failed with %i\n", ret);
                i915_sw_fence_commit(&state->commit_ready);
                intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
                return ret;
        }

        /* Set up commit tracking, then swap in the new state. */
        ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
        if (!ret)
                ret = drm_atomic_helper_swap_state(&state->base, true);
        if (!ret)
                intel_atomic_swap_global_state(state);

        if (ret) {
                struct intel_crtc_state *new_crtc_state;
                struct intel_crtc *crtc;
                int i;

                i915_sw_fence_commit(&state->commit_ready);

                /* Free any color LUTs allocated during atomic check. */
                for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
                        intel_color_cleanup_commit(new_crtc_state);

                drm_atomic_helper_cleanup_planes(dev, &state->base);
                intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
                return ret;
        }
        intel_shared_dpll_swap_state(state);
        intel_atomic_track_fbs(state);

        /* This reference is dropped by intel_atomic_cleanup_work(). */
        drm_atomic_state_get(&state->base);
        INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

        i915_sw_fence_commit(&state->commit_ready);
        if (nonblock && state->modeset) {
                queue_work(dev_priv->display.wq.modeset, &state->base.commit_work);
        } else if (nonblock) {
                queue_work(dev_priv->display.wq.flip, &state->base.commit_work);
        } else {
                /* Don't overtake pending nonblocking modesets. */
                if (state->modeset)
                        flush_workqueue(dev_priv->display.wq.modeset);
                intel_atomic_commit_tail(state);
        }

        return 0;
}
7680
7681 /**
7682  * intel_plane_destroy - destroy a plane
7683  * @plane: plane to destroy
7684  *
7685  * Common destruction function for all types of planes (primary, cursor,
7686  * sprite).
7687  */
7688 void intel_plane_destroy(struct drm_plane *plane)
7689 {
7690         drm_plane_cleanup(plane);
7691         kfree(to_intel_plane(plane));
7692 }
7693
7694 static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
7695 {
7696         struct intel_plane *plane;
7697
7698         for_each_intel_plane(&dev_priv->drm, plane) {
7699                 struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv,
7700                                                               plane->pipe);
7701
7702                 plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
7703         }
7704 }
7705
7706
7707 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
7708                                       struct drm_file *file)
7709 {
7710         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
7711         struct drm_crtc *drmmode_crtc;
7712         struct intel_crtc *crtc;
7713
7714         drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
7715         if (!drmmode_crtc)
7716                 return -ENOENT;
7717
7718         crtc = to_intel_crtc(drmmode_crtc);
7719         pipe_from_crtc_id->pipe = crtc->pipe;
7720
7721         return 0;
7722 }
7723
7724 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
7725 {
7726         struct drm_device *dev = encoder->base.dev;
7727         struct intel_encoder *source_encoder;
7728         u32 possible_clones = 0;
7729
7730         for_each_intel_encoder(dev, source_encoder) {
7731                 if (encoders_cloneable(encoder, source_encoder))
7732                         possible_clones |= drm_encoder_mask(&source_encoder->base);
7733         }
7734
7735         return possible_clones;
7736 }
7737
7738 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
7739 {
7740         struct drm_device *dev = encoder->base.dev;
7741         struct intel_crtc *crtc;
7742         u32 possible_crtcs = 0;
7743
7744         for_each_intel_crtc_in_pipe_mask(dev, crtc, encoder->pipe_mask)
7745                 possible_crtcs |= drm_crtc_mask(&crtc->base);
7746
7747         return possible_crtcs;
7748 }
7749
7750 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
7751 {
7752         if (!IS_MOBILE(dev_priv))
7753                 return false;
7754
7755         if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
7756                 return false;
7757
7758         if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
7759                 return false;
7760
7761         return true;
7762 }
7763
7764 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
7765 {
7766         if (DISPLAY_VER(dev_priv) >= 9)
7767                 return false;
7768
7769         if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
7770                 return false;
7771
7772         if (HAS_PCH_LPT_H(dev_priv) &&
7773             intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
7774                 return false;
7775
7776         /* DDI E can't be used if DDI A requires 4 lanes */
7777         if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
7778                 return false;
7779
7780         if (!dev_priv->display.vbt.int_crt_support)
7781                 return false;
7782
7783         return true;
7784 }
7785
/*
 * Probe and register all display outputs (encoders/connectors) for this
 * platform, dispatching on platform/display version, then fill in each
 * encoder's possible_crtcs/possible_clones masks and move panel connectors
 * to the head of the connector list.
 */
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_unlock_regs_wa(dev_priv);

	if (!HAS_DISPLAY(dev_priv))
		return;

	if (IS_METEORLAKE(dev_priv)) {
		/* TODO: initialize TC ports as well */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
	} else if (IS_DG2(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D_XELPD);
		intel_ddi_init(dev_priv, PORT_TC1);
	} else if (IS_ALDERLAKE_P(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
		intel_ddi_init(dev_priv, PORT_TC3);
		intel_ddi_init(dev_priv, PORT_TC4);
		icl_dsi_init(dev_priv);
	} else if (IS_ALDERLAKE_S(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
		intel_ddi_init(dev_priv, PORT_TC3);
		intel_ddi_init(dev_priv, PORT_TC4);
	} else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
		intel_ddi_init(dev_priv, PORT_TC3);
		intel_ddi_init(dev_priv, PORT_TC4);
		intel_ddi_init(dev_priv, PORT_TC5);
		intel_ddi_init(dev_priv, PORT_TC6);
		icl_dsi_init(dev_priv);
	} else if (IS_JSL_EHL(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		icl_dsi_init(dev_priv);
	} else if (DISPLAY_VER(dev_priv) == 11) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		intel_ddi_init(dev_priv, PORT_F);
		icl_dsi_init(dev_priv);
	} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		vlv_dsi_init(dev_priv);
	} else if (DISPLAY_VER(dev_priv) >= 9) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
	} else if (HAS_DDI(dev_priv)) {
		u32 found;

		if (intel_ddi_crt_present(dev_priv))
			intel_crt_init(dev_priv);

		/* Haswell uses DDI functions to detect digital outputs. */
		found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		if (found)
			intel_ddi_init(dev_priv, PORT_A);

		/* DDI B/C/D/F presence is indicated by SFUSE_STRAP bits. */
		found = intel_de_read(dev_priv, SFUSE_STRAP);
		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev_priv, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev_priv, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev_priv, PORT_D);
		if (found & SFUSE_STRAP_DDIF_DETECTED)
			intel_ddi_init(dev_priv, PORT_F);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		int found;

		/*
		 * intel_edp_init_connector() depends on this completing first,
		 * to prevent the registration of both eDP and LVDS and the
		 * incorrect sharing of the PPS.
		 */
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);

		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

		if (ilk_has_edp_a(dev_priv))
			g4x_dp_init(dev_priv, DP_A, PORT_A);

		if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
				g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
			g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		/* Port D is skipped for HDMI when the VBT says it is eDP. */
		if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
			g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
			g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
			g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		if (IS_VALLEYVIEW(dev_priv) && dev_priv->display.vbt.int_crt_support)
			intel_crt_init(dev_priv);

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
		if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
		if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
				g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
			if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
				g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
		}

		vlv_dsi_init(dev_priv);
	} else if (IS_PINEVIEW(dev_priv)) {
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);
	} else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
		bool found = false;

		if (IS_MOBILE(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOB\n");
				g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				g4x_dp_init(dev_priv, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
		}

		if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOC\n");
				g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				g4x_dp_init(dev_priv, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
			g4x_dp_init(dev_priv, DP_D, PORT_D);

		if (SUPPORTS_TV(dev_priv))
			intel_tv_init(dev_priv);
	} else if (DISPLAY_VER(dev_priv) == 2) {
		if (IS_I85X(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);
		intel_dvo_init(dev_priv);
	}

	/* Fill possible_crtcs/possible_clones for all registered encoders. */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs =
			intel_encoder_possible_crtcs(encoder);
		encoder->base.possible_clones =
			intel_encoder_possible_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
8030
8031 static int max_dotclock(struct drm_i915_private *i915)
8032 {
8033         int max_dotclock = i915->max_dotclk_freq;
8034
8035         /* icl+ might use bigjoiner */
8036         if (DISPLAY_VER(i915) >= 11)
8037                 max_dotclock *= 2;
8038
8039         return max_dotclock;
8040 }
8041
8042 static enum drm_mode_status
8043 intel_mode_valid(struct drm_device *dev,
8044                  const struct drm_display_mode *mode)
8045 {
8046         struct drm_i915_private *dev_priv = to_i915(dev);
8047         int hdisplay_max, htotal_max;
8048         int vdisplay_max, vtotal_max;
8049
8050         /*
8051          * Can't reject DBLSCAN here because Xorg ddxen can add piles
8052          * of DBLSCAN modes to the output's mode list when they detect
8053          * the scaling mode property on the connector. And they don't
8054          * ask the kernel to validate those modes in any way until
8055          * modeset time at which point the client gets a protocol error.
8056          * So in order to not upset those clients we silently ignore the
8057          * DBLSCAN flag on such connectors. For other connectors we will
8058          * reject modes with the DBLSCAN flag in encoder->compute_config().
8059          * And we always reject DBLSCAN modes in connector->mode_valid()
8060          * as we never want such modes on the connector's mode list.
8061          */
8062
8063         if (mode->vscan > 1)
8064                 return MODE_NO_VSCAN;
8065
8066         if (mode->flags & DRM_MODE_FLAG_HSKEW)
8067                 return MODE_H_ILLEGAL;
8068
8069         if (mode->flags & (DRM_MODE_FLAG_CSYNC |
8070                            DRM_MODE_FLAG_NCSYNC |
8071                            DRM_MODE_FLAG_PCSYNC))
8072                 return MODE_HSYNC;
8073
8074         if (mode->flags & (DRM_MODE_FLAG_BCAST |
8075                            DRM_MODE_FLAG_PIXMUX |
8076                            DRM_MODE_FLAG_CLKDIV2))
8077                 return MODE_BAD;
8078
8079         /*
8080          * Reject clearly excessive dotclocks early to
8081          * avoid having to worry about huge integers later.
8082          */
8083         if (mode->clock > max_dotclock(dev_priv))
8084                 return MODE_CLOCK_HIGH;
8085
8086         /* Transcoder timing limits */
8087         if (DISPLAY_VER(dev_priv) >= 11) {
8088                 hdisplay_max = 16384;
8089                 vdisplay_max = 8192;
8090                 htotal_max = 16384;
8091                 vtotal_max = 8192;
8092         } else if (DISPLAY_VER(dev_priv) >= 9 ||
8093                    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
8094                 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
8095                 vdisplay_max = 4096;
8096                 htotal_max = 8192;
8097                 vtotal_max = 8192;
8098         } else if (DISPLAY_VER(dev_priv) >= 3) {
8099                 hdisplay_max = 4096;
8100                 vdisplay_max = 4096;
8101                 htotal_max = 8192;
8102                 vtotal_max = 8192;
8103         } else {
8104                 hdisplay_max = 2048;
8105                 vdisplay_max = 2048;
8106                 htotal_max = 4096;
8107                 vtotal_max = 4096;
8108         }
8109
8110         if (mode->hdisplay > hdisplay_max ||
8111             mode->hsync_start > htotal_max ||
8112             mode->hsync_end > htotal_max ||
8113             mode->htotal > htotal_max)
8114                 return MODE_H_ILLEGAL;
8115
8116         if (mode->vdisplay > vdisplay_max ||
8117             mode->vsync_start > vtotal_max ||
8118             mode->vsync_end > vtotal_max ||
8119             mode->vtotal > vtotal_max)
8120                 return MODE_V_ILLEGAL;
8121
8122         if (DISPLAY_VER(dev_priv) >= 5) {
8123                 if (mode->hdisplay < 64 ||
8124                     mode->htotal - mode->hdisplay < 32)
8125                         return MODE_H_ILLEGAL;
8126
8127                 if (mode->vtotal - mode->vdisplay < 5)
8128                         return MODE_V_ILLEGAL;
8129         } else {
8130                 if (mode->htotal - mode->hdisplay < 32)
8131                         return MODE_H_ILLEGAL;
8132
8133                 if (mode->vtotal - mode->vdisplay < 3)
8134                         return MODE_V_ILLEGAL;
8135         }
8136
8137         /*
8138          * Cantiga+ cannot handle modes with a hsync front porch of 0.
8139          * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
8140          */
8141         if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
8142             mode->hsync_start == mode->hdisplay)
8143                 return MODE_H_ILLEGAL;
8144
8145         return MODE_OK;
8146 }
8147
8148 enum drm_mode_status
8149 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
8150                                 const struct drm_display_mode *mode,
8151                                 bool bigjoiner)
8152 {
8153         int plane_width_max, plane_height_max;
8154
8155         /*
8156          * intel_mode_valid() should be
8157          * sufficient on older platforms.
8158          */
8159         if (DISPLAY_VER(dev_priv) < 9)
8160                 return MODE_OK;
8161
8162         /*
8163          * Most people will probably want a fullscreen
8164          * plane so let's not advertize modes that are
8165          * too big for that.
8166          */
8167         if (DISPLAY_VER(dev_priv) >= 11) {
8168                 plane_width_max = 5120 << bigjoiner;
8169                 plane_height_max = 4320;
8170         } else {
8171                 plane_width_max = 5120;
8172                 plane_height_max = 4096;
8173         }
8174
8175         if (mode->hdisplay > plane_width_max)
8176                 return MODE_H_ILLEGAL;
8177
8178         if (mode->vdisplay > plane_height_max)
8179                 return MODE_V_ILLEGAL;
8180
8181         return MODE_OK;
8182 }
8183
/* Top-level KMS mode-config hooks for i915 (atomic check/commit, fbs). */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_fb_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
8195
/* Display vfuncs selected for DISPLAY_VER >= 9 (see intel_init_display_hooks). */
static const struct intel_display_funcs skl_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = skl_commit_modeset_enables,
	.get_initial_plane_config = skl_get_initial_plane_config,
};
8203
/* Display vfuncs selected for pre-gen9 DDI platforms (HAS_DDI). */
static const struct intel_display_funcs ddi_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};
8211
/* Display vfuncs selected for PCH-split platforms (HAS_PCH_SPLIT). */
static const struct intel_display_funcs pch_split_display_funcs = {
	.get_pipe_config = ilk_get_pipe_config,
	.crtc_enable = ilk_crtc_enable,
	.crtc_disable = ilk_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};
8219
/* Display vtable for Valleyview/Cherryview; selected in intel_init_display_hooks(). */
static const struct intel_display_funcs vlv_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = valleyview_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};
8227
/* Fallback display vtable for all remaining platforms; selected in intel_init_display_hooks(). */
static const struct intel_display_funcs i9xx_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = i9xx_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};
8235
8236 /**
8237  * intel_init_display_hooks - initialize the display modesetting hooks
8238  * @dev_priv: device private
8239  */
8240 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
8241 {
8242         if (!HAS_DISPLAY(dev_priv))
8243                 return;
8244
8245         intel_color_init_hooks(dev_priv);
8246         intel_init_cdclk_hooks(dev_priv);
8247         intel_audio_hooks_init(dev_priv);
8248
8249         intel_dpll_init_clock_hook(dev_priv);
8250
8251         if (DISPLAY_VER(dev_priv) >= 9) {
8252                 dev_priv->display.funcs.display = &skl_display_funcs;
8253         } else if (HAS_DDI(dev_priv)) {
8254                 dev_priv->display.funcs.display = &ddi_display_funcs;
8255         } else if (HAS_PCH_SPLIT(dev_priv)) {
8256                 dev_priv->display.funcs.display = &pch_split_display_funcs;
8257         } else if (IS_CHERRYVIEW(dev_priv) ||
8258                    IS_VALLEYVIEW(dev_priv)) {
8259                 dev_priv->display.funcs.display = &vlv_display_funcs;
8260         } else {
8261                 dev_priv->display.funcs.display = &i9xx_display_funcs;
8262         }
8263
8264         intel_fdi_init_hook(dev_priv);
8265 }
8266
8267 void intel_modeset_init_hw(struct drm_i915_private *i915)
8268 {
8269         struct intel_cdclk_state *cdclk_state;
8270
8271         if (!HAS_DISPLAY(i915))
8272                 return;
8273
8274         cdclk_state = to_intel_cdclk_state(i915->display.cdclk.obj.state);
8275
8276         intel_update_cdclk(i915);
8277         intel_cdclk_dump_config(i915, &i915->display.cdclk.hw, "Current CDCLK");
8278         cdclk_state->logical = cdclk_state->actual = i915->display.cdclk.hw;
8279 }
8280
/*
 * Commit the state read out from hardware as-is, forcing every active
 * CRTC's planes (and LUTs) to be recomputed so that later modesets start
 * from fully calculated software state (see the caller's comment in
 * intel_modeset_init()).
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

	state->acquire_ctx = &ctx;
	/* Mark this as a driver-internal commit, not a userspace one. */
	to_intel_atomic_state(state)->internal = true;

retry:
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			struct intel_encoder *encoder;

			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			/*
			 * If an encoder's fastset check fails, pull its
			 * connectors into the state so the commit can do
			 * a full modeset on that output.
			 */
			for_each_intel_encoder_mask(dev, encoder,
						    crtc_state->uapi.encoder_mask) {
				if (encoder->initial_fastset_check &&
				    !encoder->initial_fastset_check(encoder, crtc_state)) {
					ret = drm_atomic_add_affected_connectors(state,
										 &crtc->base);
					if (ret)
						goto out;
				}
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	/* ww-mutex deadlock: drop the built-up state, back off and retry. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
8351
/* Mode-config helper vtable: hooks DP MST payload setup into atomic commits. */
static const struct drm_mode_config_helper_funcs intel_mode_config_funcs = {
	.atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
};
8355
8356 static void intel_mode_config_init(struct drm_i915_private *i915)
8357 {
8358         struct drm_mode_config *mode_config = &i915->drm.mode_config;
8359
8360         drm_mode_config_init(&i915->drm);
8361         INIT_LIST_HEAD(&i915->display.global.obj_list);
8362
8363         mode_config->min_width = 0;
8364         mode_config->min_height = 0;
8365
8366         mode_config->preferred_depth = 24;
8367         mode_config->prefer_shadow = 1;
8368
8369         mode_config->funcs = &intel_mode_funcs;
8370         mode_config->helper_private = &intel_mode_config_funcs;
8371
8372         mode_config->async_page_flip = HAS_ASYNC_FLIPS(i915);
8373
8374         /*
8375          * Maximum framebuffer dimensions, chosen to match
8376          * the maximum render engine surface size on gen4+.
8377          */
8378         if (DISPLAY_VER(i915) >= 7) {
8379                 mode_config->max_width = 16384;
8380                 mode_config->max_height = 16384;
8381         } else if (DISPLAY_VER(i915) >= 4) {
8382                 mode_config->max_width = 8192;
8383                 mode_config->max_height = 8192;
8384         } else if (DISPLAY_VER(i915) == 3) {
8385                 mode_config->max_width = 4096;
8386                 mode_config->max_height = 4096;
8387         } else {
8388                 mode_config->max_width = 2048;
8389                 mode_config->max_height = 2048;
8390         }
8391
8392         if (IS_I845G(i915) || IS_I865G(i915)) {
8393                 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
8394                 mode_config->cursor_height = 1023;
8395         } else if (IS_I830(i915) || IS_I85X(i915) ||
8396                    IS_I915G(i915) || IS_I915GM(i915)) {
8397                 mode_config->cursor_width = 64;
8398                 mode_config->cursor_height = 64;
8399         } else {
8400                 mode_config->cursor_width = 256;
8401                 mode_config->cursor_height = 256;
8402         }
8403 }
8404
/* Tear down the i915 global atomic objects and the DRM mode config. */
static void intel_mode_config_cleanup(struct drm_i915_private *i915)
{
	intel_atomic_global_obj_cleanup(i915);
	drm_mode_config_cleanup(&i915->drm);
}
8410
/* part #1: call before irq install */
int intel_modeset_init_noirq(struct drm_i915_private *i915)
{
	int ret;

	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	if (HAS_DISPLAY(i915)) {
		ret = drm_vblank_init(&i915->drm,
				      INTEL_NUM_PIPES(i915));
		if (ret)
			return ret;
	}

	intel_bios_init(i915);

	ret = intel_vga_register(i915);
	if (ret)
		goto cleanup_bios;

	/* FIXME: completely on the wrong abstraction layer */
	ret = intel_power_domains_init(i915);
	if (ret < 0)
		goto cleanup_vga;

	intel_power_domains_init_hw(i915, false);

	/* BIOS/VGA/power domains stay initialized even without display. */
	if (!HAS_DISPLAY(i915))
		return 0;

	intel_dmc_init(i915);

	/* Ordered wq for modeset commits, high-prio unbound wq for flips. */
	/* NOTE(review): allocation results are not checked here — TODO confirm intended. */
	i915->display.wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
	i915->display.wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
						WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);

	intel_mode_config_init(i915);

	/* The following all share one unwind label; see below. */
	ret = intel_cdclk_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	ret = intel_color_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	ret = intel_dbuf_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	ret = intel_bw_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	/* Deferred freeing of atomic states (processed by the worker). */
	init_llist_head(&i915->display.atomic_helper.free_list);
	INIT_WORK(&i915->display.atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(i915);

	intel_fbc_init(i915);

	return 0;

cleanup_vga_client_pw_domain_dmc:
	intel_dmc_fini(i915);
	intel_power_domains_driver_remove(i915);
cleanup_vga:
	intel_vga_unregister(i915);
cleanup_bios:
	intel_bios_driver_remove(i915);

	return ret;
}
8486
/* part #2: call after irq install, but before gem init */
int intel_modeset_init_nogem(struct drm_i915_private *i915)
{
	struct drm_device *dev = &i915->drm;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	if (!HAS_DISPLAY(i915))
		return 0;

	intel_wm_init(i915);

	intel_panel_sanitize_ssc(i915);

	intel_pps_setup(i915);

	intel_gmbus_setup(i915);

	drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
		    INTEL_NUM_PIPES(i915),
		    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

	for_each_pipe(i915, pipe) {
		ret = intel_crtc_init(i915, pipe);
		if (ret) {
			/* Tears down the CRTCs created so far as well. */
			intel_mode_config_cleanup(i915);
			return ret;
		}
	}

	intel_plane_possible_crtcs_init(i915);
	intel_shared_dpll_init(i915);
	intel_fdi_pll_freq_update(i915);

	intel_update_czclk(i915);
	intel_modeset_init_hw(i915);
	intel_dpll_update_ref_clks(i915);

	intel_hdcp_component_init(i915);

	if (i915->display.cdclk.max_cdclk_freq == 0)
		intel_update_max_cdclk(i915);

	intel_hti_init(i915);

	/* Just disable it once at startup */
	intel_vga_disable(i915);
	intel_setup_outputs(i915);

	/* Read out the hw state left behind by BIOS/GOP under full lock. */
	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(i915, dev->mode_config.acquire_ctx);
	intel_acpi_assign_connector_fwnodes(i915);
	drm_modeset_unlock_all(dev);

	/* Reconstruct the BIOS framebuffer on every active pipe. */
	for_each_intel_crtc(dev, crtc) {
		if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
			continue;
		intel_crtc_initial_plane_config(crtc);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(i915))
		ilk_wm_sanitize(i915);

	return 0;
}
8558
/* part #3: call after gem init */
int intel_modeset_init(struct drm_i915_private *i915)
{
	int ret;

	if (!HAS_DISPLAY(i915))
		return 0;

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(&i915->drm);
	if (ret)
		/* Non-fatal: log it and continue with initialization. */
		drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);

	intel_overlay_setup(i915);

	ret = intel_fbdev_init(&i915->drm);
	if (ret)
		return ret;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(i915);
	intel_hpd_poll_disable(i915);

	skl_watermark_ipc_init(i915);

	return 0;
}
8591
/*
 * Force-enable a pipe with fixed 640x480@60 timings due to a force-pipe
 * quirk (see the drm_dbg_kms below). Programs the transcoder timings and
 * DPLL registers directly, bypassing the normal modeset paths.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
	/* Pipe and transcoder are numbered identically here. */
	enum transcoder cpu_transcoder = (enum transcoder)pipe;
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* Sanity-check that the fixed dividers give the expected dot clock. */
	drm_WARN_ON(&dev_priv->drm,
		    i9xx_calc_dpll_params(48000, &clock) != 25154);

	drm_dbg_kms(&dev_priv->drm,
		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		    pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	/* Program the standard 640x480 CRT timings. */
	intel_de_write(dev_priv, TRANS_HTOTAL(cpu_transcoder),
		       HACTIVE(640 - 1) | HTOTAL(800 - 1));
	intel_de_write(dev_priv, TRANS_HBLANK(cpu_transcoder),
		       HBLANK_START(640 - 1) | HBLANK_END(800 - 1));
	intel_de_write(dev_priv, TRANS_HSYNC(cpu_transcoder),
		       HSYNC_START(656 - 1) | HSYNC_END(752 - 1));
	intel_de_write(dev_priv, TRANS_VTOTAL(cpu_transcoder),
		       VACTIVE(480 - 1) | VTOTAL(525 - 1));
	intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder),
		       VBLANK_START(480 - 1) | VBLANK_END(525 - 1));
	intel_de_write(dev_priv, TRANS_VSYNC(cpu_transcoder),
		       VSYNC_START(490 - 1) | VSYNC_END(492 - 1));
	intel_de_write(dev_priv, PIPESRC(pipe),
		       PIPESRC_WIDTH(640 - 1) | PIPESRC_HEIGHT(480 - 1));

	intel_de_write(dev_priv, FP0(pipe), fp);
	intel_de_write(dev_priv, FP1(pipe), fp);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		intel_de_write(dev_priv, DPLL(pipe), dpll);
		intel_de_posting_read(dev_priv, DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	intel_de_write(dev_priv, TRANSCONF(pipe), TRANSCONF_ENABLE);
	intel_de_posting_read(dev_priv, TRANSCONF(pipe));

	/* Confirm the pipe is actually running before returning. */
	intel_wait_for_pipe_scanline_moving(crtc);
}
8671
/*
 * Counterpart of i830_enable_pipe(): shut down the force-quirk pipe and
 * its DPLL. The WARNs verify no planes or cursors are still enabled on
 * the hardware being turned off.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
		    pipe_name(pipe));

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE_MASK);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE_MASK);

	intel_de_write(dev_priv, TRANSCONF(pipe), 0);
	intel_de_posting_read(dev_priv, TRANSCONF(pipe));

	/* The pipe must stop scanning out before the DPLL is disabled. */
	intel_wait_for_pipe_scanline_stopped(crtc);

	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}
8698
/*
 * Restore the display state saved across suspend (if any) via
 * __intel_display_resume(), taking all modeset locks for the duration.
 */
void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);
	/* State stashed at suspend time; may be NULL. */
	struct drm_atomic_state *state = i915->display.restore.modeset_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	if (!HAS_DISPLAY(i915))
		return;

	/* Consume the saved state; it is released at the end of this function. */
	i915->display.restore.modeset_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	/* Take every modeset lock, backing off on ww-mutex deadlock. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_resume(i915, state, &ctx);

	skl_watermark_ipc_update(i915);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		drm_err(&i915->drm,
			"Restoring old state failed with %i\n", ret);
	if (state)
		drm_atomic_state_put(state);
}
8736
/* Cancel all per-connector work items armed by hotplug handling. */
static void intel_hpd_poll_fini(struct drm_i915_private *i915)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Kill all the work that may have been queued by hpd. */
	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* A NULL .func means the retry work was never initialized. */
		if (connector->modeset_retry_work.func)
			cancel_work_sync(&connector->modeset_retry_work);
		/* HDCP work only exists on connectors with an HDCP shim. */
		if (connector->hdcp.shim) {
			cancel_delayed_work_sync(&connector->hdcp.check_work);
			cancel_work_sync(&connector->hdcp.prop_work);
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
8754
/* part #1: call before irq uninstall */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	/* Let in-flight flips and modeset commits drain first... */
	flush_workqueue(i915->display.wq.flip);
	flush_workqueue(i915->display.wq.modeset);

	/* ...then reap any atomic states queued for deferred freeing. */
	flush_work(&i915->display.atomic_helper.free_work);
	drm_WARN_ON(&i915->drm, !llist_empty(&i915->display.atomic_helper.free_list));

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(i915);
}
8774
/* part #2: call after irq uninstall */
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	intel_mode_config_cleanup(i915);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	/* Workqueues were flushed in part #1; now it is safe to destroy them. */
	destroy_workqueue(i915->display.wq.flip);
	destroy_workqueue(i915->display.wq.modeset);

	intel_fbc_cleanup(i915);
}
8808
/* part #3: call after gem init */
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
{
	/* Mirrors the error-unwind order of intel_modeset_init_noirq(). */
	intel_dmc_fini(i915);

	intel_power_domains_driver_remove(i915);

	intel_vga_unregister(i915);

	intel_bios_driver_remove(i915);
}
8820
8821 bool intel_modeset_probe_defer(struct pci_dev *pdev)
8822 {
8823         struct drm_privacy_screen *privacy_screen;
8824
8825         /*
8826          * apple-gmux is needed on dual GPU MacBook Pro
8827          * to probe the panel if we're the inactive GPU.
8828          */
8829         if (vga_switcheroo_client_probe_defer(pdev))
8830                 return true;
8831
8832         /* If the LCD panel has a privacy-screen, wait for it */
8833         privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL);
8834         if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER)
8835                 return true;
8836
8837         drm_privacy_screen_put(privacy_screen);
8838
8839         return false;
8840 }
8841
/* Register all userspace-facing display interfaces, in dependency order. */
void intel_display_driver_register(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	/* Must be done after probing outputs */
	intel_opregion_register(i915);
	intel_acpi_video_register(i915);

	intel_audio_init(i915);

	intel_display_debugfs_register(i915);

	/*
	 * Some ports require correctly set-up hpd registers for
	 * detection to work properly (leading to ghost connected
	 * connector status), e.g. VGA on gm45.  Hence we can only set
	 * up the initial fbdev config after hpd irqs are fully
	 * enabled. We do it last so that the async config cannot run
	 * before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(i915);

	/*
	 * We need to coordinate the hotplugs with the asynchronous
	 * fbdev configuration, for which we use the
	 * fbdev->async_cookie.
	 */
	drm_kms_helper_poll_init(&i915->drm);
}
8872
/* Unregister userspace-facing display interfaces, roughly in reverse order. */
void intel_display_driver_unregister(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	intel_fbdev_unregister(i915);
	intel_audio_deinit(i915);

	/*
	 * After flushing the fbdev (incl. a late async config which
	 * will have delayed queuing of a hotplug event), then flush
	 * the hotplug events.
	 */
	drm_kms_helper_poll_fini(&i915->drm);
	/* Disable all outputs before the driver goes away. */
	drm_atomic_helper_shutdown(&i915->drm);

	acpi_video_unregister();
	intel_opregion_unregister(i915);
}
8892
8893 bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915)
8894 {
8895         return DISPLAY_VER(i915) >= 6 && i915_vtd_active(i915);
8896 }