drm/i915/display: add intel_display_driver_early_probe()
[platform/kernel/linux-starfive.git] / drivers / gpu / drm / i915 / display / intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/dma-resv.h>
28 #include <linux/i2c.h>
29 #include <linux/input.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/slab.h>
33 #include <linux/string_helpers.h>
34
35 #include <drm/display/drm_dp_helper.h>
36 #include <drm/drm_atomic.h>
37 #include <drm/drm_atomic_helper.h>
38 #include <drm/drm_atomic_uapi.h>
39 #include <drm/drm_damage_helper.h>
40 #include <drm/drm_edid.h>
41 #include <drm/drm_fourcc.h>
42 #include <drm/drm_probe_helper.h>
43 #include <drm/drm_rect.h>
44
45 #include "gem/i915_gem_lmem.h"
46 #include "gem/i915_gem_object.h"
47
48 #include "g4x_dp.h"
49 #include "g4x_hdmi.h"
50 #include "hsw_ips.h"
51 #include "i915_drv.h"
52 #include "i915_reg.h"
53 #include "i915_utils.h"
54 #include "i9xx_plane.h"
55 #include "i9xx_wm.h"
56 #include "icl_dsi.h"
57 #include "intel_atomic.h"
58 #include "intel_atomic_plane.h"
59 #include "intel_audio.h"
60 #include "intel_bw.h"
61 #include "intel_cdclk.h"
62 #include "intel_clock_gating.h"
63 #include "intel_color.h"
64 #include "intel_crt.h"
65 #include "intel_crtc.h"
66 #include "intel_crtc_state_dump.h"
67 #include "intel_ddi.h"
68 #include "intel_de.h"
69 #include "intel_display_driver.h"
70 #include "intel_display_power.h"
71 #include "intel_display_types.h"
72 #include "intel_dmc.h"
73 #include "intel_dp.h"
74 #include "intel_dp_link_training.h"
75 #include "intel_dp_mst.h"
76 #include "intel_dpio_phy.h"
77 #include "intel_dpll.h"
78 #include "intel_dpll_mgr.h"
79 #include "intel_dpt.h"
80 #include "intel_drrs.h"
81 #include "intel_dsi.h"
82 #include "intel_dvo.h"
83 #include "intel_fb.h"
84 #include "intel_fbc.h"
85 #include "intel_fbdev.h"
86 #include "intel_fdi.h"
87 #include "intel_fifo_underrun.h"
88 #include "intel_frontbuffer.h"
89 #include "intel_hdmi.h"
90 #include "intel_hotplug.h"
91 #include "intel_lvds.h"
92 #include "intel_lvds_regs.h"
93 #include "intel_modeset_setup.h"
94 #include "intel_modeset_verify.h"
95 #include "intel_overlay.h"
96 #include "intel_panel.h"
97 #include "intel_pch_display.h"
98 #include "intel_pch_refclk.h"
99 #include "intel_pcode.h"
100 #include "intel_pipe_crc.h"
101 #include "intel_plane_initial.h"
102 #include "intel_pps.h"
103 #include "intel_psr.h"
104 #include "intel_sdvo.h"
105 #include "intel_snps_phy.h"
106 #include "intel_tc.h"
107 #include "intel_tv.h"
108 #include "intel_vblank.h"
109 #include "intel_vdsc.h"
110 #include "intel_vdsc_regs.h"
111 #include "intel_vga.h"
112 #include "intel_vrr.h"
113 #include "intel_wm.h"
114 #include "skl_scaler.h"
115 #include "skl_universal_plane.h"
116 #include "skl_watermark.h"
117 #include "vlv_dsi.h"
118 #include "vlv_dsi_pll.h"
119 #include "vlv_dsi_regs.h"
120 #include "vlv_sideband.h"
121
122 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
123 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
124 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
125 static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state);
126 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
127
128 /* returns HPLL frequency in kHz */
129 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
130 {
131         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
132
133         /* Obtain SKU information */
134         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
135                 CCK_FUSE_HPLL_FREQ_MASK;
136
137         return vco_freq[hpll_freq] * 1000;
138 }
139
/*
 * Read a CCK clock control register and compute the resulting clock
 * frequency (in kHz) from @ref_freq and the divider field in the register.
 * @name is only used in the warning emitted when the hardware reports a
 * divider change still in progress.
 */
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	/* The status field should mirror the requested divider once settled */
	drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
		 (divider << CCK_FREQUENCY_STATUS_SHIFT),
		 "%s change in progress\n", name);

	/* clock = ref_freq * 2 / (divider + 1), rounded to nearest */
	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
155
/*
 * Like vlv_get_cck_clock(), but uses the HPLL VCO as the reference
 * frequency, reading it from fuses on first use and caching it in
 * dev_priv->hpll_freq. Holds the CCK sideband lock around the accesses.
 */
int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	int hpll;

	vlv_cck_get(dev_priv);

	/* Lazily initialize the cached HPLL frequency */
	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

	vlv_cck_put(dev_priv);

	return hpll;
}
172
173 void intel_update_czclk(struct drm_i915_private *dev_priv)
174 {
175         if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
176                 return;
177
178         dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
179                                                       CCK_CZ_CLOCK_CONTROL);
180
181         drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
182                 dev_priv->czclk_freq);
183 }
184
185 static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
186 {
187         return (crtc_state->active_planes &
188                 ~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0;
189 }
190
191 /* WA Display #0827: Gen9:all */
192 static void
193 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
194 {
195         if (enable)
196                 intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
197                              0, DUPS1_GATING_DIS | DUPS2_GATING_DIS);
198         else
199                 intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
200                              DUPS1_GATING_DIS | DUPS2_GATING_DIS, 0);
201 }
202
203 /* Wa_2006604312:icl,ehl */
204 static void
205 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
206                        bool enable)
207 {
208         if (enable)
209                 intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 0, DPFR_GATING_DIS);
210         else
211                 intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), DPFR_GATING_DIS, 0);
212 }
213
214 /* Wa_1604331009:icl,jsl,ehl */
215 static void
216 icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
217                        bool enable)
218 {
219         intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), CURSOR_GATING_DIS,
220                      enable ? CURSOR_GATING_DIS : 0);
221 }
222
/* A port sync slave has a valid master transcoder assigned. */
static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}
228
/* A port sync master has at least one slave transcoder in its mask. */
static bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}
234
/* True if the crtc participates in transcoder port sync, as master or slave. */
bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
		is_trans_port_sync_slave(crtc_state);
}
241
/*
 * The lowest pipe in bigjoiner_pipes acts as master. Only valid when
 * bigjoiner_pipes != 0 (ffs() returns 0 when no bit is set).
 */
static enum pipe bigjoiner_master_pipe(const struct intel_crtc_state *crtc_state)
{
	return ffs(crtc_state->bigjoiner_pipes) - 1;
}
246
247 u8 intel_crtc_bigjoiner_slave_pipes(const struct intel_crtc_state *crtc_state)
248 {
249         if (crtc_state->bigjoiner_pipes)
250                 return crtc_state->bigjoiner_pipes & ~BIT(bigjoiner_master_pipe(crtc_state));
251         else
252                 return 0;
253 }
254
/* True if the crtc is part of a bigjoiner config but is not the master pipe. */
bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return crtc_state->bigjoiner_pipes &&
		crtc->pipe != bigjoiner_master_pipe(crtc_state);
}
262
/* True if the crtc is part of a bigjoiner config and is the master pipe. */
bool intel_crtc_is_bigjoiner_master(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return crtc_state->bigjoiner_pipes &&
		crtc->pipe == bigjoiner_master_pipe(crtc_state);
}
270
/* Number of pipes joined together; 0 when bigjoiner is not in use. */
static int intel_bigjoiner_num_pipes(const struct intel_crtc_state *crtc_state)
{
	return hweight8(crtc_state->bigjoiner_pipes);
}
275
276 struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state)
277 {
278         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
279
280         if (intel_crtc_is_bigjoiner_slave(crtc_state))
281                 return intel_crtc_for_pipe(i915, bigjoiner_master_pipe(crtc_state));
282         else
283                 return to_intel_crtc(crtc_state->uapi.crtc);
284 }
285
/*
 * Wait until the pipe has actually shut down after being disabled.
 * Gen4+ exposes a pipe state bit in TRANSCONF to poll; older hardware
 * is handled by waiting for the scanline counter to stop moving.
 */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (DISPLAY_VER(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, TRANSCONF(cpu_transcoder),
					    TRANSCONF_STATE_ENABLE, 100))
			drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}
303
/*
 * State-checker assertion: warn if the hardware enable state of
 * @cpu_transcoder does not match the expected @state.
 *
 * The transcoder's power domain may be off, in which case TRANSCONF
 * cannot be read and the transcoder is treated as disabled.
 */
void assert_transcoder(struct drm_i915_private *dev_priv,
		       enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
		cur_state = !!(val & TRANSCONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		/* Power domain off: report the transcoder as disabled */
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder),
			str_on_off(state), str_on_off(cur_state));
}
331
/*
 * State-checker assertion: warn if the plane's hardware enable state,
 * as reported by its get_hw_state() hook, does not match @state.
 */
static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, str_on_off(state),
			str_on_off(cur_state));
}
344
#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

/* Assert that every plane attached to @crtc is disabled in hardware. */
static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}
356
/*
 * Wait (up to 1 ms) for the DPIO PHY to report the given port as ready.
 * Which status register/bits apply depends on the port: ports B and C
 * are reported in DPLL(0), port D in DPIO_PHY_STATUS. For port C the
 * expected mask is shifted to line up with its status bits.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dig_port->base.port) {
	default:
		MISSING_CASE(dig_port->base.port);
		fallthrough;
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}
391
/*
 * Enable the CPU transcoder/pipe for the new crtc state.
 *
 * Asserts the required preconditions (all planes off, the relevant
 * PLL/FDI clocks running), applies Wa_22012358565 on display 13, then
 * sets TRANSCONF_ENABLE. May wait for the pipe to actually start when
 * vblank timestamps would otherwise be unreliable.
 */
void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	/* Wa_22012358565:adl-p */
	if (DISPLAY_VER(dev_priv) == 13)
		intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
			     0, PIPE_ARB_USE_PROG_SLOTS);

	reg = TRANSCONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if (val & TRANSCONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, reg, val | TRANSCONF_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
452
/*
 * Disable the CPU transcoder/pipe for the old crtc state.
 *
 * All planes must already be off. Clears TRANSCONF_ENABLE (except on
 * i830, which keeps both pipes running), clears the FEC stall chicken
 * bit on display 12+, and waits for the pipe to actually stop.
 */
void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	reg = TRANSCONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if ((val & TRANSCONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~TRANSCONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~TRANSCONF_ENABLE;

	/* The chicken register moved on display 14 (MTL) */
	if (DISPLAY_VER(dev_priv) >= 14)
		intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder),
			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
	else if (DISPLAY_VER(dev_priv) >= 12)
		intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);

	intel_de_write(dev_priv, reg, val);
	/* Only wait if we actually cleared the enable bit (not i830) */
	if ((val & TRANSCONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
497
498 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
499 {
500         unsigned int size = 0;
501         int i;
502
503         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
504                 size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;
505
506         return size;
507 }
508
509 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
510 {
511         unsigned int size = 0;
512         int i;
513
514         for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
515                 unsigned int plane_size;
516
517                 if (rem_info->plane[i].linear)
518                         plane_size = rem_info->plane[i].size;
519                 else
520                         plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;
521
522                 if (plane_size == 0)
523                         continue;
524
525                 if (rem_info->plane_alignment)
526                         size = ALIGN(size, rem_info->plane_alignment);
527
528                 size += plane_size;
529         }
530
531         return size;
532 }
533
/*
 * Whether the plane needs a fence register for scanout: always on
 * pre-gen4, otherwise only for FBC-capable planes using the normal
 * (non-rotated/remapped) GTT view.
 */
bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	return DISPLAY_VER(dev_priv) < 4 ||
		(plane->fbc &&
		 plane_state->view.gtt.type == I915_GTT_VIEW_NORMAL);
}
543
544 /*
545  * Convert the x/y offsets into a linear offset.
546  * Only valid with 0/180 degree rotation, which is fine since linear
547  * offset is only used with linear buffers on pre-hsw and tiled buffers
548  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
549  */
550 u32 intel_fb_xy_to_linear(int x, int y,
551                           const struct intel_plane_state *state,
552                           int color_plane)
553 {
554         const struct drm_framebuffer *fb = state->hw.fb;
555         unsigned int cpp = fb->format->cpp[color_plane];
556         unsigned int pitch = state->view.color_plane[color_plane].mapping_stride;
557
558         return y * pitch + x * cpp;
559 }
560
/*
 * Add the x/y offsets derived from fb->offsets[] to the user
 * specified plane src x/y offsets. The resulting x/y offsets
 * specify the start of scanout from the beginning of the gtt mapping.
 */
void intel_add_fb_offsets(int *x, int *y,
			  const struct intel_plane_state *state,
			  int color_plane)

{
	*x += state->view.color_plane[color_plane].x;
	*y += state->view.color_plane[color_plane].y;
}
574
575 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
576                               u32 pixel_format, u64 modifier)
577 {
578         struct intel_crtc *crtc;
579         struct intel_plane *plane;
580
581         if (!HAS_DISPLAY(dev_priv))
582                 return 0;
583
584         /*
585          * We assume the primary plane for pipe A has
586          * the highest stride limits of them all,
587          * if in case pipe A is disabled, use the first pipe from pipe_mask.
588          */
589         crtc = intel_first_crtc(dev_priv);
590         if (!crtc)
591                 return 0;
592
593         plane = to_intel_plane(crtc->base.primary);
594
595         return plane->max_stride(plane, pixel_format, modifier,
596                                  DRM_MODE_ROTATE_0);
597 }
598
599 void intel_set_plane_visible(struct intel_crtc_state *crtc_state,
600                              struct intel_plane_state *plane_state,
601                              bool visible)
602 {
603         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
604
605         plane_state->uapi.visible = visible;
606
607         if (visible)
608                 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
609         else
610                 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
611 }
612
613 void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state)
614 {
615         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
616         struct drm_plane *plane;
617
618         /*
619          * Active_planes aliases if multiple "primary" or cursor planes
620          * have been used on the same (or wrong) pipe. plane_mask uses
621          * unique ids, hence we can use that to reconstruct active_planes.
622          */
623         crtc_state->enabled_planes = 0;
624         crtc_state->active_planes = 0;
625
626         drm_for_each_plane_mask(plane, &dev_priv->drm,
627                                 crtc_state->uapi.plane_mask) {
628                 crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
629                 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
630         }
631 }
632
/*
 * Disable a plane outside of the atomic commit machinery (used e.g.
 * during initial hardware takeover/sanitization).
 *
 * Clears the plane from the crtc's software state (visibility, bitmasks,
 * data rates, min cdclk), handles the side effects that require vblank
 * waits (IPS disable, GMCH self-refresh, gen2 underrun reporting), then
 * arms the actual plane disable in hardware.
 */
void intel_plane_disable_noatomic(struct intel_crtc *crtc,
				  struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	intel_set_plane_visible(crtc_state, plane_state, false);
	intel_plane_fixup_bitmasks(crtc_state);
	/* The plane no longer contributes any bandwidth/cdclk demand */
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->data_rate_y[plane->id] = 0;
	crtc_state->rel_data_rate[plane->id] = 0;
	crtc_state->rel_data_rate_y[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	if ((crtc_state->active_planes & ~BIT(PLANE_CURSOR)) == 0 &&
	    hsw_ips_disable(crtc_state)) {
		crtc_state->ips_enabled = false;
		intel_crtc_wait_for_next_vblank(crtc);
	}

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_plane_disable_arm(plane, crtc_state);
	intel_crtc_wait_for_next_vblank(crtc);
}
684
/*
 * Y offset of the plane within its fence, derived by re-adjusting the
 * aligned offset of colour plane 0 back to x/y coordinates.
 */
unsigned int
intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
{
	int x = 0, y = 0;

	intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
					  plane_state->view.color_plane[0].offset, 0);

	return y;
}
695
/*
 * Program the per-pipe PIPE_CHICKEN register with the workaround bits
 * required on icl+ (alpha bypass, pixel rounding, underrun recovery,
 * and the DG2 CCS tag workaround).
 */
static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));

	/*
	 * Display WA #1153: icl
	 * enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
	/*
	 * Display WA # 1605353570: icl
	 * Set the pixel rounding bit to 1 for allowing
	 * passthrough of Frame buffer pixels unmodified
	 * across pipe
	 */
	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;

	/*
	 * Underrun recovery must always be disabled on display 13+.
	 * DG2 chicken bit meaning is inverted compared to other platforms.
	 */
	if (IS_DG2(dev_priv))
		tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
	else if (DISPLAY_VER(dev_priv) >= 13)
		tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;

	/* Wa_14010547955:dg2 */
	if (IS_DG2_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER))
		tmp |= DG2_RENDER_CCSTAG_4_3_EN;

	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}
734
/*
 * Check whether any crtc still has a commit whose cleanup (and thus fb
 * unpin) has not completed. If one is found, wait one vblank on that
 * crtc to give cleanup a chance to finish and return true; otherwise
 * return false.
 */
bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
	struct drm_crtc *crtc;
	bool cleanup_done;

	drm_for_each_crtc(crtc, &dev_priv->drm) {
		struct drm_crtc_commit *commit;
		/* commit_lock protects the per-crtc commit list */
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		cleanup_done = commit ?
			try_wait_for_completion(&commit->cleanup_done) : true;
		spin_unlock(&crtc->commit_lock);

		if (cleanup_done)
			continue;

		intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc));

		return true;
	}

	return false;
}
759
/*
 * Finds the encoder associated with the given CRTC. This can only be
 * used when we know that the CRTC isn't feeding multiple encoders!
 *
 * Scans the new connector states in @state for connectors attached to
 * the crtc's master crtc (bigjoiner-aware) and warns unless exactly one
 * encoder is found. May return NULL if no connector matches.
 */
struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	const struct drm_connector_state *connector_state;
	const struct drm_connector *connector;
	struct intel_encoder *encoder = NULL;
	struct intel_crtc *master_crtc;
	int num_encoders = 0;
	int i;

	master_crtc = intel_master_crtc(crtc_state);

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		if (connector_state->crtc != &master_crtc->base)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		num_encoders++;
	}

	drm_WARN(state->base.dev, num_encoders != 1,
		 "%d encoders for pipe %c\n",
		 num_encoders, pipe_name(master_crtc->pipe));

	return encoder;
}
791
/*
 * Enable and program the ILK-style panel fitter (pch_pfit) with the
 * destination window from the crtc state. No-op when the fitter is not
 * enabled in the state.
 */
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
	enum pipe pipe = crtc->pipe;
	int width = drm_rect_width(dst);
	int height = drm_rect_height(dst);
	int x = dst->x1;
	int y = dst->y1;

	if (!crtc_state->pch_pfit.enabled)
		return;

	/* Force use of hard-coded filter coefficients
	 * as some pre-programmed values are broken,
	 * e.g. x201.
	 */
	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
		intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
				  PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
	else
		intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
				  PF_FILTER_MED_3x3);
	/* Window position/size packed as (x << 16 | y) / (w << 16 | h) */
	intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
	intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
}
819
820 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
821 {
822         if (crtc->overlay)
823                 (void) intel_overlay_switch_off(crtc->overlay);
824
825         /* Let userspace switch the overlay on again. In most cases userspace
826          * has to recompute where to put it anyway.
827          */
828 }
829
830 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
831 {
832         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
833
834         if (!crtc_state->nv12_planes)
835                 return false;
836
837         /* WA Display #0827: Gen9:all */
838         if (DISPLAY_VER(dev_priv) == 9)
839                 return true;
840
841         return false;
842 }
843
844 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
845 {
846         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
847
848         /* Wa_2006604312:icl,ehl */
849         if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
850                 return true;
851
852         return false;
853 }
854
855 static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
856 {
857         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
858
859         /* Wa_1604331009:icl,jsl,ehl */
860         if (is_hdr_mode(crtc_state) &&
861             crtc_state->active_planes & BIT(PLANE_CURSOR) &&
862             DISPLAY_VER(dev_priv) == 11)
863                 return true;
864
865         return false;
866 }
867
868 static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
869                                     enum pipe pipe, bool enable)
870 {
871         if (DISPLAY_VER(i915) == 9) {
872                 /*
873                  * "Plane N strech max must be programmed to 11b (x1)
874                  *  when Async flips are enabled on that plane."
875                  */
876                 intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
877                              SKL_PLANE1_STRETCH_MAX_MASK,
878                              enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
879         } else {
880                 /* Also needed on HSW/BDW albeit undocumented */
881                 intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
882                              HSW_PRI_STRETCH_MAX_MASK,
883                              enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
884         }
885 }
886
887 static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
888 {
889         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
890
891         return crtc_state->uapi.async_flip && i915_vtd_active(i915) &&
892                 (DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
893 }
894
/*
 * is_enabling(): @feature is off in the old state (or the pipe undergoes a
 * full modeset, implying a disable/enable cycle) and on in the new state.
 * is_disabling(): @feature is on in the old state and off in the new state
 * (or the pipe undergoes a full modeset).
 */
#define is_enabling(feature, old_crtc_state, new_crtc_state) \
	((!(old_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)) && \
	 (new_crtc_state)->feature)
#define is_disabling(feature, old_crtc_state, new_crtc_state) \
	((old_crtc_state)->feature && \
	 (!(new_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)))

/* Does this commit turn on planes where none (or a fresh pipe) were active? */
static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
			    const struct intel_crtc_state *new_crtc_state)
{
	return is_enabling(active_planes, old_crtc_state, new_crtc_state);
}

/* Does this commit leave the pipe with no active planes? */
static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
			     const struct intel_crtc_state *new_crtc_state)
{
	return is_disabling(active_planes, old_crtc_state, new_crtc_state);
}

/* Is VRR being switched on for this pipe by the commit? */
static bool vrr_enabling(const struct intel_crtc_state *old_crtc_state,
			 const struct intel_crtc_state *new_crtc_state)
{
	return is_enabling(vrr.enable, old_crtc_state, new_crtc_state);
}

/* Is VRR being switched off for this pipe by the commit? */
static bool vrr_disabling(const struct intel_crtc_state *old_crtc_state,
			  const struct intel_crtc_state *new_crtc_state)
{
	return is_disabling(vrr.enable, old_crtc_state, new_crtc_state);
}

/* The helpers above are the only intended users of these macros. */
#undef is_disabling
#undef is_enabling
928
/*
 * Per-CRTC work done after the plane update: frontbuffer flip tracking,
 * watermark/FBC updates, and tearing down each hardware workaround on
 * its needed -> no-longer-needed edge.
 */
static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	/* Post-update watermark programming, only while the pipe is active */
	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(dev_priv);

	intel_fbc_post_update(state, crtc);

	/* VT-d async flip WA (HSW/BDW/gen9) */
	if (needs_async_flip_vtd_wa(old_crtc_state) &&
	    !needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, false);

	/* WA Display #0827 (gen9) */
	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	/* Wa_2006604312:icl,ehl */
	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);

	/* Wa_1604331009:icl,jsl,ehl */
	if (needs_cursorclk_wa(old_crtc_state) &&
	    !needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, false);

	if (intel_crtc_needs_color_update(new_crtc_state))
		intel_color_post_update(new_crtc_state);
}
965
966 static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
967                                         struct intel_crtc *crtc)
968 {
969         const struct intel_crtc_state *crtc_state =
970                 intel_atomic_get_new_crtc_state(state, crtc);
971         u8 update_planes = crtc_state->update_planes;
972         const struct intel_plane_state *plane_state;
973         struct intel_plane *plane;
974         int i;
975
976         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
977                 if (plane->pipe == crtc->pipe &&
978                     update_planes & BIT(plane->id))
979                         plane->enable_flip_done(plane);
980         }
981 }
982
983 static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
984                                          struct intel_crtc *crtc)
985 {
986         const struct intel_crtc_state *crtc_state =
987                 intel_atomic_get_new_crtc_state(state, crtc);
988         u8 update_planes = crtc_state->update_planes;
989         const struct intel_plane_state *plane_state;
990         struct intel_plane *plane;
991         int i;
992
993         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
994                 if (plane->pipe == crtc->pipe &&
995                     update_planes & BIT(plane->id))
996                         plane->disable_flip_done(plane);
997         }
998 }
999
/*
 * WA for planes whose async flip enable bit is double buffered and only
 * latched at the start of vblank: for every plane leaving async flip
 * mode, re-program the old plane state with the async flip bit cleared,
 * then wait one vblank so the hardware actually latches the change.
 */
static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
					     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	/* planes that were async flipping but no longer are */
	u8 disable_async_flip_planes = old_crtc_state->async_flip_planes &
				       ~new_crtc_state->async_flip_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	bool need_vbl_wait = false;
	int i;

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (plane->need_async_flip_disable_wa &&
		    plane->pipe == crtc->pipe &&
		    disable_async_flip_planes & BIT(plane->id)) {
			/*
			 * Apart from the async flip bit we want to
			 * preserve the old state for the plane.
			 */
			plane->async_flip(plane, old_crtc_state,
					  old_plane_state, false);
			need_vbl_wait = true;
		}
	}

	/* Only wait if at least one plane actually needed the WA. */
	if (need_vbl_wait)
		intel_crtc_wait_for_next_vblank(crtc);
}
1031
/*
 * Per-CRTC work done before the plane update: deactivate features that
 * must be off before the planes are reprogrammed (VRR, DRRS, PSR),
 * arm hardware workarounds on their not-needed -> needed edge, and
 * insert the vblank waits the hardware requires between steps.
 */
static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	/* Turn off VRR first if this commit disables it for the pipe. */
	if (vrr_disabling(old_crtc_state, new_crtc_state)) {
		intel_vrr_disable(old_crtc_state);
		intel_crtc_update_active_timings(old_crtc_state, false);
	}

	intel_drrs_deactivate(old_crtc_state);

	intel_psr_pre_plane_update(state, crtc);

	/* IPS and FBC pre-update may each require a vblank wait. */
	if (hsw_ips_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	if (intel_fbc_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	/* VT-d async flip WA (HSW/BDW/gen9) */
	if (!needs_async_flip_vtd_wa(old_crtc_state) &&
	    needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, true);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl,ehl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/* Wa_1604331009:icl,jsl,ehl */
	if (!needs_cursorclk_wa(old_crtc_state) &&
	    needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling.  LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (old_crtc_state->hw.active &&
	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!intel_crtc_needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks.  For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (!intel_initial_watermarks(state, crtc))
			if (new_crtc_state->update_wm_pre)
				intel_update_watermarks(dev_priv);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * WA for platforms where async address update enable bit
	 * is double buffered and only latched at start of vblank.
	 */
	if (old_crtc_state->async_flip_planes & ~new_crtc_state->async_flip_planes)
		intel_crtc_async_flip_disable_wa(state, crtc);
}
1142
1143 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
1144                                       struct intel_crtc *crtc)
1145 {
1146         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1147         const struct intel_crtc_state *new_crtc_state =
1148                 intel_atomic_get_new_crtc_state(state, crtc);
1149         unsigned int update_mask = new_crtc_state->update_planes;
1150         const struct intel_plane_state *old_plane_state;
1151         struct intel_plane *plane;
1152         unsigned fb_bits = 0;
1153         int i;
1154
1155         intel_crtc_dpms_overlay_disable(crtc);
1156
1157         for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
1158                 if (crtc->pipe != plane->pipe ||
1159                     !(update_mask & BIT(plane->id)))
1160                         continue;
1161
1162                 intel_plane_disable_arm(plane, new_crtc_state);
1163
1164                 if (old_plane_state->uapi.visible)
1165                         fb_bits |= plane->frontbuffer_bit;
1166         }
1167
1168         intel_frontbuffer_flip(dev_priv, fb_bits);
1169 }
1170
1171 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
1172 {
1173         struct drm_i915_private *i915 = to_i915(state->base.dev);
1174         struct intel_crtc_state *new_crtc_state, *old_crtc_state;
1175         struct intel_crtc *crtc;
1176         int i;
1177
1178         /*
1179          * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits.
1180          * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
1181          */
1182         if (i915->display.dpll.mgr) {
1183                 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
1184                         if (intel_crtc_needs_modeset(new_crtc_state))
1185                                 continue;
1186
1187                         new_crtc_state->shared_dpll = old_crtc_state->shared_dpll;
1188                         new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state;
1189                 }
1190         }
1191 }
1192
1193 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
1194                                           struct intel_crtc *crtc)
1195 {
1196         const struct intel_crtc_state *crtc_state =
1197                 intel_atomic_get_new_crtc_state(state, crtc);
1198         const struct drm_connector_state *conn_state;
1199         struct drm_connector *conn;
1200         int i;
1201
1202         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1203                 struct intel_encoder *encoder =
1204                         to_intel_encoder(conn_state->best_encoder);
1205
1206                 if (conn_state->crtc != &crtc->base)
1207                         continue;
1208
1209                 if (encoder->pre_pll_enable)
1210                         encoder->pre_pll_enable(state, encoder,
1211                                                 crtc_state, conn_state);
1212         }
1213 }
1214
1215 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
1216                                       struct intel_crtc *crtc)
1217 {
1218         const struct intel_crtc_state *crtc_state =
1219                 intel_atomic_get_new_crtc_state(state, crtc);
1220         const struct drm_connector_state *conn_state;
1221         struct drm_connector *conn;
1222         int i;
1223
1224         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1225                 struct intel_encoder *encoder =
1226                         to_intel_encoder(conn_state->best_encoder);
1227
1228                 if (conn_state->crtc != &crtc->base)
1229                         continue;
1230
1231                 if (encoder->pre_enable)
1232                         encoder->pre_enable(state, encoder,
1233                                             crtc_state, conn_state);
1234         }
1235 }
1236
1237 static void intel_encoders_enable(struct intel_atomic_state *state,
1238                                   struct intel_crtc *crtc)
1239 {
1240         const struct intel_crtc_state *crtc_state =
1241                 intel_atomic_get_new_crtc_state(state, crtc);
1242         const struct drm_connector_state *conn_state;
1243         struct drm_connector *conn;
1244         int i;
1245
1246         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1247                 struct intel_encoder *encoder =
1248                         to_intel_encoder(conn_state->best_encoder);
1249
1250                 if (conn_state->crtc != &crtc->base)
1251                         continue;
1252
1253                 if (encoder->enable)
1254                         encoder->enable(state, encoder,
1255                                         crtc_state, conn_state);
1256                 intel_opregion_notify_encoder(encoder, true);
1257         }
1258 }
1259
1260 static void intel_encoders_disable(struct intel_atomic_state *state,
1261                                    struct intel_crtc *crtc)
1262 {
1263         const struct intel_crtc_state *old_crtc_state =
1264                 intel_atomic_get_old_crtc_state(state, crtc);
1265         const struct drm_connector_state *old_conn_state;
1266         struct drm_connector *conn;
1267         int i;
1268
1269         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1270                 struct intel_encoder *encoder =
1271                         to_intel_encoder(old_conn_state->best_encoder);
1272
1273                 if (old_conn_state->crtc != &crtc->base)
1274                         continue;
1275
1276                 intel_opregion_notify_encoder(encoder, false);
1277                 if (encoder->disable)
1278                         encoder->disable(state, encoder,
1279                                          old_crtc_state, old_conn_state);
1280         }
1281 }
1282
1283 static void intel_encoders_post_disable(struct intel_atomic_state *state,
1284                                         struct intel_crtc *crtc)
1285 {
1286         const struct intel_crtc_state *old_crtc_state =
1287                 intel_atomic_get_old_crtc_state(state, crtc);
1288         const struct drm_connector_state *old_conn_state;
1289         struct drm_connector *conn;
1290         int i;
1291
1292         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1293                 struct intel_encoder *encoder =
1294                         to_intel_encoder(old_conn_state->best_encoder);
1295
1296                 if (old_conn_state->crtc != &crtc->base)
1297                         continue;
1298
1299                 if (encoder->post_disable)
1300                         encoder->post_disable(state, encoder,
1301                                               old_crtc_state, old_conn_state);
1302         }
1303 }
1304
1305 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
1306                                             struct intel_crtc *crtc)
1307 {
1308         const struct intel_crtc_state *old_crtc_state =
1309                 intel_atomic_get_old_crtc_state(state, crtc);
1310         const struct drm_connector_state *old_conn_state;
1311         struct drm_connector *conn;
1312         int i;
1313
1314         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1315                 struct intel_encoder *encoder =
1316                         to_intel_encoder(old_conn_state->best_encoder);
1317
1318                 if (old_conn_state->crtc != &crtc->base)
1319                         continue;
1320
1321                 if (encoder->post_pll_disable)
1322                         encoder->post_pll_disable(state, encoder,
1323                                                   old_crtc_state, old_conn_state);
1324         }
1325 }
1326
1327 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
1328                                        struct intel_crtc *crtc)
1329 {
1330         const struct intel_crtc_state *crtc_state =
1331                 intel_atomic_get_new_crtc_state(state, crtc);
1332         const struct drm_connector_state *conn_state;
1333         struct drm_connector *conn;
1334         int i;
1335
1336         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1337                 struct intel_encoder *encoder =
1338                         to_intel_encoder(conn_state->best_encoder);
1339
1340                 if (conn_state->crtc != &crtc->base)
1341                         continue;
1342
1343                 if (encoder->update_pipe)
1344                         encoder->update_pipe(state, encoder,
1345                                              crtc_state, conn_state);
1346         }
1347 }
1348
/* Disable (arm the disable of) the primary plane of the given CRTC. */
static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);

	plane->disable_arm(plane, crtc_state);
}
1356
/*
 * Program the ILK-style CPU transcoder: link M/N values (FDI for PCH
 * encoders, DP M/N otherwise for DP encoders), then the transcoder
 * timings and PIPECONF.
 */
static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (crtc_state->has_pch_encoder) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->fdi_m_n);
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->dp_m_n);
		intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
					       &crtc_state->dp_m2_n2);
	}

	intel_set_transcoder_timings(crtc_state);

	ilk_set_pipeconf(crtc_state);
}
1376
/*
 * Full modeset enable sequence for ILK-style (PCH based) pipes:
 * transcoder config, encoder pre-enable, pfit, LUTs, watermarks,
 * transcoder enable, optional PCH enable, vblank, encoder enable.
 * The order of the steps below is hardware mandated - do not reorder.
 */
static void ilk_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Enabling an already-active CRTC would be a driver bug. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	ilk_configure_cpu_transcoder(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	crtc->active = true;

	intel_encoders_pre_enable(state, crtc);

	if (new_crtc_state->has_pch_encoder) {
		ilk_pch_pre_enable(state, crtc);
	} else {
		/* FDI must be fully off if no PCH encoder is involved. */
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit_noarm(new_crtc_state);
	intel_color_commit_arm(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	intel_initial_watermarks(state, crtc);
	intel_enable_transcoder(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		ilk_pch_enable(state, crtc);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (HAS_PCH_CPT(dev_priv))
		intel_wait_for_pipe_scanline_moving(crtc);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (new_crtc_state->has_pch_encoder) {
		intel_crtc_wait_for_next_vblank(crtc);
		intel_crtc_wait_for_next_vblank(crtc);
	}
	/* Re-enable the underrun reporting suppressed at the top. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
1454
1455 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
1456                                             enum pipe pipe, bool apply)
1457 {
1458         u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
1459         u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
1460
1461         if (apply)
1462                 val |= mask;
1463         else
1464                 val &= ~mask;
1465
1466         intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
1467 }
1468
1469 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
1470 {
1471         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1472         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1473
1474         intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
1475                        HSW_LINETIME(crtc_state->linetime) |
1476                        HSW_IPS_LINETIME(crtc_state->ips_linetime));
1477 }
1478
1479 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
1480 {
1481         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1482         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1483         enum transcoder transcoder = crtc_state->cpu_transcoder;
1484         i915_reg_t reg = DISPLAY_VER(dev_priv) >= 14 ? MTL_CHICKEN_TRANS(transcoder) :
1485                          CHICKEN_TRANS(transcoder);
1486
1487         intel_de_rmw(dev_priv, reg,
1488                      HSW_FRAME_START_DELAY_MASK,
1489                      HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1));
1490 }
1491
/*
 * Bigjoiner pre-enable: when called for the slave pipe, the encoder
 * pre_pll_enable/pre_enable hooks are run on the master CRTC, with the
 * shared DPLL enabled in between.
 */
static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
					 const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *master_crtc = intel_master_crtc(crtc_state);

	/*
	 * Enable sequence steps 1-7 on bigjoiner master
	 */
	if (intel_crtc_is_bigjoiner_slave(crtc_state))
		intel_encoders_pre_pll_enable(state, master_crtc);

	if (crtc_state->shared_dpll)
		intel_enable_shared_dpll(crtc_state);

	if (intel_crtc_is_bigjoiner_slave(crtc_state))
		intel_encoders_pre_enable(state, master_crtc);
}
1509
/*
 * Program the HSW+ CPU transcoder: link M/N values (FDI for PCH
 * encoders, DP M/N otherwise for DP encoders), transcoder timings
 * (including VRR timings where supported), pixel multiplier (the EDP
 * transcoder has no TRANS_MULT register), frame start delay and
 * finally TRANSCONF.
 */
static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (crtc_state->has_pch_encoder) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->fdi_m_n);
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->dp_m_n);
		intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
					       &crtc_state->dp_m2_n2);
	}

	intel_set_transcoder_timings(crtc_state);
	if (HAS_VRR(dev_priv))
		intel_vrr_set_transcoder_timings(crtc_state);

	/* TRANS_MULT holds pixel_multiplier - 1 (hardware is 0-based) */
	if (cpu_transcoder != TRANSCODER_EDP)
		intel_de_write(dev_priv, TRANS_MULT(cpu_transcoder),
			       crtc_state->pixel_multiplier - 1);

	hsw_set_frame_start_delay(crtc_state);

	hsw_set_transconf(crtc_state);
}
1538
/*
 * Enable a pipe on HSW+ style hardware. The ordering here follows the
 * hardware modeset sequence: DMC/PLL/encoder pre-enable hooks first,
 * then DSC and transcoder/pipe programming, pfit, LUTs and watermarks,
 * and finally the encoder enable hooks and workaround vblank waits.
 */
static void hsw_crtc_enable(struct intel_atomic_state *state,
                            struct intel_crtc *crtc)
{
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
        enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
        bool psl_clkgate_wa;

        /* Enabling an already-active crtc is a driver bug. */
        if (drm_WARN_ON(&dev_priv->drm, crtc->active))
                return;

        intel_dmc_enable_pipe(dev_priv, crtc->pipe);

        /*
         * For bigjoiner configurations the normal pre-PLL/pre-enable
         * path is replaced by the dedicated bigjoiner pre-enable.
         */
        if (!new_crtc_state->bigjoiner_pipes) {
                intel_encoders_pre_pll_enable(state, crtc);

                if (new_crtc_state->shared_dpll)
                        intel_enable_shared_dpll(new_crtc_state);

                intel_encoders_pre_enable(state, crtc);
        } else {
                icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
        }

        intel_dsc_enable(new_crtc_state);

        if (DISPLAY_VER(dev_priv) >= 13)
                intel_uncompressed_joiner_enable(new_crtc_state);

        intel_set_pipe_src_size(new_crtc_state);
        if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
                bdw_set_pipe_misc(new_crtc_state);

        /*
         * CPU transcoder programming is skipped for bigjoiner slaves
         * and for DSI transcoders.
         */
        if (!intel_crtc_is_bigjoiner_slave(new_crtc_state) &&
            !transcoder_is_dsi(cpu_transcoder))
                hsw_configure_cpu_transcoder(new_crtc_state);

        crtc->active = true;

        /* Display WA #1180: WaDisableScalarClockGating: glk */
        psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
                new_crtc_state->pch_pfit.enabled;
        if (psl_clkgate_wa)
                glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

        if (DISPLAY_VER(dev_priv) >= 9)
                skl_pfit_enable(new_crtc_state);
        else
                ilk_pfit_enable(new_crtc_state);

        /*
         * On ILK+ LUT must be loaded before the pipe is running but with
         * clocks enabled
         */
        intel_color_load_luts(new_crtc_state);
        intel_color_commit_noarm(new_crtc_state);
        intel_color_commit_arm(new_crtc_state);
        /* update DSPCNTR to configure gamma/csc for pipe bottom color */
        if (DISPLAY_VER(dev_priv) < 9)
                intel_disable_primary_plane(new_crtc_state);

        hsw_set_linetime_wm(new_crtc_state);

        if (DISPLAY_VER(dev_priv) >= 11)
                icl_set_pipe_chicken(new_crtc_state);

        intel_initial_watermarks(state, crtc);

        if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
                intel_crtc_vblank_on(new_crtc_state);

        intel_encoders_enable(state, crtc);

        /* Undo WA #1180 after the scaler has settled (one vblank). */
        if (psl_clkgate_wa) {
                intel_crtc_wait_for_next_vblank(crtc);
                glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
        }

        /* If we change the relative order between pipe/planes enabling, we need
         * to change the workaround. */
        hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
        if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
                struct intel_crtc *wa_crtc;

                wa_crtc = intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe);

                /* HSW workaround: wait two vblanks on the other pipe. */
                intel_crtc_wait_for_next_vblank(wa_crtc);
                intel_crtc_wait_for_next_vblank(wa_crtc);
        }
}
1631
/*
 * ilk_pfit_disable - disable the ILK-style panel fitter for a pipe
 * @old_crtc_state: crtc state the pipe is being disabled from
 *
 * Clears the PF_CTL/PF_WIN_POS/PF_WIN_SZ registers, but only if the
 * panel fitter was actually in use for this pipe.
 */
void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* To avoid upsetting the power well on haswell only disable the pfit if
         * it's in use. The hw state code will make sure we get this right. */
        if (!old_crtc_state->pch_pfit.enabled)
                return;

        intel_de_write_fw(dev_priv, PF_CTL(pipe), 0);
        intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), 0);
        intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), 0);
}
1647
/*
 * Disable a pipe on ILK-style (PCH) hardware: encoders, vblank,
 * transcoder, pfit and the PCH side, in that order, with FIFO
 * underrun reporting suppressed across the sequence.
 */
static void ilk_crtc_disable(struct intel_atomic_state *state,
                             struct intel_crtc *crtc)
{
        const struct intel_crtc_state *old_crtc_state =
                intel_atomic_get_old_crtc_state(state, crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /*
         * Sometimes spurious CPU pipe underruns happen when the
         * pipe is already disabled, but FDI RX/TX is still enabled.
         * Happens at least with VGA+HDMI cloning. Suppress them.
         */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

        intel_encoders_disable(state, crtc);

        intel_crtc_vblank_off(old_crtc_state);

        intel_disable_transcoder(old_crtc_state);

        ilk_pfit_disable(old_crtc_state);

        /* PCH-side teardown only applies when a PCH encoder was in use. */
        if (old_crtc_state->has_pch_encoder)
                ilk_pch_disable(state, crtc);

        intel_encoders_post_disable(state, crtc);

        if (old_crtc_state->has_pch_encoder)
                ilk_pch_post_disable(state, crtc);

        /* Everything is off; re-enable underrun reporting. */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);

        intel_disable_shared_dpll(old_crtc_state);
}
1685
/*
 * Disable a pipe on HSW+ style hardware. Most of the heavy lifting
 * (transcoder/pipe teardown) happens inside the encoder disable hooks.
 */
static void hsw_crtc_disable(struct intel_atomic_state *state,
                             struct intel_crtc *crtc)
{
        const struct intel_crtc_state *old_crtc_state =
                intel_atomic_get_old_crtc_state(state, crtc);
        struct drm_i915_private *i915 = to_i915(crtc->base.dev);

        /*
         * FIXME collapse everything to one hook.
         * Need care with mst->ddi interactions.
         */
        if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) {
                /* Bigjoiner slaves: the master pipe runs these hooks. */
                intel_encoders_disable(state, crtc);
                intel_encoders_post_disable(state, crtc);
        }

        intel_disable_shared_dpll(old_crtc_state);

        intel_encoders_post_pll_disable(state, crtc);

        intel_dmc_disable_pipe(i915, crtc->pipe);
}
1708
/*
 * Enable the GMCH panel fitter for a pipe, if the computed state
 * requests it. Must be called while the pipe/transcoder is still
 * disabled (asserted below).
 */
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        /* No pfit requested for this mode. */
        if (!crtc_state->gmch_pfit.control)
                return;

        /*
         * The panel fitter should only be adjusted whilst the pipe is disabled,
         * according to register description and PRM.
         */
        drm_WARN_ON(&dev_priv->drm,
                    intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
        assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);

        /* Ratios must be written before enabling via PFIT_CONTROL. */
        intel_de_write(dev_priv, PFIT_PGM_RATIOS,
                       crtc_state->gmch_pfit.pgm_ratios);
        intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);

        /* Border color in case we don't scale up to the full screen. Black by
         * default, change to something else for debugging. */
        intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
}
1733
1734 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
1735 {
1736         if (phy == PHY_NONE)
1737                 return false;
1738         else if (IS_ALDERLAKE_S(dev_priv))
1739                 return phy <= PHY_E;
1740         else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
1741                 return phy <= PHY_D;
1742         else if (IS_JSL_EHL(dev_priv))
1743                 return phy <= PHY_C;
1744         else if (IS_ALDERLAKE_P(dev_priv) || IS_DISPLAY_VER(dev_priv, 11, 12))
1745                 return phy <= PHY_B;
1746         else
1747                 /*
1748                  * DG2 outputs labelled as "combo PHY" in the bspec use
1749                  * SNPS PHYs with completely different programming,
1750                  * hence we always return false here.
1751                  */
1752                 return false;
1753 }
1754
1755 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
1756 {
1757         if (IS_DG2(dev_priv))
1758                 /* DG2's "TC1" output uses a SNPS PHY */
1759                 return false;
1760         else if (IS_ALDERLAKE_P(dev_priv))
1761                 return phy >= PHY_F && phy <= PHY_I;
1762         else if (IS_TIGERLAKE(dev_priv))
1763                 return phy >= PHY_D && phy <= PHY_I;
1764         else if (IS_ICELAKE(dev_priv))
1765                 return phy >= PHY_C && phy <= PHY_F;
1766         else
1767                 return false;
1768 }
1769
1770 bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
1771 {
1772         if (phy == PHY_NONE)
1773                 return false;
1774         else if (IS_DG2(dev_priv))
1775                 /*
1776                  * All four "combo" ports and the TC1 port (PHY E) use
1777                  * Synopsis PHYs.
1778                  */
1779                 return phy <= PHY_E;
1780
1781         return false;
1782 }
1783
1784 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
1785 {
1786         if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
1787                 return PHY_D + port - PORT_D_XELPD;
1788         else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1)
1789                 return PHY_F + port - PORT_TC1;
1790         else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
1791                 return PHY_B + port - PORT_TC1;
1792         else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
1793                 return PHY_C + port - PORT_TC1;
1794         else if (IS_JSL_EHL(i915) && port == PORT_D)
1795                 return PHY_A;
1796
1797         return PHY_A + port - PORT_A;
1798 }
1799
1800 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
1801 {
1802         if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
1803                 return TC_PORT_NONE;
1804
1805         if (DISPLAY_VER(dev_priv) >= 12)
1806                 return TC_PORT_1 + port - PORT_TC1;
1807         else
1808                 return TC_PORT_1 + port - PORT_C;
1809 }
1810
1811 enum intel_display_power_domain
1812 intel_aux_power_domain(struct intel_digital_port *dig_port)
1813 {
1814         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
1815
1816         if (intel_tc_port_in_tbt_alt_mode(dig_port))
1817                 return intel_display_power_tbt_aux_domain(i915, dig_port->aux_ch);
1818
1819         return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
1820 }
1821
/*
 * Compute the set of power domains needed by @crtc_state while active:
 * pipe, transcoder, panel fitter (if enabled or force_thru), each
 * enabled encoder's domain, audio MMIO on DDI platforms, the display
 * core domain for a shared DPLL, and the DSC domain when compression
 * is enabled. An inactive crtc needs no domains (mask left zeroed).
 */
static void get_crtc_power_domains(struct intel_crtc_state *crtc_state,
                                   struct intel_power_domain_mask *mask)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
        struct drm_encoder *encoder;
        enum pipe pipe = crtc->pipe;

        bitmap_zero(mask->bits, POWER_DOMAIN_NUM);

        if (!crtc_state->hw.active)
                return;

        set_bit(POWER_DOMAIN_PIPE(pipe), mask->bits);
        set_bit(POWER_DOMAIN_TRANSCODER(cpu_transcoder), mask->bits);
        if (crtc_state->pch_pfit.enabled ||
            crtc_state->pch_pfit.force_thru)
                set_bit(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe), mask->bits);

        /* One domain per encoder driven by this crtc. */
        drm_for_each_encoder_mask(encoder, &dev_priv->drm,
                                  crtc_state->uapi.encoder_mask) {
                struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

                set_bit(intel_encoder->power_domain, mask->bits);
        }

        if (HAS_DDI(dev_priv) && crtc_state->has_audio)
                set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits);

        if (crtc_state->shared_dpll)
                set_bit(POWER_DOMAIN_DISPLAY_CORE, mask->bits);

        if (crtc_state->dsc.compression_enable)
                set_bit(intel_dsc_power_domain(crtc, cpu_transcoder), mask->bits);
}
1858
/*
 * intel_modeset_get_crtc_power_domains - grab domains for a new crtc state
 * @crtc_state: the crtc state being applied
 * @old_domains: returns the domains the crtc holds but no longer needs
 *
 * Acquires references on all power domains required by @crtc_state that
 * the crtc doesn't already hold. Domains that are held but no longer
 * required are reported via @old_domains; the caller is expected to
 * release those later (presumably with
 * intel_modeset_put_crtc_power_domains() once the hw is done with them).
 */
void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
                                          struct intel_power_domain_mask *old_domains)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum intel_display_power_domain domain;
        struct intel_power_domain_mask domains, new_domains;

        get_crtc_power_domains(crtc_state, &domains);

        /* new = needed & ~held; old = held & ~needed */
        bitmap_andnot(new_domains.bits,
                      domains.bits,
                      crtc->enabled_power_domains.mask.bits,
                      POWER_DOMAIN_NUM);
        bitmap_andnot(old_domains->bits,
                      crtc->enabled_power_domains.mask.bits,
                      domains.bits,
                      POWER_DOMAIN_NUM);

        for_each_power_domain(domain, &new_domains)
                intel_display_power_get_in_set(dev_priv,
                                               &crtc->enabled_power_domains,
                                               domain);
}
1883
/*
 * intel_modeset_put_crtc_power_domains - release a set of crtc power domains
 * @crtc: the crtc
 * @domains: mask of domains to drop from the crtc's enabled set
 */
void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc,
                                          struct intel_power_domain_mask *domains)
{
        intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
                                            &crtc->enabled_power_domains,
                                            domains);
}
1891
/*
 * Program the CPU transcoder for GMCH platforms: DP M/N values (DP
 * encoders only), transcoder timings, and pipeconf.
 */
static void i9xx_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

        /* Only DP encoders get data/link M/N programming here. */
        if (intel_crtc_has_dp_encoder(crtc_state)) {
                intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
                                               &crtc_state->dp_m_n);
                intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
                                               &crtc_state->dp_m2_n2);
        }

        intel_set_transcoder_timings(crtc_state);

        i9xx_set_pipeconf(crtc_state);
}
1908
/*
 * Enable a pipe on VLV/CHV: transcoder configuration first, then PLL
 * and encoder pre-enable hooks, pfit, LUTs, watermarks, and finally
 * the transcoder and encoder enable hooks.
 */
static void valleyview_crtc_enable(struct intel_atomic_state *state,
                                   struct intel_crtc *crtc)
{
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* Enabling an already-active crtc is a driver bug. */
        if (drm_WARN_ON(&dev_priv->drm, crtc->active))
                return;

        i9xx_configure_cpu_transcoder(new_crtc_state);

        intel_set_pipe_src_size(new_crtc_state);

        intel_de_write(dev_priv, VLV_PIPE_MSA_MISC(pipe), 0);

        /* CHV pipe B gets its blender/canvas set to legacy defaults. */
        if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
                intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
                intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
        }

        crtc->active = true;

        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

        intel_encoders_pre_pll_enable(state, crtc);

        if (IS_CHERRYVIEW(dev_priv))
                chv_enable_pll(new_crtc_state);
        else
                vlv_enable_pll(new_crtc_state);

        intel_encoders_pre_enable(state, crtc);

        i9xx_pfit_enable(new_crtc_state);

        intel_color_load_luts(new_crtc_state);
        intel_color_commit_noarm(new_crtc_state);
        intel_color_commit_arm(new_crtc_state);
        /* update DSPCNTR to configure gamma for pipe bottom color */
        intel_disable_primary_plane(new_crtc_state);

        intel_initial_watermarks(state, crtc);
        intel_enable_transcoder(new_crtc_state);

        intel_crtc_vblank_on(new_crtc_state);

        intel_encoders_enable(state, crtc);
}
1959
/*
 * Enable a pipe on pre-VLV GMCH platforms. Same general sequence as
 * the VLV path, but with the i9xx PLL and gen2 quirks (no underrun
 * reporting, extra vblank wait at the end).
 */
static void i9xx_crtc_enable(struct intel_atomic_state *state,
                             struct intel_crtc *crtc)
{
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* Enabling an already-active crtc is a driver bug. */
        if (drm_WARN_ON(&dev_priv->drm, crtc->active))
                return;

        i9xx_configure_cpu_transcoder(new_crtc_state);

        intel_set_pipe_src_size(new_crtc_state);

        crtc->active = true;

        /* Gen2 has no FIFO underrun reporting. */
        if (DISPLAY_VER(dev_priv) != 2)
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

        intel_encoders_pre_enable(state, crtc);

        i9xx_enable_pll(new_crtc_state);

        i9xx_pfit_enable(new_crtc_state);

        intel_color_load_luts(new_crtc_state);
        intel_color_commit_noarm(new_crtc_state);
        intel_color_commit_arm(new_crtc_state);
        /* update DSPCNTR to configure gamma for pipe bottom color */
        intel_disable_primary_plane(new_crtc_state);

        if (!intel_initial_watermarks(state, crtc))
                intel_update_watermarks(dev_priv);
        intel_enable_transcoder(new_crtc_state);

        intel_crtc_vblank_on(new_crtc_state);

        intel_encoders_enable(state, crtc);

        /* prevents spurious underruns */
        if (DISPLAY_VER(dev_priv) == 2)
                intel_crtc_wait_for_next_vblank(crtc);
}
2004
/*
 * Disable the GMCH panel fitter, if the old state had it enabled.
 * Must be called with the transcoder already disabled (asserted).
 */
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        if (!old_crtc_state->gmch_pfit.control)
                return;

        assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder);

        drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
                    intel_de_read(dev_priv, PFIT_CONTROL));
        intel_de_write(dev_priv, PFIT_CONTROL, 0);
}
2019
/*
 * Disable a pipe on GMCH platforms: encoders, vblank, transcoder,
 * pfit, then the PLL (skipped for DSI, whose PLL is handled by the
 * DSI code paths), with gen2 plane double-buffering handled up front.
 */
static void i9xx_crtc_disable(struct intel_atomic_state *state,
                              struct intel_crtc *crtc)
{
        struct intel_crtc_state *old_crtc_state =
                intel_atomic_get_old_crtc_state(state, crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /*
         * On gen2 planes are double buffered but the pipe isn't, so we must
         * wait for planes to fully turn off before disabling the pipe.
         */
        if (DISPLAY_VER(dev_priv) == 2)
                intel_crtc_wait_for_next_vblank(crtc);

        intel_encoders_disable(state, crtc);

        intel_crtc_vblank_off(old_crtc_state);

        intel_disable_transcoder(old_crtc_state);

        i9xx_pfit_disable(old_crtc_state);

        intel_encoders_post_disable(state, crtc);

        /* DSI PLLs are not handled here. */
        if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
                if (IS_CHERRYVIEW(dev_priv))
                        chv_disable_pll(dev_priv, pipe);
                else if (IS_VALLEYVIEW(dev_priv))
                        vlv_disable_pll(dev_priv, pipe);
                else
                        i9xx_disable_pll(old_crtc_state);
        }

        intel_encoders_post_pll_disable(state, crtc);

        /* Gen2 has no FIFO underrun reporting. */
        if (DISPLAY_VER(dev_priv) != 2)
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

        if (!dev_priv->display.funcs.wm->initial_watermarks)
                intel_update_watermarks(dev_priv);

        /* clock the pipe down to 640x480@60 to potentially save power */
        if (IS_I830(dev_priv))
                i830_enable_pipe(dev_priv, pipe);
}
2066
/*
 * intel_encoder_destroy - default drm_encoder destroy callback
 * @encoder: encoder to clean up and free
 */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
        struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

        drm_encoder_cleanup(encoder);
        kfree(intel_encoder);
}
2074
2075 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
2076 {
2077         const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2078
2079         /* GDG double wide on either pipe, otherwise pipe A only */
2080         return DISPLAY_VER(dev_priv) < 4 &&
2081                 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
2082 }
2083
/*
 * Compute the effective pipe pixel rate on ILK+, accounting for the
 * pch pfit: when downscaling, the pipe has to fetch pixels faster
 * than the nominal dotclock.
 */
static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
{
        u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
        struct drm_rect src;

        /*
         * We only use IF-ID interlacing. If we ever use
         * PF-ID we'll need to adjust the pixel_rate here.
         */

        /* Without pfit the pixel rate is just the dotclock. */
        if (!crtc_state->pch_pfit.enabled)
                return pixel_rate;

        /* Source rect in 16.16 fixed point, as intel_adjusted_rate() expects. */
        drm_rect_init(&src, 0, 0,
                      drm_rect_width(&crtc_state->pipe_src) << 16,
                      drm_rect_height(&crtc_state->pipe_src) << 16);

        return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst,
                                   pixel_rate);
}
2104
2105 static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
2106                                          const struct drm_display_mode *timings)
2107 {
2108         mode->hdisplay = timings->crtc_hdisplay;
2109         mode->htotal = timings->crtc_htotal;
2110         mode->hsync_start = timings->crtc_hsync_start;
2111         mode->hsync_end = timings->crtc_hsync_end;
2112
2113         mode->vdisplay = timings->crtc_vdisplay;
2114         mode->vtotal = timings->crtc_vtotal;
2115         mode->vsync_start = timings->crtc_vsync_start;
2116         mode->vsync_end = timings->crtc_vsync_end;
2117
2118         mode->flags = timings->flags;
2119         mode->type = DRM_MODE_TYPE_DRIVER;
2120
2121         mode->clock = timings->crtc_clock;
2122
2123         drm_mode_set_name(mode);
2124 }
2125
2126 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
2127 {
2128         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2129
2130         if (HAS_GMCH(dev_priv))
2131                 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
2132                 crtc_state->pixel_rate =
2133                         crtc_state->hw.pipe_mode.crtc_clock;
2134         else
2135                 crtc_state->pixel_rate =
2136                         ilk_pipe_pixel_rate(crtc_state);
2137 }
2138
2139 static void intel_bigjoiner_adjust_timings(const struct intel_crtc_state *crtc_state,
2140                                            struct drm_display_mode *mode)
2141 {
2142         int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
2143
2144         if (num_pipes < 2)
2145                 return;
2146
2147         mode->crtc_clock /= num_pipes;
2148         mode->crtc_hdisplay /= num_pipes;
2149         mode->crtc_hblank_start /= num_pipes;
2150         mode->crtc_hblank_end /= num_pipes;
2151         mode->crtc_hsync_start /= num_pipes;
2152         mode->crtc_hsync_end /= num_pipes;
2153         mode->crtc_htotal /= num_pipes;
2154 }
2155
2156 static void intel_splitter_adjust_timings(const struct intel_crtc_state *crtc_state,
2157                                           struct drm_display_mode *mode)
2158 {
2159         int overlap = crtc_state->splitter.pixel_overlap;
2160         int n = crtc_state->splitter.link_count;
2161
2162         if (!crtc_state->splitter.enable)
2163                 return;
2164
2165         /*
2166          * eDP MSO uses segment timings from EDID for transcoder
2167          * timings, but full mode for everything else.
2168          *
2169          * h_full = (h_segment - pixel_overlap) * link_count
2170          */
2171         mode->crtc_hdisplay = (mode->crtc_hdisplay - overlap) * n;
2172         mode->crtc_hblank_start = (mode->crtc_hblank_start - overlap) * n;
2173         mode->crtc_hblank_end = (mode->crtc_hblank_end - overlap) * n;
2174         mode->crtc_hsync_start = (mode->crtc_hsync_start - overlap) * n;
2175         mode->crtc_hsync_end = (mode->crtc_hsync_end - overlap) * n;
2176         mode->crtc_htotal = (mode->crtc_htotal - overlap) * n;
2177         mode->crtc_clock *= n;
2178 }
2179
/*
 * Derive hw.mode, hw.pipe_mode and the normal timings of
 * hw.adjusted_mode from the transcoder timings read out into
 * adjusted_mode's crtc_* fields, undoing MSO segmentation and
 * applying bigjoiner per-pipe splitting along the way.
 */
static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
{
        struct drm_display_mode *mode = &crtc_state->hw.mode;
        struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
        struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

        /*
         * Start with the adjusted_mode crtc timings, which
         * have been filled with the transcoder timings.
         */
        drm_mode_copy(pipe_mode, adjusted_mode);

        /* Expand MSO per-segment transcoder timings to full */
        intel_splitter_adjust_timings(crtc_state, pipe_mode);

        /*
         * We want the full numbers in adjusted_mode normal timings,
         * adjusted_mode crtc timings are left with the raw transcoder
         * timings.
         */
        intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);

        /* Populate the "user" mode with full numbers */
        drm_mode_copy(mode, pipe_mode);
        intel_mode_from_crtc_timings(mode, mode);
        /* hdisplay covers all joined pipes; pipe_src holds one pipe's share. */
        mode->hdisplay = drm_rect_width(&crtc_state->pipe_src) *
                (intel_bigjoiner_num_pipes(crtc_state) ?: 1);
        mode->vdisplay = drm_rect_height(&crtc_state->pipe_src);

        /* Derive per-pipe timings in case bigjoiner is used */
        intel_bigjoiner_adjust_timings(crtc_state, pipe_mode);
        intel_mode_from_crtc_timings(pipe_mode, pipe_mode);

        intel_crtc_compute_pixel_rate(crtc_state);
}
2215
/*
 * intel_encoder_get_config - read out encoder config and derived crtc state
 * @encoder: encoder to read out
 * @crtc_state: crtc state to fill
 *
 * Calls the encoder's get_config hook, then derives the pipe/user
 * modes and pixel rate from the readout.
 */
void intel_encoder_get_config(struct intel_encoder *encoder,
                              struct intel_crtc_state *crtc_state)
{
        encoder->get_config(encoder, crtc_state);

        intel_crtc_readout_derived_state(crtc_state);
}
2223
2224 static void intel_bigjoiner_compute_pipe_src(struct intel_crtc_state *crtc_state)
2225 {
2226         int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
2227         int width, height;
2228
2229         if (num_pipes < 2)
2230                 return;
2231
2232         width = drm_rect_width(&crtc_state->pipe_src);
2233         height = drm_rect_height(&crtc_state->pipe_src);
2234
2235         drm_rect_init(&crtc_state->pipe_src, 0, 0,
2236                       width / num_pipes, height);
2237 }
2238
/*
 * Compute the final pipe source size (after bigjoiner splitting) and
 * validate the hardware constraint that the width must be even in
 * double wide and dual-link LVDS modes. Returns 0 or -EINVAL.
 */
static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *i915 = to_i915(crtc->base.dev);

        intel_bigjoiner_compute_pipe_src(crtc_state);

        /*
         * Pipe horizontal size must be even in:
         * - DVO ganged mode
         * - LVDS dual channel mode
         * - Double wide pipe
         */
        if (drm_rect_width(&crtc_state->pipe_src) & 1) {
                if (crtc_state->double_wide) {
                        drm_dbg_kms(&i915->drm,
                                    "[CRTC:%d:%s] Odd pipe source width not supported with double wide pipe\n",
                                    crtc->base.base.id, crtc->base.name);
                        return -EINVAL;
                }

                if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
                    intel_is_dual_link_lvds(i915)) {
                        drm_dbg_kms(&i915->drm,
                                    "[CRTC:%d:%s] Odd pipe source width not supported with dual link LVDS\n",
                                    crtc->base.base.id, crtc->base.name);
                        return -EINVAL;
                }
        }

        return 0;
}
2271
/*
 * Derive hw.pipe_mode from hw.adjusted_mode (expanding MSO segments,
 * splitting for bigjoiner), decide on double wide mode for pre-gen4,
 * and validate the dotclock against the platform limit.
 * Returns 0 or -EINVAL.
 */
static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *i915 = to_i915(crtc->base.dev);
        struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
        struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
        int clock_limit = i915->max_dotclk_freq;

        /*
         * Start with the adjusted_mode crtc timings, which
         * have been filled with the transcoder timings.
         */
        drm_mode_copy(pipe_mode, adjusted_mode);

        /* Expand MSO per-segment transcoder timings to full */
        intel_splitter_adjust_timings(crtc_state, pipe_mode);

        /* Derive per-pipe timings in case bigjoiner is used */
        intel_bigjoiner_adjust_timings(crtc_state, pipe_mode);
        intel_mode_from_crtc_timings(pipe_mode, pipe_mode);

        if (DISPLAY_VER(i915) < 4) {
                /* Pre-gen4 single wide pipes are limited to 90% of cdclk. */
                clock_limit = i915->display.cdclk.max_cdclk_freq * 9 / 10;

                /*
                 * Enable double wide mode when the dot clock
                 * is > 90% of the (display) core speed.
                 */
                if (intel_crtc_supports_double_wide(crtc) &&
                    pipe_mode->crtc_clock > clock_limit) {
                        clock_limit = i915->max_dotclk_freq;
                        crtc_state->double_wide = true;
                }
        }

        if (pipe_mode->crtc_clock > clock_limit) {
                drm_dbg_kms(&i915->drm,
                            "[CRTC:%d:%s] requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
                            crtc->base.base.id, crtc->base.name,
                            pipe_mode->crtc_clock, clock_limit,
                            str_yes_no(crtc_state->double_wide));
                return -EINVAL;
        }

        return 0;
}
2318
/*
 * Compute the crtc-level parts of the new state: clocks/PLL, pipe
 * source size, pipe mode, pixel rate, and FDI config for PCH
 * encoders. Returns 0 or a negative error code from any step.
 */
static int intel_crtc_compute_config(struct intel_atomic_state *state,
                                     struct intel_crtc *crtc)
{
        struct intel_crtc_state *crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        int ret;

        ret = intel_dpll_crtc_compute_clock(state, crtc);
        if (ret)
                return ret;

        ret = intel_crtc_compute_pipe_src(crtc_state);
        if (ret)
                return ret;

        /* May set double_wide, which pipe_src validation relies on being done first. */
        ret = intel_crtc_compute_pipe_mode(crtc_state);
        if (ret)
                return ret;

        intel_crtc_compute_pixel_rate(crtc_state);

        if (crtc_state->has_pch_encoder)
                return ilk_fdi_compute_config(crtc, crtc_state);

        return 0;
}
2345
2346 static void
2347 intel_reduce_m_n_ratio(u32 *num, u32 *den)
2348 {
2349         while (*num > DATA_LINK_M_N_MASK ||
2350                *den > DATA_LINK_M_N_MASK) {
2351                 *num >>= 1;
2352                 *den >>= 1;
2353         }
2354 }
2355
2356 static void compute_m_n(u32 *ret_m, u32 *ret_n,
2357                         u32 m, u32 n, u32 constant_n)
2358 {
2359         if (constant_n)
2360                 *ret_n = constant_n;
2361         else
2362                 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
2363
2364         *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
2365         intel_reduce_m_n_ratio(ret_m, ret_n);
2366 }
2367
2368 void
2369 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
2370                        int pixel_clock, int link_clock,
2371                        struct intel_link_m_n *m_n,
2372                        bool fec_enable)
2373 {
2374         u32 data_clock = bits_per_pixel * pixel_clock;
2375
2376         if (fec_enable)
2377                 data_clock = intel_dp_mode_to_fec_clock(data_clock);
2378
2379         /*
2380          * Windows/BIOS uses fixed M/N values always. Follow suit.
2381          *
2382          * Also several DP dongles in particular seem to be fussy
2383          * about too large link M/N values. Presumably the 20bit
2384          * value used by Windows/BIOS is acceptable to everyone.
2385          */
2386         m_n->tu = 64;
2387         compute_m_n(&m_n->data_m, &m_n->data_n,
2388                     data_clock, link_clock * nlanes * 8,
2389                     0x8000000);
2390
2391         compute_m_n(&m_n->link_m, &m_n->link_n,
2392                     pixel_clock, link_clock,
2393                     0x80000);
2394 }
2395
2396 void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
2397 {
2398         /*
2399          * There may be no VBT; and if the BIOS enabled SSC we can
2400          * just keep using it to avoid unnecessary flicker.  Whereas if the
2401          * BIOS isn't using it, don't assume it will work even if the VBT
2402          * indicates as much.
2403          */
2404         if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
2405                 bool bios_lvds_use_ssc = intel_de_read(dev_priv,
2406                                                        PCH_DREF_CONTROL) &
2407                         DREF_SSC1_ENABLE;
2408
2409                 if (dev_priv->display.vbt.lvds_use_ssc != bios_lvds_use_ssc) {
2410                         drm_dbg_kms(&dev_priv->drm,
2411                                     "SSC %s by BIOS, overriding VBT which says %s\n",
2412                                     str_enabled_disabled(bios_lvds_use_ssc),
2413                                     str_enabled_disabled(dev_priv->display.vbt.lvds_use_ssc));
2414                         dev_priv->display.vbt.lvds_use_ssc = bios_lvds_use_ssc;
2415                 }
2416         }
2417 }
2418
/* Reset @m_n to the all-zero register state; TU is 1 since it is stored as N-1. */
void intel_zero_m_n(struct intel_link_m_n *m_n)
{
        /* corresponds to 0 register value */
        memset(m_n, 0, sizeof(*m_n));
        m_n->tu = 1;
}
2425
/*
 * Program one complete set of data/link M/N registers. TU is encoded
 * into the data M register alongside the M value.
 */
void intel_set_m_n(struct drm_i915_private *i915,
                   const struct intel_link_m_n *m_n,
                   i915_reg_t data_m_reg, i915_reg_t data_n_reg,
                   i915_reg_t link_m_reg, i915_reg_t link_n_reg)
{
        intel_de_write(i915, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m);
        intel_de_write(i915, data_n_reg, m_n->data_n);
        intel_de_write(i915, link_m_reg, m_n->link_m);
        /*
         * On BDW+ writing LINK_N arms the double buffered update
         * of all the M/N registers, so it must be written last.
         */
        intel_de_write(i915, link_n_reg, m_n->link_n);
}
2440
2441 bool intel_cpu_transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
2442                                     enum transcoder transcoder)
2443 {
2444         if (IS_HASWELL(dev_priv))
2445                 return transcoder == TRANSCODER_EDP;
2446
2447         return IS_DISPLAY_VER(dev_priv, 5, 7) || IS_CHERRYVIEW(dev_priv);
2448 }
2449
2450 void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc,
2451                                     enum transcoder transcoder,
2452                                     const struct intel_link_m_n *m_n)
2453 {
2454         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2455         enum pipe pipe = crtc->pipe;
2456
2457         if (DISPLAY_VER(dev_priv) >= 5)
2458                 intel_set_m_n(dev_priv, m_n,
2459                               PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder),
2460                               PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder));
2461         else
2462                 intel_set_m_n(dev_priv, m_n,
2463                               PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
2464                               PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
2465 }
2466
2467 void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc,
2468                                     enum transcoder transcoder,
2469                                     const struct intel_link_m_n *m_n)
2470 {
2471         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2472
2473         if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
2474                 return;
2475
2476         intel_set_m_n(dev_priv, m_n,
2477                       PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder),
2478                       PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder));
2479 }
2480
/*
 * Program the transcoder h/v timing registers from the adjusted mode.
 * All timing registers store each value minus one.
 */
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
        const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
        u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end;
        int vsyncshift = 0;

        /* We need to be careful not to changed the adjusted mode, for otherwise
         * the hw state checker will get angry at the mismatch. */
        crtc_vdisplay = adjusted_mode->crtc_vdisplay;
        crtc_vtotal = adjusted_mode->crtc_vtotal;
        crtc_vblank_start = adjusted_mode->crtc_vblank_start;
        crtc_vblank_end = adjusted_mode->crtc_vblank_end;

        if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
                /* the chip adds 2 halflines automatically */
                crtc_vtotal -= 1;
                crtc_vblank_end -= 1;

                /* SDVO outputs need a different field vsync shift */
                if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
                        vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
                else
                        vsyncshift = adjusted_mode->crtc_hsync_start -
                                adjusted_mode->crtc_htotal / 2;
                if (vsyncshift < 0)
                        vsyncshift += adjusted_mode->crtc_htotal;
        }

        /*
         * VBLANK_START no longer works on ADL+, instead we must use
         * TRANS_SET_CONTEXT_LATENCY to configure the pipe vblank start.
         */
        if (DISPLAY_VER(dev_priv) >= 13) {
                intel_de_write(dev_priv, TRANS_SET_CONTEXT_LATENCY(cpu_transcoder),
                               crtc_vblank_start - crtc_vdisplay);

                /*
                 * VBLANK_START not used by hw, just clear it
                 * to make it stand out in register dumps.
                 */
                crtc_vblank_start = 1;
        }

        /* VSYNCSHIFT register only exists on gen4+ */
        if (DISPLAY_VER(dev_priv) > 3)
                intel_de_write(dev_priv, TRANS_VSYNCSHIFT(cpu_transcoder),
                               vsyncshift);

        /* Horizontal timings */
        intel_de_write(dev_priv, TRANS_HTOTAL(cpu_transcoder),
                       HACTIVE(adjusted_mode->crtc_hdisplay - 1) |
                       HTOTAL(adjusted_mode->crtc_htotal - 1));
        intel_de_write(dev_priv, TRANS_HBLANK(cpu_transcoder),
                       HBLANK_START(adjusted_mode->crtc_hblank_start - 1) |
                       HBLANK_END(adjusted_mode->crtc_hblank_end - 1));
        intel_de_write(dev_priv, TRANS_HSYNC(cpu_transcoder),
                       HSYNC_START(adjusted_mode->crtc_hsync_start - 1) |
                       HSYNC_END(adjusted_mode->crtc_hsync_end - 1));

        /* Vertical timings, using the locally adjusted copies from above */
        intel_de_write(dev_priv, TRANS_VTOTAL(cpu_transcoder),
                       VACTIVE(crtc_vdisplay - 1) |
                       VTOTAL(crtc_vtotal - 1));
        intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder),
                       VBLANK_START(crtc_vblank_start - 1) |
                       VBLANK_END(crtc_vblank_end - 1));
        intel_de_write(dev_priv, TRANS_VSYNC(cpu_transcoder),
                       VSYNC_START(adjusted_mode->crtc_vsync_start - 1) |
                       VSYNC_END(adjusted_mode->crtc_vsync_end - 1));

        /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
         * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
         * documented on the DDI_FUNC_CTL register description, EDP Input Select
         * bits. */
        if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
            (pipe == PIPE_B || pipe == PIPE_C))
                intel_de_write(dev_priv, TRANS_VTOTAL(pipe),
                               VACTIVE(crtc_vdisplay - 1) |
                               VTOTAL(crtc_vtotal - 1));
}
2561
2562 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
2563 {
2564         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2565         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2566         int width = drm_rect_width(&crtc_state->pipe_src);
2567         int height = drm_rect_height(&crtc_state->pipe_src);
2568         enum pipe pipe = crtc->pipe;
2569
2570         /* pipesrc controls the size that is scaled from, which should
2571          * always be the user's requested size.
2572          */
2573         intel_de_write(dev_priv, PIPESRC(pipe),
2574                        PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
2575 }
2576
2577 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
2578 {
2579         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2580         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2581
2582         if (DISPLAY_VER(dev_priv) == 2)
2583                 return false;
2584
2585         if (DISPLAY_VER(dev_priv) >= 9 ||
2586             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
2587                 return intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)) & TRANSCONF_INTERLACE_MASK_HSW;
2588         else
2589                 return intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)) & TRANSCONF_INTERLACE_MASK;
2590 }
2591
/*
 * Read the transcoder h/v timing registers back into the adjusted
 * mode. Registers store each value minus one, hence the +1s. This
 * mirrors intel_set_transcoder_timings(), including the interlace
 * and ADL+ vblank start adjustments.
 */
static void intel_get_transcoder_timings(struct intel_crtc *crtc,
                                         struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
        struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
        u32 tmp;

        tmp = intel_de_read(dev_priv, TRANS_HTOTAL(cpu_transcoder));
        adjusted_mode->crtc_hdisplay = REG_FIELD_GET(HACTIVE_MASK, tmp) + 1;
        adjusted_mode->crtc_htotal = REG_FIELD_GET(HTOTAL_MASK, tmp) + 1;

        /* DSI transcoders have no HBLANK register */
        if (!transcoder_is_dsi(cpu_transcoder)) {
                tmp = intel_de_read(dev_priv, TRANS_HBLANK(cpu_transcoder));
                adjusted_mode->crtc_hblank_start = REG_FIELD_GET(HBLANK_START_MASK, tmp) + 1;
                adjusted_mode->crtc_hblank_end = REG_FIELD_GET(HBLANK_END_MASK, tmp) + 1;
        }

        tmp = intel_de_read(dev_priv, TRANS_HSYNC(cpu_transcoder));
        adjusted_mode->crtc_hsync_start = REG_FIELD_GET(HSYNC_START_MASK, tmp) + 1;
        adjusted_mode->crtc_hsync_end = REG_FIELD_GET(HSYNC_END_MASK, tmp) + 1;

        tmp = intel_de_read(dev_priv, TRANS_VTOTAL(cpu_transcoder));
        adjusted_mode->crtc_vdisplay = REG_FIELD_GET(VACTIVE_MASK, tmp) + 1;
        adjusted_mode->crtc_vtotal = REG_FIELD_GET(VTOTAL_MASK, tmp) + 1;

        /* FIXME TGL+ DSI transcoders have this! */
        if (!transcoder_is_dsi(cpu_transcoder)) {
                tmp = intel_de_read(dev_priv, TRANS_VBLANK(cpu_transcoder));
                adjusted_mode->crtc_vblank_start = REG_FIELD_GET(VBLANK_START_MASK, tmp) + 1;
                adjusted_mode->crtc_vblank_end = REG_FIELD_GET(VBLANK_END_MASK, tmp) + 1;
        }
        tmp = intel_de_read(dev_priv, TRANS_VSYNC(cpu_transcoder));
        adjusted_mode->crtc_vsync_start = REG_FIELD_GET(VSYNC_START_MASK, tmp) + 1;
        adjusted_mode->crtc_vsync_end = REG_FIELD_GET(VSYNC_END_MASK, tmp) + 1;

        /* Undo the -1 applied at programming time for interlaced modes */
        if (intel_pipe_is_interlaced(pipe_config)) {
                adjusted_mode->flags |= DRM_MODE_FLAG_INTERLACE;
                adjusted_mode->crtc_vtotal += 1;
                adjusted_mode->crtc_vblank_end += 1;
        }

        /* On ADL+ vblank start lives in TRANS_SET_CONTEXT_LATENCY */
        if (DISPLAY_VER(dev_priv) >= 13 && !transcoder_is_dsi(cpu_transcoder))
                adjusted_mode->crtc_vblank_start =
                        adjusted_mode->crtc_vdisplay +
                        intel_de_read(dev_priv, TRANS_SET_CONTEXT_LATENCY(cpu_transcoder));
}
2640
2641 static void intel_bigjoiner_adjust_pipe_src(struct intel_crtc_state *crtc_state)
2642 {
2643         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2644         int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
2645         enum pipe master_pipe, pipe = crtc->pipe;
2646         int width;
2647
2648         if (num_pipes < 2)
2649                 return;
2650
2651         master_pipe = bigjoiner_master_pipe(crtc_state);
2652         width = drm_rect_width(&crtc_state->pipe_src);
2653
2654         drm_rect_translate_to(&crtc_state->pipe_src,
2655                               (pipe - master_pipe) * width, 0);
2656 }
2657
2658 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
2659                                     struct intel_crtc_state *pipe_config)
2660 {
2661         struct drm_device *dev = crtc->base.dev;
2662         struct drm_i915_private *dev_priv = to_i915(dev);
2663         u32 tmp;
2664
2665         tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
2666
2667         drm_rect_init(&pipe_config->pipe_src, 0, 0,
2668                       REG_FIELD_GET(PIPESRC_WIDTH_MASK, tmp) + 1,
2669                       REG_FIELD_GET(PIPESRC_HEIGHT_MASK, tmp) + 1);
2670
2671         intel_bigjoiner_adjust_pipe_src(pipe_config);
2672 }
2673
/*
 * Program TRANSCONF for GMCH-era (pre-ilk, VLV/CHV) pipes: enable
 * state, double wide, bpc/dither, interlace mode, color range and
 * gamma mode, all derived from the precomputed crtc state.
 */
void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
        u32 val = 0;

        /*
         * - We keep both pipes enabled on 830
         * - During modeset the pipe is still disabled and must remain so
         * - During fastset the pipe is already enabled and must remain so
         */
        if (IS_I830(dev_priv) || !intel_crtc_needs_modeset(crtc_state))
                val |= TRANSCONF_ENABLE;

        if (crtc_state->double_wide)
                val |= TRANSCONF_DOUBLE_WIDE;

        /* only g4x and later have fancy bpc/dither controls */
        if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
            IS_CHERRYVIEW(dev_priv)) {
                /* Bspec claims that we can't use dithering for 30bpp pipes. */
                if (crtc_state->dither && crtc_state->pipe_bpp != 30)
                        val |= TRANSCONF_DITHER_EN |
                                TRANSCONF_DITHER_TYPE_SP;

                switch (crtc_state->pipe_bpp) {
                default:
                        /* Case prevented by intel_choose_pipe_bpp_dither. */
                        MISSING_CASE(crtc_state->pipe_bpp);
                        fallthrough;
                case 18:
                        val |= TRANSCONF_BPC_6;
                        break;
                case 24:
                        val |= TRANSCONF_BPC_8;
                        break;
                case 30:
                        val |= TRANSCONF_BPC_10;
                        break;
                }
        }

        if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
                /* pre-gen4 and SDVO need the field indication variant */
                if (DISPLAY_VER(dev_priv) < 4 ||
                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
                        val |= TRANSCONF_INTERLACE_W_FIELD_INDICATION;
                else
                        val |= TRANSCONF_INTERLACE_W_SYNC_SHIFT;
        } else {
                val |= TRANSCONF_INTERLACE_PROGRESSIVE;
        }

        if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
             crtc_state->limited_color_range)
                val |= TRANSCONF_COLOR_RANGE_SELECT;

        val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode);

        /* framestart_delay is stored as value minus one */
        val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);

        intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);
        intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
}
2738
2739 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
2740 {
2741         if (IS_I830(dev_priv))
2742                 return false;
2743
2744         return DISPLAY_VER(dev_priv) >= 4 ||
2745                 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
2746 }
2747
2748 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
2749 {
2750         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2751         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2752         u32 tmp;
2753
2754         if (!i9xx_has_pfit(dev_priv))
2755                 return;
2756
2757         tmp = intel_de_read(dev_priv, PFIT_CONTROL);
2758         if (!(tmp & PFIT_ENABLE))
2759                 return;
2760
2761         /* Check whether the pfit is attached to our pipe. */
2762         if (DISPLAY_VER(dev_priv) < 4) {
2763                 if (crtc->pipe != PIPE_B)
2764                         return;
2765         } else {
2766                 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
2767                         return;
2768         }
2769
2770         crtc_state->gmch_pfit.control = tmp;
2771         crtc_state->gmch_pfit.pgm_ratios =
2772                 intel_de_read(dev_priv, PFIT_PGM_RATIOS);
2773 }
2774
/*
 * Read out the VLV DPLL divider settings via the DPIO sideband and
 * reconstruct port_clock for hw state readout.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
                               struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        struct dpll clock;
        u32 mdiv;
        int refclk = 100000; /* 100 MHz reference, in kHz */

        /* In case of DSI, DPLL will not be used */
        if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
                return;

        vlv_dpio_get(dev_priv);
        mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
        vlv_dpio_put(dev_priv);

        /* Unpack the m1/m2/n/p1/p2 divider fields from PLL_DW3 */
        clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
        clock.m2 = mdiv & DPIO_M2DIV_MASK;
        clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
        clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
        clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

        pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
2801
/*
 * Read out the CHV DPLL divider settings via the DPIO sideband and
 * reconstruct port_clock for hw state readout.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
                               struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        struct dpll clock;
        u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
        int refclk = 100000; /* 100 MHz reference, in kHz */

        /* In case of DSI, DPLL will not be used */
        if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
                return;

        vlv_dpio_get(dev_priv);
        cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
        pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
        pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
        pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
        pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
        vlv_dpio_put(dev_priv);

        /* m2 has a 22-bit fractional part, enabled via PLL_DW3 */
        clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
        clock.m2 = (pll_dw0 & 0xff) << 22;
        if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
                clock.m2 |= pll_dw2 & 0x3fffff;
        clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
        clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
        clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

        pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
2835
2836 static enum intel_output_format
2837 bdw_get_pipe_misc_output_format(struct intel_crtc *crtc)
2838 {
2839         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2840         u32 tmp;
2841
2842         tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe));
2843
2844         if (tmp & PIPE_MISC_YUV420_ENABLE) {
2845                 /* We support 4:2:0 in full blend mode only */
2846                 drm_WARN_ON(&dev_priv->drm,
2847                             (tmp & PIPE_MISC_YUV420_MODE_FULL_BLEND) == 0);
2848
2849                 return INTEL_OUTPUT_FORMAT_YCBCR420;
2850         } else if (tmp & PIPE_MISC_OUTPUT_COLORSPACE_YUV) {
2851                 return INTEL_OUTPUT_FORMAT_YCBCR444;
2852         } else {
2853                 return INTEL_OUTPUT_FORMAT_RGB;
2854         }
2855 }
2856
2857 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
2858 {
2859         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2860         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
2861         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2862         enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
2863         u32 tmp;
2864
2865         tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
2866
2867         if (tmp & DISP_PIPE_GAMMA_ENABLE)
2868                 crtc_state->gamma_enable = true;
2869
2870         if (!HAS_GMCH(dev_priv) &&
2871             tmp & DISP_PIPE_CSC_ENABLE)
2872                 crtc_state->csc_enable = true;
2873 }
2874
/*
 * Read out the full hw state of a GMCH-era pipe into @pipe_config.
 * Returns true if the pipe's power well was on, the pipe enabled,
 * and the readout completed; false otherwise. The pipe power domain
 * is held across all register accesses.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
                                 struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum intel_display_power_domain power_domain;
        intel_wakeref_t wakeref;
        u32 tmp;
        bool ret;

        power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
        wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (!wakeref)
                return false;

        pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = NULL;

        ret = false;

        /* Nothing further to read out unless the pipe is enabled */
        tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder));
        if (!(tmp & TRANSCONF_ENABLE))
                goto out;

        if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
            IS_CHERRYVIEW(dev_priv)) {
                switch (tmp & TRANSCONF_BPC_MASK) {
                case TRANSCONF_BPC_6:
                        pipe_config->pipe_bpp = 18;
                        break;
                case TRANSCONF_BPC_8:
                        pipe_config->pipe_bpp = 24;
                        break;
                case TRANSCONF_BPC_10:
                        pipe_config->pipe_bpp = 30;
                        break;
                default:
                        MISSING_CASE(tmp);
                        break;
                }
        }

        if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
            (tmp & TRANSCONF_COLOR_RANGE_SELECT))
                pipe_config->limited_color_range = true;

        pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_I9XX, tmp);

        /* register stores the delay as value minus one */
        pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1;

        if (IS_CHERRYVIEW(dev_priv))
                pipe_config->cgm_mode = intel_de_read(dev_priv,
                                                      CGM_PIPE_MODE(crtc->pipe));

        i9xx_get_pipe_color_config(pipe_config);
        intel_color_get_config(pipe_config);

        if (DISPLAY_VER(dev_priv) < 4)
                pipe_config->double_wide = tmp & TRANSCONF_DOUBLE_WIDE;

        intel_get_transcoder_timings(crtc, pipe_config);
        intel_get_pipe_src_size(crtc, pipe_config);

        i9xx_get_pfit_config(pipe_config);

        /* Pixel multiplier readout differs per platform generation */
        if (DISPLAY_VER(dev_priv) >= 4) {
                /* No way to read it out on pipes B and C */
                if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
                        tmp = dev_priv->display.state.chv_dpll_md[crtc->pipe];
                else
                        tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
                pipe_config->pixel_multiplier =
                        ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
                         >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
                pipe_config->dpll_hw_state.dpll_md = tmp;
        } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
                   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
                tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
                pipe_config->pixel_multiplier =
                        ((tmp & SDVO_MULTIPLIER_MASK)
                         >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
        } else {
                /* Note that on i915G/GM the pixel multiplier is in the sdvo
                 * port and will be fixed up in the encoder->get_config
                 * function. */
                pipe_config->pixel_multiplier = 1;
        }
        pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
                                                        DPLL(crtc->pipe));
        if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
                pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
                                                               FP0(crtc->pipe));
                pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
                                                               FP1(crtc->pipe));
        } else {
                /* Mask out read-only status bits. */
                pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
                                                     DPLL_PORTC_READY_MASK |
                                                     DPLL_PORTB_READY_MASK);
        }

        if (IS_CHERRYVIEW(dev_priv))
                chv_crtc_clock_get(crtc, pipe_config);
        else if (IS_VALLEYVIEW(dev_priv))
                vlv_crtc_clock_get(crtc, pipe_config);
        else
                i9xx_crtc_clock_get(crtc, pipe_config);

        /*
         * Normally the dotclock is filled in by the encoder .get_config()
         * but in case the pipe is enabled w/o any ports we need a sane
         * default.
         */
        pipe_config->hw.adjusted_mode.crtc_clock =
                pipe_config->port_clock / pipe_config->pixel_multiplier;

        ret = true;

out:
        intel_display_power_put(dev_priv, power_domain, wakeref);

        return ret;
}
2998
/*
 * Program TRANSCONF for ilk-gen7 pipes: enable state, bpc, dither,
 * interlace mode, color range/space, gamma mode and frame start /
 * MSA timing delays, all from the precomputed crtc state.
 */
void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
        u32 val = 0;

        /*
         * - During modeset the pipe is still disabled and must remain so
         * - During fastset the pipe is already enabled and must remain so
         */
        if (!intel_crtc_needs_modeset(crtc_state))
                val |= TRANSCONF_ENABLE;

        switch (crtc_state->pipe_bpp) {
        default:
                /* Case prevented by intel_choose_pipe_bpp_dither. */
                MISSING_CASE(crtc_state->pipe_bpp);
                fallthrough;
        case 18:
                val |= TRANSCONF_BPC_6;
                break;
        case 24:
                val |= TRANSCONF_BPC_8;
                break;
        case 30:
                val |= TRANSCONF_BPC_10;
                break;
        case 36:
                val |= TRANSCONF_BPC_12;
                break;
        }

        if (crtc_state->dither)
                val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP;

        if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
                val |= TRANSCONF_INTERLACE_IF_ID_ILK;
        else
                val |= TRANSCONF_INTERLACE_PF_PD_ILK;

        /*
         * This would end up with an odd purple hue over
         * the entire display. Make sure we don't do it.
         */
        drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
                    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);

        if (crtc_state->limited_color_range &&
            !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
                val |= TRANSCONF_COLOR_RANGE_SELECT;

        if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
                val |= TRANSCONF_OUTPUT_COLORSPACE_YUV709;

        val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode);

        /* framestart_delay is stored as value minus one */
        val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
        val |= TRANSCONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay);

        intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);
        intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
}
3062
/*
 * Program TRANSCONF for a HSW+ DDI transcoder. Only the fields this
 * code still programs in TRANSCONF on these platforms are set here:
 * enable state, dithering (HSW only), interlace mode and the YCbCr
 * colorspace select (HSW only).
 */
static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 val = 0;

	/*
	 * - During modeset the pipe is still disabled and must remain so
	 * - During fastset the pipe is already enabled and must remain so
	 */
	if (!intel_crtc_needs_modeset(crtc_state))
		val |= TRANSCONF_ENABLE;

	if (IS_HASWELL(dev_priv) && crtc_state->dither)
		val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP;

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= TRANSCONF_INTERLACE_IF_ID_ILK;
	else
		val |= TRANSCONF_INTERLACE_PF_PD_ILK;

	if (IS_HASWELL(dev_priv) &&
	    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW;

	intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);
	/* posting read to flush the write */
	intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
}
3092
/*
 * Program PIPE_MISC on BDW+: port output bpc, dithering, YCbCr
 * output colorspace / 4:2:0 mode, HDR precision (ICL+) and pixel
 * rounding (TGL+).
 */
static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 val = 0;

	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPE_MISC_BPC_6;
		break;
	case 24:
		val |= PIPE_MISC_BPC_8;
		break;
	case 30:
		val |= PIPE_MISC_BPC_10;
		break;
	case 36:
		/* Port output 12BPC defined for ADLP+ */
		if (DISPLAY_VER(dev_priv) > 12)
			val |= PIPE_MISC_BPC_12_ADLP;
		break;
	default:
		MISSING_CASE(crtc_state->pipe_bpp);
		break;
	}

	if (crtc_state->dither)
		val |= PIPE_MISC_DITHER_ENABLE | PIPE_MISC_DITHER_TYPE_SP;

	/* any YCbCr output (4:4:4 or 4:2:0) selects the YUV colorspace */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
		val |= PIPE_MISC_OUTPUT_COLORSPACE_YUV;

	/* 4:2:0 additionally needs the YUV420 full blend mode */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		val |= PIPE_MISC_YUV420_ENABLE |
			PIPE_MISC_YUV420_MODE_FULL_BLEND;

	if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state))
		val |= PIPE_MISC_HDR_MODE_PRECISION;

	if (DISPLAY_VER(dev_priv) >= 12)
		val |= PIPE_MISC_PIXEL_ROUNDING_TRUNC;

	intel_de_write(dev_priv, PIPE_MISC(crtc->pipe), val);
}
3138
/*
 * Read back the pipe bpp (3 * bpc) from the PIPE_MISC bpc field.
 *
 * Returns 18/24/30/36, or 0 for an unknown/unsupported register
 * value.
 */
int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe));

	switch (tmp & PIPE_MISC_BPC_MASK) {
	case PIPE_MISC_BPC_6:
		return 18;
	case PIPE_MISC_BPC_8:
		return 24;
	case PIPE_MISC_BPC_10:
		return 30;
	/*
	 * PORT OUTPUT 12 BPC defined for ADLP+.
	 *
	 * TODO:
	 * For previous platforms with DSI interface, bits 5:7
	 * are used for storing pipe_bpp irrespective of dithering.
	 * Since the value of 12 BPC is not defined for these bits
	 * on older platforms, need to find a workaround for 12 BPC
	 * MIPI DSI HW readout.
	 */
	case PIPE_MISC_BPC_12_ADLP:
		if (DISPLAY_VER(dev_priv) > 12)
			return 36;
		fallthrough;
	default:
		MISSING_CASE(tmp);
		return 0;
	}
}
3172
3173 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
3174 {
3175         /*
3176          * Account for spread spectrum to avoid
3177          * oversubscribing the link. Max center spread
3178          * is 2.5%; use 5% for safety's sake.
3179          */
3180         u32 bps = target_clock * bpp * 21 / 20;
3181         return DIV_ROUND_UP(bps, link_bw * 8);
3182 }
3183
/*
 * Read back a data/link M/N value pair from the given register
 * quartet. The TU size shares the data M register; the field is
 * stored 0-based in hardware, hence the +1.
 */
void intel_get_m_n(struct drm_i915_private *i915,
		   struct intel_link_m_n *m_n,
		   i915_reg_t data_m_reg, i915_reg_t data_n_reg,
		   i915_reg_t link_m_reg, i915_reg_t link_n_reg)
{
	m_n->link_m = intel_de_read(i915, link_m_reg) & DATA_LINK_M_N_MASK;
	m_n->link_n = intel_de_read(i915, link_n_reg) & DATA_LINK_M_N_MASK;
	m_n->data_m = intel_de_read(i915, data_m_reg) & DATA_LINK_M_N_MASK;
	m_n->data_n = intel_de_read(i915, data_n_reg) & DATA_LINK_M_N_MASK;
	m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(i915, data_m_reg)) + 1;
}
3195
3196 void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc,
3197                                     enum transcoder transcoder,
3198                                     struct intel_link_m_n *m_n)
3199 {
3200         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3201         enum pipe pipe = crtc->pipe;
3202
3203         if (DISPLAY_VER(dev_priv) >= 5)
3204                 intel_get_m_n(dev_priv, m_n,
3205                               PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder),
3206                               PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder));
3207         else
3208                 intel_get_m_n(dev_priv, m_n,
3209                               PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
3210                               PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
3211 }
3212
3213 void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc,
3214                                     enum transcoder transcoder,
3215                                     struct intel_link_m_n *m_n)
3216 {
3217         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3218
3219         if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
3220                 return;
3221
3222         intel_get_m_n(dev_priv, m_n,
3223                       PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder),
3224                       PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder));
3225 }
3226
3227 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
3228                                   u32 pos, u32 size)
3229 {
3230         drm_rect_init(&crtc_state->pch_pfit.dst,
3231                       pos >> 16, pos & 0xffff,
3232                       size >> 16, size & 0xffff);
3233 }
3234
/*
 * Read out the SKL+ panel fitter state: scan this pipe's scalers for
 * one that is enabled and bound to the pipe itself (not to a plane),
 * and record its window and scaler id in the crtc state.
 */
static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
	int id = -1; /* -1 = no scaler doing pipe scaling */
	int i;

	/* find scaler attached to this pipe */
	for (i = 0; i < crtc->num_scalers; i++) {
		u32 ctl, pos, size;

		ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
		/* must be enabled with no plane selected (== pipe scaling) */
		if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
			continue;

		id = i;
		crtc_state->pch_pfit.enabled = true;

		pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
		size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));

		ilk_get_pfit_pos_size(crtc_state, pos, size);

		scaler_state->scalers[i].in_use = true;
		break;
	}

	scaler_state->scaler_id = id;
	if (id >= 0)
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	else
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
}
3269
/*
 * Read out the ILK-IVB panel fitter state for this pipe into the
 * crtc state (enabled flag and destination window).
 */
static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 ctl, pos, size;

	ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
	if ((ctl & PF_ENABLE) == 0)
		return;

	crtc_state->pch_pfit.enabled = true;

	pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
	size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));

	ilk_get_pfit_pos_size(crtc_state, pos, size);

	/*
	 * We currently do not free assignments of panel fitters on
	 * ivb/hsw (since we don't use the higher upscaling modes which
	 * differentiates them) so just WARN about this case for now.
	 */
	drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 7 &&
		    (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
}
3295
/*
 * Read out the full pipe state for ILK-IVB style hardware. The pipe
 * power domain is held for the duration of the readout and released
 * before returning.
 *
 * Returns true if the pipe is active, false if it is disabled or its
 * power well is off.
 */
static bool ilk_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	/* pipe and transcoder have a fixed 1:1 mapping on these platforms */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;
	tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder));
	if (!(tmp & TRANSCONF_ENABLE))
		goto out;

	switch (tmp & TRANSCONF_BPC_MASK) {
	case TRANSCONF_BPC_6:
		pipe_config->pipe_bpp = 18;
		break;
	case TRANSCONF_BPC_8:
		pipe_config->pipe_bpp = 24;
		break;
	case TRANSCONF_BPC_10:
		pipe_config->pipe_bpp = 30;
		break;
	case TRANSCONF_BPC_12:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & TRANSCONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	/* both YCbCr encodings (601/709) read back as the 4:4:4 format */
	switch (tmp & TRANSCONF_OUTPUT_COLORSPACE_MASK) {
	case TRANSCONF_OUTPUT_COLORSPACE_YUV601:
	case TRANSCONF_OUTPUT_COLORSPACE_YUV709:
		pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		break;
	default:
		pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
		break;
	}

	pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_ILK, tmp);

	/* the register field is 0-based, the state stores it 1-based */
	pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1;

	pipe_config->msa_timing_delay = REG_FIELD_GET(TRANSCONF_MSA_TIMING_DELAY_MASK, tmp);

	pipe_config->csc_mode = intel_de_read(dev_priv,
					      PIPE_CSC_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	pipe_config->pixel_multiplier = 1;

	ilk_pch_get_config(pipe_config);

	intel_get_transcoder_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	ilk_get_pfit_config(pipe_config);

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
3377
3378 static u8 bigjoiner_pipes(struct drm_i915_private *i915)
3379 {
3380         u8 pipes;
3381
3382         if (DISPLAY_VER(i915) >= 12)
3383                 pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D);
3384         else if (DISPLAY_VER(i915) >= 11)
3385                 pipes = BIT(PIPE_B) | BIT(PIPE_C);
3386         else
3387                 pipes = 0;
3388
3389         return pipes & RUNTIME_INFO(i915)->pipe_mask;
3390 }
3391
/*
 * Check whether the DDI function of the given CPU transcoder is
 * enabled. Also returns false when the transcoder's power domain is
 * off — the register read is then skipped and tmp stays 0.
 */
static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
					   enum transcoder cpu_transcoder)
{
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp = 0;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);

	with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
		tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));

	return tmp & TRANS_DDI_FUNC_ENABLE;
}
3406
/*
 * Determine which pipes are currently acting as bigjoiner masters
 * and slaves, from both the compressed (DSC) joiner bits and, on
 * display ver 13+, the uncompressed joiner bits in
 * ICL_PIPE_DSS_CTL1.
 */
static void enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv,
				    u8 *master_pipes, u8 *slave_pipes)
{
	struct intel_crtc *crtc;

	*master_pipes = 0;
	*slave_pipes = 0;

	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc,
					 bigjoiner_pipes(dev_priv)) {
		enum intel_display_power_domain power_domain;
		enum pipe pipe = crtc->pipe;
		intel_wakeref_t wakeref;

		power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe);
		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
			u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));

			/* continue exits the power macro's loop, releasing the wakeref */
			if (!(tmp & BIG_JOINER_ENABLE))
				continue;

			if (tmp & MASTER_BIG_JOINER_ENABLE)
				*master_pipes |= BIT(pipe);
			else
				*slave_pipes |= BIT(pipe);
		}

		/* the uncompressed joiner bits are only read on ver 13+ */
		if (DISPLAY_VER(dev_priv) < 13)
			continue;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
			u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));

			if (tmp & UNCOMPRESSED_JOINER_MASTER)
				*master_pipes |= BIT(pipe);
			if (tmp & UNCOMPRESSED_JOINER_SLAVE)
				*slave_pipes |= BIT(pipe);
		}
	}

	/* Bigjoiner pipes should always be consecutive master and slave */
	drm_WARN(&dev_priv->drm, *slave_pipes != *master_pipes << 1,
		 "Bigjoiner misconfigured (master pipes 0x%x, slave pipes 0x%x)\n",
		 *master_pipes, *slave_pipes);
}
3453
/*
 * Return the bigjoiner master pipe for @pipe: the pipe itself when
 * it is not a slave, otherwise the nearest master pipe below it.
 */
static enum pipe get_bigjoiner_master_pipe(enum pipe pipe, u8 master_pipes, u8 slave_pipes)
{
	if ((slave_pipes & BIT(pipe)) == 0)
		return pipe;

	/* ignore everything above our pipe */
	master_pipes &= ~GENMASK(7, pipe);

	/* highest remaining bit should be our master pipe */
	return fls(master_pipes) - 1;
}
3465
/*
 * Return the mask of slave pipes belonging to the same bigjoiner
 * configuration as @pipe: the slaves between our master pipe and
 * the next master. Returns 0 when @pipe is not part of a bigjoiner
 * configuration.
 */
static u8 get_bigjoiner_slave_pipes(enum pipe pipe, u8 master_pipes, u8 slave_pipes)
{
	enum pipe master_pipe, next_master_pipe;

	master_pipe = get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes);

	if ((master_pipes & BIT(master_pipe)) == 0)
		return 0;

	/* ignore our master pipe and everything below it */
	master_pipes &= ~GENMASK(master_pipe, 0);
	/* make sure a high bit is set for the ffs() */
	master_pipes |= BIT(7);
	/* lowest remaining bit should be the next master pipe */
	next_master_pipe = ffs(master_pipes) - 1;

	return slave_pipes & GENMASK(next_master_pipe - 1, master_pipe);
}
3484
3485 static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
3486 {
3487         u8 panel_transcoder_mask = BIT(TRANSCODER_EDP);
3488
3489         if (DISPLAY_VER(i915) >= 11)
3490                 panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
3491
3492         return panel_transcoder_mask;
3493 }
3494
/*
 * Build the mask of CPU transcoders currently driving this crtc's
 * pipe: any panel transcoder (eDP/DSI) routed to the pipe, the
 * pipe's own transcoder, and — when the pipe is a bigjoiner slave —
 * the master pipe's transcoder.
 */
static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv);
	enum transcoder cpu_transcoder;
	u8 master_pipes, slave_pipes;
	u8 enabled_transcoders = 0;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder,
				       panel_transcoder_mask) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;
		enum pipe trans_pipe;
		u32 tmp = 0;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
			tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));

		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			continue;

		/* decode which pipe this panel transcoder is routed to */
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			drm_WARN(dev, 1,
				 "unknown pipe linked to transcoder %s\n",
				 transcoder_name(cpu_transcoder));
			fallthrough;
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_pipe = PIPE_C;
			break;
		case TRANS_DDI_EDP_INPUT_D_ONOFF:
			trans_pipe = PIPE_D;
			break;
		}

		if (trans_pipe == crtc->pipe)
			enabled_transcoders |= BIT(cpu_transcoder);
	}

	/* single pipe or bigjoiner master */
	cpu_transcoder = (enum transcoder) crtc->pipe;
	if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
		enabled_transcoders |= BIT(cpu_transcoder);

	/* bigjoiner slave -> consider the master pipe's transcoder as well */
	enabled_bigjoiner_pipes(dev_priv, &master_pipes, &slave_pipes);
	if (slave_pipes & BIT(crtc->pipe)) {
		cpu_transcoder = (enum transcoder)
			get_bigjoiner_master_pipe(crtc->pipe, master_pipes, slave_pipes);
		if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
			enabled_transcoders |= BIT(cpu_transcoder);
	}

	return enabled_transcoders;
}
3563
3564 static bool has_edp_transcoders(u8 enabled_transcoders)
3565 {
3566         return enabled_transcoders & BIT(TRANSCODER_EDP);
3567 }
3568
3569 static bool has_dsi_transcoders(u8 enabled_transcoders)
3570 {
3571         return enabled_transcoders & (BIT(TRANSCODER_DSI_0) |
3572                                       BIT(TRANSCODER_DSI_1));
3573 }
3574
3575 static bool has_pipe_transcoders(u8 enabled_transcoders)
3576 {
3577         return enabled_transcoders & ~(BIT(TRANSCODER_EDP) |
3578                                        BIT(TRANSCODER_DSI_0) |
3579                                        BIT(TRANSCODER_DSI_1));
3580 }
3581
/*
 * Sanity-check the readout: at most one type of transcoder may drive
 * a pipe, and only DSI transcoders may be enabled in pairs (ganged).
 */
static void assert_enabled_transcoders(struct drm_i915_private *i915,
				       u8 enabled_transcoders)
{
	/* Only one type of transcoder please */
	drm_WARN_ON(&i915->drm,
		    has_edp_transcoders(enabled_transcoders) +
		    has_dsi_transcoders(enabled_transcoders) +
		    has_pipe_transcoders(enabled_transcoders) > 1);

	/* Only DSI transcoders can be ganged */
	drm_WARN_ON(&i915->drm,
		    !has_dsi_transcoders(enabled_transcoders) &&
		    !is_power_of_2(enabled_transcoders));
}
3596
/*
 * Determine which CPU transcoder drives the pipe and whether it is
 * enabled. On success the transcoder power domain is left referenced
 * in @power_domain_set for the caller to release.
 *
 * Returns true if the transcoder is powered up and enabled.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long enabled_transcoders;
	u32 tmp;

	enabled_transcoders = hsw_enabled_transcoders(crtc);
	if (!enabled_transcoders)
		return false;

	assert_enabled_transcoders(dev_priv, enabled_transcoders);

	/*
	 * With the exception of DSI we should only ever have
	 * a single enabled transcoder. With DSI let's just
	 * pick the first one.
	 */
	pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1;

	if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
						       POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
		return false;

	/* panel transcoder in A_ONOFF mode implies pfit force-through */
	if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

		if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF)
			pipe_config->pch_pfit.force_thru = true;
	}

	tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder));

	return tmp & TRANSCONF_ENABLE;
}
3634
/*
 * BXT/GLK DSI readout: find the DSI transcoder (if any) that is
 * enabled and driving this crtc's pipe. The transcoder power domain
 * is left referenced in @power_domain_set for the caller to release.
 *
 * Returns true if a DSI transcoder was found for the pipe.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder;
	enum port port;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
							       POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
			continue;

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* this DSI port must be routed to our pipe */
		tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
3680
3681 static void intel_bigjoiner_get_config(struct intel_crtc_state *crtc_state)
3682 {
3683         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3684         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3685         u8 master_pipes, slave_pipes;
3686         enum pipe pipe = crtc->pipe;
3687
3688         enabled_bigjoiner_pipes(i915, &master_pipes, &slave_pipes);
3689
3690         if (((master_pipes | slave_pipes) & BIT(pipe)) == 0)
3691                 return;
3692
3693         crtc_state->bigjoiner_pipes =
3694                 BIT(get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes)) |
3695                 get_bigjoiner_slave_pipes(pipe, master_pipes, slave_pipes);
3696 }
3697
/*
 * Read out the full pipe/transcoder state on HSW+ hardware. All
 * power domains taken during the readout are collected in
 * crtc->hw_readout_power_domains and released before returning.
 *
 * Returns true if the pipe is active, false otherwise.
 */
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	bool active;
	u32 tmp;

	if (!intel_display_power_get_in_set_if_enabled(dev_priv, &crtc->hw_readout_power_domains,
						       POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains);

	/* BXT/GLK DSI transcoders are read out separately from the DDI ones */
	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains)) {
		drm_WARN_ON(&dev_priv->drm, active);
		active = true;
	}

	if (!active)
		goto out;

	intel_dsc_get_config(pipe_config);
	intel_bigjoiner_get_config(pipe_config);

	if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
	    DISPLAY_VER(dev_priv) >= 11)
		intel_get_transcoder_timings(crtc, pipe_config);

	if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
		intel_vrr_get_config(pipe_config);

	intel_get_pipe_src_size(crtc, pipe_config);

	/* HSW keeps the colorspace in TRANSCONF; later gens use PIPE_MISC */
	if (IS_HASWELL(dev_priv)) {
		u32 tmp = intel_de_read(dev_priv,
					TRANSCONF(pipe_config->cpu_transcoder));

		if (tmp & TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW)
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		else
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	} else {
		pipe_config->output_format =
			bdw_get_pipe_misc_output_format(crtc);
	}

	pipe_config->gamma_mode = intel_de_read(dev_priv,
						GAMMA_MODE(crtc->pipe));

	pipe_config->csc_mode = intel_de_read(dev_priv,
					      PIPE_CSC_MODE(crtc->pipe));

	if (DISPLAY_VER(dev_priv) >= 9) {
		tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));

		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
			pipe_config->gamma_enable = true;

		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
			pipe_config->csc_enable = true;
	} else {
		i9xx_get_pipe_color_config(pipe_config);
	}

	intel_color_get_config(pipe_config);

	tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
	pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		pipe_config->ips_linetime =
			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);

	/* the panel fitter readout needs its own power domain */
	if (intel_display_power_get_in_set_if_enabled(dev_priv, &crtc->hw_readout_power_domains,
						      POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
		if (DISPLAY_VER(dev_priv) >= 9)
			skl_get_pfit_config(pipe_config);
		else
			ilk_get_pfit_config(pipe_config);
	}

	hsw_ips_get_config(pipe_config);

	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			intel_de_read(dev_priv,
				      TRANS_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	/* the frame start delay lives in the (MTL_)CHICKEN_TRANS register */
	if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, DISPLAY_VER(dev_priv) >= 14 ?
				    MTL_CHICKEN_TRANS(pipe_config->cpu_transcoder) :
				    CHICKEN_TRANS(pipe_config->cpu_transcoder));

		pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1;
	} else {
		/* no idea if this is correct */
		pipe_config->framestart_delay = 1;
	}

out:
	intel_display_power_put_all_in_set(dev_priv, &crtc->hw_readout_power_domains);

	return active;
}
3808
3809 bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
3810 {
3811         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3812         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3813
3814         if (!i915->display.funcs.display->get_pipe_config(crtc, crtc_state))
3815                 return false;
3816
3817         crtc_state->hw.active = true;
3818
3819         intel_crtc_readout_derived_state(crtc_state);
3820
3821         return true;
3822 }
3823
/*
 * VESA 640x480@72Hz mode: a fixed, universally supported mode used to
 * temporarily drive a pipe for load-based connector detection.
 */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
3829
3830 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
3831                                         struct drm_crtc *crtc)
3832 {
3833         struct drm_plane *plane;
3834         struct drm_plane_state *plane_state;
3835         int ret, i;
3836
3837         ret = drm_atomic_add_affected_planes(state, crtc);
3838         if (ret)
3839                 return ret;
3840
3841         for_each_new_plane_in_state(state, plane, plane_state, i) {
3842                 if (plane_state->crtc != crtc)
3843                         continue;
3844
3845                 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
3846                 if (ret)
3847                         return ret;
3848
3849                 drm_atomic_set_fb_for_plane(plane_state, NULL);
3850         }
3851
3852         return 0;
3853 }
3854
/*
 * intel_get_load_detect_pipe - enable a pipe for load-based connector detection
 * @connector: connector whose encoder needs a running pipe
 * @old: storage for the state needed by intel_release_load_detect_pipe()
 * @ctx: modeset acquire context, held by the caller
 *
 * Temporarily drives @connector with a fixed VESA 640x480 mode and all
 * planes disabled, so the encoder can perform load detection.  A copy of
 * the previous atomic state is stashed in @old->restore_state; pass @old
 * to intel_release_load_detect_pipe() to undo everything.
 *
 * Returns: true on success, false on failure, or -EDEADLK when the
 * locking context must be backed off and the operation retried.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder =
		intel_attached_encoder(to_intel_connector(connector));
	struct intel_crtc *possible_crtc;
	struct intel_crtc *crtc = NULL;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		    connector->base.id, connector->name,
		    encoder->base.base.id, encoder->base.name);

	old->restore_state = NULL;

	drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = to_intel_crtc(connector->state->crtc);

		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_intel_crtc(dev, possible_crtc) {
		if (!(encoder->base.possible_crtcs &
		      drm_crtc_mask(&possible_crtc->base)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->base.mutex, ctx);
		if (ret)
			goto fail;

		/* Skip CRTCs that are already in use */
		if (possible_crtc->base.state->enable) {
			drm_modeset_unlock(&possible_crtc->base.mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "no pipe available for load-detect\n");
		ret = -ENODEV;
		goto fail;
	}

found:
	/* One state to enable the load-detect mode, one to restore afterwards */
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	to_intel_atomic_state(state)->internal = true;

	restore_state->acquire_ctx = ctx;
	to_intel_atomic_state(restore_state)->internal = true;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, &crtc->base);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->uapi.active = true;

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
					   &load_detect_mode);
	if (ret)
		goto fail;

	/* Keep all planes off while the load-detect mode is up */
	ret = intel_modeset_disable_planes(state, &crtc->base);
	if (ret)
		goto fail;

	/* Snapshot the pieces of the old state we'll need to restore */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, &crtc->base));
	if (!ret)
		ret = drm_atomic_add_affected_planes(restore_state, &crtc->base);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to create a copy of old state to restore: %i\n",
			    ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to set mode on load-detect pipe\n");
		goto fail;
	}

	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_crtc_wait_for_next_vblank(crtc);

	return true;

fail:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	/* -EDEADLK tells the caller to drop all locks and retry */
	if (ret == -EDEADLK)
		return ret;

	return false;
}
4013
4014 void intel_release_load_detect_pipe(struct drm_connector *connector,
4015                                     struct intel_load_detect_pipe *old,
4016                                     struct drm_modeset_acquire_ctx *ctx)
4017 {
4018         struct intel_encoder *intel_encoder =
4019                 intel_attached_encoder(to_intel_connector(connector));
4020         struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
4021         struct drm_encoder *encoder = &intel_encoder->base;
4022         struct drm_atomic_state *state = old->restore_state;
4023         int ret;
4024
4025         drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
4026                     connector->base.id, connector->name,
4027                     encoder->base.id, encoder->name);
4028
4029         if (!state)
4030                 return;
4031
4032         ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
4033         if (ret)
4034                 drm_dbg_kms(&i915->drm,
4035                             "Couldn't release load detect pipe: %i\n", ret);
4036         drm_atomic_state_put(state);
4037 }
4038
4039 static int i9xx_pll_refclk(struct drm_device *dev,
4040                            const struct intel_crtc_state *pipe_config)
4041 {
4042         struct drm_i915_private *dev_priv = to_i915(dev);
4043         u32 dpll = pipe_config->dpll_hw_state.dpll;
4044
4045         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
4046                 return dev_priv->display.vbt.lvds_ssc_freq;
4047         else if (HAS_PCH_SPLIT(dev_priv))
4048                 return 120000;
4049         else if (DISPLAY_VER(dev_priv) != 2)
4050                 return 96000;
4051         else
4052                 return 48000;
4053 }
4054
/* Returns the clock of the currently programmed mode of the given pipe. */
void i9xx_crtc_clock_get(struct intel_crtc *crtc,
			 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* Pick the FP divisor register currently selected by the DPLL */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	/* Decode M/N divisors; Pineview uses different field layouts */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (DISPLAY_VER(dev_priv) != 2) {
		/* P1 is stored as a one-hot bitfield; ffs() recovers the divisor */
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* P2 depends on the DPLL operating mode */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			drm_dbg_kms(&dev_priv->drm,
				    "Unknown DPLL mode %08x in programmed "
				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* Gen2: divisor encoding differs, and LVDS has its own P1/P2 */
		enum pipe lvds_pipe;

		if (IS_I85X(dev_priv) &&
		    intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
		    lvds_pipe == crtc->pipe) {
			u32 lvds = intel_de_read(dev_priv, LVDS);

			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			/* P2 is 7 with both LVDS channels powered up, else 14 */
			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
4147
4148 int intel_dotclock_calculate(int link_freq,
4149                              const struct intel_link_m_n *m_n)
4150 {
4151         /*
4152          * The calculation for the data clock is:
4153          * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
4154          * But we want to avoid losing precison if possible, so:
4155          * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
4156          *
4157          * and the link clock is simpler:
4158          * link_clock = (m * link_clock) / n
4159          */
4160
4161         if (!m_n->link_n)
4162                 return 0;
4163
4164         return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq),
4165                                 m_n->link_n);
4166 }
4167
4168 int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config)
4169 {
4170         int dotclock;
4171
4172         if (intel_crtc_has_dp_encoder(pipe_config))
4173                 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
4174                                                     &pipe_config->dp_m_n);
4175         else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24)
4176                 dotclock = DIV_ROUND_CLOSEST(pipe_config->port_clock * 24,
4177                                              pipe_config->pipe_bpp);
4178         else
4179                 dotclock = pipe_config->port_clock;
4180
4181         if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
4182             !intel_crtc_has_dp_encoder(pipe_config))
4183                 dotclock *= 2;
4184
4185         if (pipe_config->pixel_multiplier)
4186                 dotclock /= pipe_config->pixel_multiplier;
4187
4188         return dotclock;
4189 }
4190
4191 /* Returns the currently programmed mode of the given encoder. */
4192 struct drm_display_mode *
4193 intel_encoder_current_mode(struct intel_encoder *encoder)
4194 {
4195         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4196         struct intel_crtc_state *crtc_state;
4197         struct drm_display_mode *mode;
4198         struct intel_crtc *crtc;
4199         enum pipe pipe;
4200
4201         if (!encoder->get_hw_state(encoder, &pipe))
4202                 return NULL;
4203
4204         crtc = intel_crtc_for_pipe(dev_priv, pipe);
4205
4206         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
4207         if (!mode)
4208                 return NULL;
4209
4210         crtc_state = intel_crtc_state_alloc(crtc);
4211         if (!crtc_state) {
4212                 kfree(mode);
4213                 return NULL;
4214         }
4215
4216         if (!intel_crtc_get_pipe_config(crtc_state)) {
4217                 kfree(crtc_state);
4218                 kfree(mode);
4219                 return NULL;
4220         }
4221
4222         intel_encoder_get_config(encoder, crtc_state);
4223
4224         intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
4225
4226         kfree(crtc_state);
4227
4228         return mode;
4229 }
4230
4231 static bool encoders_cloneable(const struct intel_encoder *a,
4232                                const struct intel_encoder *b)
4233 {
4234         /* masks could be asymmetric, so check both ways */
4235         return a == b || (a->cloneable & BIT(b->type) &&
4236                           b->cloneable & BIT(a->type));
4237 }
4238
4239 static bool check_single_encoder_cloning(struct intel_atomic_state *state,
4240                                          struct intel_crtc *crtc,
4241                                          struct intel_encoder *encoder)
4242 {
4243         struct intel_encoder *source_encoder;
4244         struct drm_connector *connector;
4245         struct drm_connector_state *connector_state;
4246         int i;
4247
4248         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
4249                 if (connector_state->crtc != &crtc->base)
4250                         continue;
4251
4252                 source_encoder =
4253                         to_intel_encoder(connector_state->best_encoder);
4254                 if (!encoders_cloneable(encoder, source_encoder))
4255                         return false;
4256         }
4257
4258         return true;
4259 }
4260
4261 static int icl_add_linked_planes(struct intel_atomic_state *state)
4262 {
4263         struct intel_plane *plane, *linked;
4264         struct intel_plane_state *plane_state, *linked_plane_state;
4265         int i;
4266
4267         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
4268                 linked = plane_state->planar_linked_plane;
4269
4270                 if (!linked)
4271                         continue;
4272
4273                 linked_plane_state = intel_atomic_get_plane_state(state, linked);
4274                 if (IS_ERR(linked_plane_state))
4275                         return PTR_ERR(linked_plane_state);
4276
4277                 drm_WARN_ON(state->base.dev,
4278                             linked_plane_state->planar_linked_plane != plane);
4279                 drm_WARN_ON(state->base.dev,
4280                             linked_plane_state->planar_slave == plane_state->planar_slave);
4281         }
4282
4283         return 0;
4284 }
4285
/*
 * On ICL+ each NV12 (planar YUV) plane needs a second hardware plane to
 * scan out the Y component.  Tear down all stale Y-plane links for this
 * CRTC, then pair every NV12 plane in the state with a free Y-capable
 * plane, copying the relevant parameters to the slave.
 *
 * Returns 0 on success, -EINVAL when no free Y plane is available, or a
 * negative error from adding a plane to the state.
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;
	int i;

	/* Y-plane linking only exists on display version 11+ */
	if (DISPLAY_VER(dev_priv) < 11)
		return 0;

	/*
	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
			continue;

		plane_state->planar_linked_plane = NULL;
		if (plane_state->planar_slave && !plane_state->uapi.visible) {
			crtc_state->enabled_planes &= ~BIT(plane->id);
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);
			crtc_state->data_rate[plane->id] = 0;
			crtc_state->rel_data_rate[plane->id] = 0;
		}

		plane_state->planar_slave = false;
	}

	/* Nothing to pair up if no plane uses an NV12 format */
	if (!crtc_state->nv12_planes)
		return 0;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;

		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))
			continue;

		/* Find a currently unused Y-capable plane on this CRTC */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(dev_priv, linked->id))
				continue;

			if (crtc_state->active_planes & BIT(linked->id))
				continue;

			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);

			break;
		}

		if (!linked_state) {
			drm_dbg_kms(&dev_priv->drm,
				    "Need %d free Y planes for planar YUV\n",
				    hweight8(crtc_state->nv12_planes));

			return -EINVAL;
		}

		/* Establish the mutual link and activate the slave plane */
		plane_state->planar_linked_plane = linked;

		linked_state->planar_slave = true;
		linked_state->planar_linked_plane = plane;
		crtc_state->enabled_planes |= BIT(linked->id);
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		crtc_state->data_rate[linked->id] =
			crtc_state->data_rate_y[plane->id];
		crtc_state->rel_data_rate[linked->id] =
			crtc_state->rel_data_rate_y[plane->id];
		drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
			    linked->base.name, plane->base.name);

		/* Copy parameters to slave plane */
		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
		linked_state->color_ctl = plane_state->color_ctl;
		linked_state->view = plane_state->view;
		linked_state->decrypt = plane_state->decrypt;

		intel_plane_copy_hw_state(linked_state, plane_state);
		linked_state->uapi.src = plane_state->uapi.src;
		linked_state->uapi.dst = plane_state->uapi.dst;

		/* HDR planes additionally need the CUS register pointed at the Y plane */
		if (icl_is_hdr_plane(dev_priv, plane->id)) {
			if (linked->id == PLANE_SPRITE5)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_7_ICL;
			else if (linked->id == PLANE_SPRITE4)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_6_ICL;
			else if (linked->id == PLANE_SPRITE3)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_5_RKL;
			else if (linked->id == PLANE_SPRITE2)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_4_RKL;
			else
				MISSING_CASE(linked->id);
		}
	}

	return 0;
}
4390
4391 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
4392 {
4393         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
4394         struct intel_atomic_state *state =
4395                 to_intel_atomic_state(new_crtc_state->uapi.state);
4396         const struct intel_crtc_state *old_crtc_state =
4397                 intel_atomic_get_old_crtc_state(state, crtc);
4398
4399         return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
4400 }
4401
4402 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
4403 {
4404         const struct drm_display_mode *pipe_mode =
4405                 &crtc_state->hw.pipe_mode;
4406         int linetime_wm;
4407
4408         if (!crtc_state->hw.enable)
4409                 return 0;
4410
4411         linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
4412                                         pipe_mode->crtc_clock);
4413
4414         return min(linetime_wm, 0x1ff);
4415 }
4416
4417 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
4418                                const struct intel_cdclk_state *cdclk_state)
4419 {
4420         const struct drm_display_mode *pipe_mode =
4421                 &crtc_state->hw.pipe_mode;
4422         int linetime_wm;
4423
4424         if (!crtc_state->hw.enable)
4425                 return 0;
4426
4427         linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
4428                                         cdclk_state->logical.cdclk);
4429
4430         return min(linetime_wm, 0x1ff);
4431 }
4432
4433 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
4434 {
4435         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4436         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4437         const struct drm_display_mode *pipe_mode =
4438                 &crtc_state->hw.pipe_mode;
4439         int linetime_wm;
4440
4441         if (!crtc_state->hw.enable)
4442                 return 0;
4443
4444         linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
4445                                    crtc_state->pixel_rate);
4446
4447         /* Display WA #1135: BXT:ALL GLK:ALL */
4448         if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
4449             skl_watermark_ipc_enabled(dev_priv))
4450                 linetime_wm /= 2;
4451
4452         return min(linetime_wm, 0x1ff);
4453 }
4454
4455 static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
4456                                    struct intel_crtc *crtc)
4457 {
4458         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4459         struct intel_crtc_state *crtc_state =
4460                 intel_atomic_get_new_crtc_state(state, crtc);
4461         const struct intel_cdclk_state *cdclk_state;
4462
4463         if (DISPLAY_VER(dev_priv) >= 9)
4464                 crtc_state->linetime = skl_linetime_wm(crtc_state);
4465         else
4466                 crtc_state->linetime = hsw_linetime_wm(crtc_state);
4467
4468         if (!hsw_crtc_supports_ips(crtc))
4469                 return 0;
4470
4471         cdclk_state = intel_atomic_get_cdclk_state(state);
4472         if (IS_ERR(cdclk_state))
4473                 return PTR_ERR(cdclk_state);
4474
4475         crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
4476                                                        cdclk_state);
4477
4478         return 0;
4479 }
4480
/*
 * Per-CRTC atomic check: validates the new CRTC state and computes all
 * derived state (DPLL assignment, color management, watermarks, scalers,
 * IPS, linetime, PSR2 selective fetch).
 *
 * Returns 0 on success or a negative error code to fail the commit.
 */
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	int ret;

	/* Pre-gen5 (except g4x) needs a post-modeset WM update when disabling */
	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
	    intel_crtc_needs_modeset(crtc_state) &&
	    !crtc_state->hw.active)
		crtc_state->update_wm_post = true;

	if (intel_crtc_needs_modeset(crtc_state)) {
		ret = intel_dpll_crtc_get_shared_dpll(state, crtc);
		if (ret)
			return ret;
	}

	/*
	 * May need to update pipe gamma enable bits
	 * when C8 planes are getting enabled/disabled.
	 */
	if (c8_planes_changed(crtc_state))
		crtc_state->uapi.color_mgmt_changed = true;

	if (intel_crtc_needs_color_update(crtc_state)) {
		ret = intel_color_check(crtc_state);
		if (ret)
			return ret;
	}

	ret = intel_compute_pipe_wm(state, crtc);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Target pipe watermarks are invalid\n");
		return ret;
	}

	/*
	 * Calculate 'intermediate' watermarks that satisfy both the
	 * old state and the new state.  We can program these
	 * immediately.
	 */
	ret = intel_compute_intermediate_wm(state, crtc);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "No valid intermediate pipe watermarks are possible\n");
		return ret;
	}

	/* SKL+: pipe scaler setup */
	if (DISPLAY_VER(dev_priv) >= 9) {
		if (intel_crtc_needs_modeset(crtc_state) ||
		    intel_crtc_needs_fastset(crtc_state)) {
			ret = skl_update_scaler_crtc(crtc_state);
			if (ret)
				return ret;
		}

		ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
		if (ret)
			return ret;
	}

	if (HAS_IPS(dev_priv)) {
		ret = hsw_ips_compute_config(state, crtc);
		if (ret)
			return ret;
	}

	if (DISPLAY_VER(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		ret = hsw_compute_linetime_wm(state, crtc);
		if (ret)
			return ret;

	}

	ret = intel_psr2_sel_fetch_update(state, crtc);
	if (ret)
		return ret;

	return 0;
}
4565
4566 static int
4567 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
4568                       struct intel_crtc_state *crtc_state)
4569 {
4570         struct drm_connector *connector = conn_state->connector;
4571         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
4572         const struct drm_display_info *info = &connector->display_info;
4573         int bpp;
4574
4575         switch (conn_state->max_bpc) {
4576         case 6 ... 7:
4577                 bpp = 6 * 3;
4578                 break;
4579         case 8 ... 9:
4580                 bpp = 8 * 3;
4581                 break;
4582         case 10 ... 11:
4583                 bpp = 10 * 3;
4584                 break;
4585         case 12 ... 16:
4586                 bpp = 12 * 3;
4587                 break;
4588         default:
4589                 MISSING_CASE(conn_state->max_bpc);
4590                 return -EINVAL;
4591         }
4592
4593         if (bpp < crtc_state->pipe_bpp) {
4594                 drm_dbg_kms(&i915->drm,
4595                             "[CONNECTOR:%d:%s] Limiting display bpp to %d "
4596                             "(EDID bpp %d, max requested bpp %d, max platform bpp %d)\n",
4597                             connector->base.id, connector->name,
4598                             bpp, 3 * info->bpc,
4599                             3 * conn_state->max_requested_bpc,
4600                             crtc_state->pipe_bpp);
4601
4602                 crtc_state->pipe_bpp = bpp;
4603         }
4604
4605         return 0;
4606 }
4607
4608 static int
4609 compute_baseline_pipe_bpp(struct intel_atomic_state *state,
4610                           struct intel_crtc *crtc)
4611 {
4612         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4613         struct intel_crtc_state *crtc_state =
4614                 intel_atomic_get_new_crtc_state(state, crtc);
4615         struct drm_connector *connector;
4616         struct drm_connector_state *connector_state;
4617         int bpp, i;
4618
4619         if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
4620             IS_CHERRYVIEW(dev_priv)))
4621                 bpp = 10*3;
4622         else if (DISPLAY_VER(dev_priv) >= 5)
4623                 bpp = 12*3;
4624         else
4625                 bpp = 8*3;
4626
4627         crtc_state->pipe_bpp = bpp;
4628
4629         /* Clamp display bpp to connector max bpp */
4630         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
4631                 int ret;
4632
4633                 if (connector_state->crtc != &crtc->base)
4634                         continue;
4635
4636                 ret = compute_sink_pipe_bpp(connector_state, crtc_state);
4637                 if (ret)
4638                         return ret;
4639         }
4640
4641         return 0;
4642 }
4643
4644 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
4645 {
4646         struct drm_device *dev = state->base.dev;
4647         struct drm_connector *connector;
4648         struct drm_connector_list_iter conn_iter;
4649         unsigned int used_ports = 0;
4650         unsigned int used_mst_ports = 0;
4651         bool ret = true;
4652
4653         /*
4654          * We're going to peek into connector->state,
4655          * hence connection_mutex must be held.
4656          */
4657         drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
4658
4659         /*
4660          * Walk the connector list instead of the encoder
4661          * list to detect the problem on ddi platforms
4662          * where there's just one encoder per digital port.
4663          */
4664         drm_connector_list_iter_begin(dev, &conn_iter);
4665         drm_for_each_connector_iter(connector, &conn_iter) {
4666                 struct drm_connector_state *connector_state;
4667                 struct intel_encoder *encoder;
4668
4669                 connector_state =
4670                         drm_atomic_get_new_connector_state(&state->base,
4671                                                            connector);
4672                 if (!connector_state)
4673                         connector_state = connector->state;
4674
4675                 if (!connector_state->best_encoder)
4676                         continue;
4677
4678                 encoder = to_intel_encoder(connector_state->best_encoder);
4679
4680                 drm_WARN_ON(dev, !connector_state->crtc);
4681
4682                 switch (encoder->type) {
4683                 case INTEL_OUTPUT_DDI:
4684                         if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
4685                                 break;
4686                         fallthrough;
4687                 case INTEL_OUTPUT_DP:
4688                 case INTEL_OUTPUT_HDMI:
4689                 case INTEL_OUTPUT_EDP:
4690                         /* the same port mustn't appear more than once */
4691                         if (used_ports & BIT(encoder->port))
4692                                 ret = false;
4693
4694                         used_ports |= BIT(encoder->port);
4695                         break;
4696                 case INTEL_OUTPUT_DP_MST:
4697                         used_mst_ports |=
4698                                 1 << encoder->port;
4699                         break;
4700                 default:
4701                         break;
4702                 }
4703         }
4704         drm_connector_list_iter_end(&conn_iter);
4705
4706         /* can't mix MST and SST/HDMI on the same port */
4707         if (used_ports & used_mst_ports)
4708                 return false;
4709
4710         return ret;
4711 }
4712
4713 static void
4714 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
4715                                            struct intel_crtc *crtc)
4716 {
4717         struct intel_crtc_state *crtc_state =
4718                 intel_atomic_get_new_crtc_state(state, crtc);
4719
4720         WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));
4721
4722         drm_property_replace_blob(&crtc_state->hw.degamma_lut,
4723                                   crtc_state->uapi.degamma_lut);
4724         drm_property_replace_blob(&crtc_state->hw.gamma_lut,
4725                                   crtc_state->uapi.gamma_lut);
4726         drm_property_replace_blob(&crtc_state->hw.ctm,
4727                                   crtc_state->uapi.ctm);
4728 }
4729
4730 static void
4731 intel_crtc_copy_uapi_to_hw_state_modeset(struct intel_atomic_state *state,
4732                                          struct intel_crtc *crtc)
4733 {
4734         struct intel_crtc_state *crtc_state =
4735                 intel_atomic_get_new_crtc_state(state, crtc);
4736
4737         WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));
4738
4739         crtc_state->hw.enable = crtc_state->uapi.enable;
4740         crtc_state->hw.active = crtc_state->uapi.active;
4741         drm_mode_copy(&crtc_state->hw.mode,
4742                       &crtc_state->uapi.mode);
4743         drm_mode_copy(&crtc_state->hw.adjusted_mode,
4744                       &crtc_state->uapi.adjusted_mode);
4745         crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;
4746
4747         intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
4748 }
4749
4750 static void
4751 copy_bigjoiner_crtc_state_nomodeset(struct intel_atomic_state *state,
4752                                     struct intel_crtc *slave_crtc)
4753 {
4754         struct intel_crtc_state *slave_crtc_state =
4755                 intel_atomic_get_new_crtc_state(state, slave_crtc);
4756         struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state);
4757         const struct intel_crtc_state *master_crtc_state =
4758                 intel_atomic_get_new_crtc_state(state, master_crtc);
4759
4760         drm_property_replace_blob(&slave_crtc_state->hw.degamma_lut,
4761                                   master_crtc_state->hw.degamma_lut);
4762         drm_property_replace_blob(&slave_crtc_state->hw.gamma_lut,
4763                                   master_crtc_state->hw.gamma_lut);
4764         drm_property_replace_blob(&slave_crtc_state->hw.ctm,
4765                                   master_crtc_state->hw.ctm);
4766
4767         slave_crtc_state->uapi.color_mgmt_changed = master_crtc_state->uapi.color_mgmt_changed;
4768 }
4769
/*
 * Make the bigjoiner slave's crtc state track the master's for a
 * modeset: the master's state is duplicated wholesale, while the
 * genuinely per-pipe fields (uapi state, scalers, DPLL selection/state,
 * CRC enable) are preserved from the slave's original state, and the hw
 * members are rebuilt from the master's hw state.
 *
 * Returns 0 on success, -ENOMEM if the temporary copy can't be allocated.
 */
static int
copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state,
				  struct intel_crtc *slave_crtc)
{
	struct intel_crtc_state *slave_crtc_state =
		intel_atomic_get_new_crtc_state(state, slave_crtc);
	struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state);
	const struct intel_crtc_state *master_crtc_state =
		intel_atomic_get_new_crtc_state(state, master_crtc);
	struct intel_crtc_state *saved_state;

	WARN_ON(master_crtc_state->bigjoiner_pipes !=
		slave_crtc_state->bigjoiner_pipes);

	/* Work on a copy of the master state so the per-pipe fields below
	 * can be patched in before it replaces the slave state. */
	saved_state = kmemdup(master_crtc_state, sizeof(*saved_state), GFP_KERNEL);
	if (!saved_state)
		return -ENOMEM;

	/* preserve some things from the slave's original crtc state */
	saved_state->uapi = slave_crtc_state->uapi;
	saved_state->scaler_state = slave_crtc_state->scaler_state;
	saved_state->shared_dpll = slave_crtc_state->shared_dpll;
	saved_state->dpll_hw_state = slave_crtc_state->dpll_hw_state;
	saved_state->crc_enabled = slave_crtc_state->crc_enabled;

	/* Free the slave's old hw blobs before they're overwritten,
	 * then install the patched master copy. */
	intel_crtc_free_hw_state(slave_crtc_state);
	memcpy(slave_crtc_state, saved_state, sizeof(*slave_crtc_state));
	kfree(saved_state);

	/* Re-init hw state */
	memset(&slave_crtc_state->hw, 0, sizeof(slave_crtc_state->hw));
	slave_crtc_state->hw.enable = master_crtc_state->hw.enable;
	slave_crtc_state->hw.active = master_crtc_state->hw.active;
	drm_mode_copy(&slave_crtc_state->hw.mode,
		      &master_crtc_state->hw.mode);
	drm_mode_copy(&slave_crtc_state->hw.pipe_mode,
		      &master_crtc_state->hw.pipe_mode);
	drm_mode_copy(&slave_crtc_state->hw.adjusted_mode,
		      &master_crtc_state->hw.adjusted_mode);
	slave_crtc_state->hw.scaling_filter = master_crtc_state->hw.scaling_filter;

	copy_bigjoiner_crtc_state_nomodeset(state, slave_crtc);

	/* Mirror the master's change flags so the slave follows the modeset. */
	slave_crtc_state->uapi.mode_changed = master_crtc_state->uapi.mode_changed;
	slave_crtc_state->uapi.connectors_changed = master_crtc_state->uapi.connectors_changed;
	slave_crtc_state->uapi.active_changed = master_crtc_state->uapi.active_changed;

	/* The memcpy above must not have disturbed the pipe bitmask. */
	WARN_ON(master_crtc_state->bigjoiner_pipes !=
		slave_crtc_state->bigjoiner_pipes);

	return 0;
}
4822
/*
 * Reset @crtc's new state to a freshly-allocated, mostly-zeroed state,
 * preserving only the fields copied below (uapi state, inherited flag,
 * scaler state, DPLL selection/state, CRC enable, and on G4X/VLV/CHV
 * the watermarks), then re-derive the hw state from the uapi state.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int
intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
				 struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *saved_state;

	saved_state = intel_crtc_state_alloc(crtc);
	if (!saved_state)
		return -ENOMEM;

	/* free the old crtc_state->hw members */
	intel_crtc_free_hw_state(crtc_state);

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	saved_state->uapi = crtc_state->uapi;
	saved_state->inherited = crtc_state->inherited;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	/* watermarks are only preserved on platforms that track them
	 * in crtc_state (G4X/VLV/CHV) */
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;

	/* install the cleared-and-patched state in place */
	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	intel_crtc_copy_uapi_to_hw_state_modeset(state, crtc);

	return 0;
}
4863
/*
 * Compute the full pipe configuration for @crtc: sanitize sync flags,
 * establish the baseline pipe bpp and pipe source size, collect the
 * output types, then run the encoder and crtc .compute_config() hooks.
 * The hooks may adjust the mode; if the crtc computation returns
 * -EAGAIN (bandwidth constrained) the encoder hooks are retried once.
 *
 * Returns 0 on success, -EDEADLK to back off for lock contention, or
 * another negative error code on configuration failure.
 */
static int
intel_modeset_pipe_config(struct intel_atomic_state *state,
			  struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int pipe_src_w, pipe_src_h;
	int base_bpp, ret, i;
	bool retry = true;

	/* default: the transcoder matches the pipe */
	crtc_state->cpu_transcoder = (enum transcoder) crtc->pipe;

	crtc_state->framestart_delay = 1;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(crtc_state->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(crtc_state->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(state, crtc);
	if (ret)
		return ret;

	/* remember the platform max for the final debug message below */
	base_bpp = crtc_state->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&crtc_state->hw.mode,
			       &pipe_src_w, &pipe_src_h);
	drm_rect_init(&crtc_state->pipe_src, 0, 0,
		      pipe_src_w, pipe_src_h);

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != &crtc->base)
			continue;

		if (!check_single_encoder_cloning(state, crtc, encoder)) {
			drm_dbg_kms(&i915->drm,
				    "[ENCODER:%d:%s] rejecting invalid cloning configuration\n",
				    encoder->base.base.id, encoder->base.name);
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			crtc_state->output_types |=
				BIT(encoder->compute_output_type(encoder, crtc_state,
								 connector_state));
		else
			crtc_state->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	crtc_state->port_clock = 0;
	crtc_state->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&crtc_state->hw.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != &crtc->base)
			continue;

		ret = encoder->compute_config(encoder, crtc_state,
					      connector_state);
		if (ret == -EDEADLK)
			return ret;
		if (ret < 0) {
			drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] config failure: %d\n",
				    encoder->base.base.id, encoder->base.name, ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!crtc_state->port_clock)
		crtc_state->port_clock = crtc_state->hw.adjusted_mode.crtc_clock
			* crtc_state->pixel_multiplier;

	ret = intel_crtc_compute_config(state, crtc);
	if (ret == -EDEADLK)
		return ret;
	if (ret == -EAGAIN) {
		/* a single retry is allowed; a second -EAGAIN is a bug */
		if (drm_WARN(&i915->drm, !retry,
			     "[CRTC:%d:%s] loop in pipe configuration computation\n",
			     crtc->base.base.id, crtc->base.name))
			return -EINVAL;

		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] bw constrained, retrying\n",
			    crtc->base.base.id, crtc->base.name);
		retry = false;
		goto encoder_retry;
	}
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] config failure: %d\n",
			    crtc->base.base.id, crtc->base.name, ret);
		return ret;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	crtc_state->dither = (crtc_state->pipe_bpp == 6*3) &&
		!crtc_state->dither_force_disable;
	drm_dbg_kms(&i915->drm,
		    "[CRTC:%d:%s] hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		    crtc->base.base.id, crtc->base.name,
		    base_bpp, crtc_state->pipe_bpp, crtc_state->dither);

	return 0;
}
5009
5010 static int
5011 intel_modeset_pipe_config_late(struct intel_atomic_state *state,
5012                                struct intel_crtc *crtc)
5013 {
5014         struct intel_crtc_state *crtc_state =
5015                 intel_atomic_get_new_crtc_state(state, crtc);
5016         struct drm_connector_state *conn_state;
5017         struct drm_connector *connector;
5018         int i;
5019
5020         intel_bigjoiner_adjust_pipe_src(crtc_state);
5021
5022         for_each_new_connector_in_state(&state->base, connector,
5023                                         conn_state, i) {
5024                 struct intel_encoder *encoder =
5025                         to_intel_encoder(conn_state->best_encoder);
5026                 int ret;
5027
5028                 if (conn_state->crtc != &crtc->base ||
5029                     !encoder->compute_config_late)
5030                         continue;
5031
5032                 ret = encoder->compute_config_late(encoder, crtc_state,
5033                                                    conn_state);
5034                 if (ret)
5035                         return ret;
5036         }
5037
5038         return 0;
5039 }
5040
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int sum, diff;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	diff = abs(clock1 - clock2);
	sum = clock1 + clock2;

	/* accept the pair when the difference stays under ~5% of the sum */
	return (diff + sum) * 100 / sum < 105;
}
5058
5059 static bool
5060 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
5061                        const struct intel_link_m_n *m2_n2)
5062 {
5063         return m_n->tu == m2_n2->tu &&
5064                 m_n->data_m == m2_n2->data_m &&
5065                 m_n->data_n == m2_n2->data_n &&
5066                 m_n->link_m == m2_n2->link_m &&
5067                 m_n->link_n == m2_n2->link_n;
5068 }
5069
5070 static bool
5071 intel_compare_infoframe(const union hdmi_infoframe *a,
5072                         const union hdmi_infoframe *b)
5073 {
5074         return memcmp(a, b, sizeof(*a)) == 0;
5075 }
5076
5077 static bool
5078 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
5079                          const struct drm_dp_vsc_sdp *b)
5080 {
5081         return memcmp(a, b, sizeof(*a)) == 0;
5082 }
5083
5084 static bool
5085 intel_compare_buffer(const u8 *a, const u8 *b, size_t len)
5086 {
5087         return memcmp(a, b, len) == 0;
5088 }
5089
5090 static void
5091 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
5092                                bool fastset, const char *name,
5093                                const union hdmi_infoframe *a,
5094                                const union hdmi_infoframe *b)
5095 {
5096         if (fastset) {
5097                 if (!drm_debug_enabled(DRM_UT_KMS))
5098                         return;
5099
5100                 drm_dbg_kms(&dev_priv->drm,
5101                             "fastset mismatch in %s infoframe\n", name);
5102                 drm_dbg_kms(&dev_priv->drm, "expected:\n");
5103                 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
5104                 drm_dbg_kms(&dev_priv->drm, "found:\n");
5105                 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
5106         } else {
5107                 drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
5108                 drm_err(&dev_priv->drm, "expected:\n");
5109                 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
5110                 drm_err(&dev_priv->drm, "found:\n");
5111                 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
5112         }
5113 }
5114
5115 static void
5116 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
5117                                 bool fastset, const char *name,
5118                                 const struct drm_dp_vsc_sdp *a,
5119                                 const struct drm_dp_vsc_sdp *b)
5120 {
5121         if (fastset) {
5122                 if (!drm_debug_enabled(DRM_UT_KMS))
5123                         return;
5124
5125                 drm_dbg_kms(&dev_priv->drm,
5126                             "fastset mismatch in %s dp sdp\n", name);
5127                 drm_dbg_kms(&dev_priv->drm, "expected:\n");
5128                 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
5129                 drm_dbg_kms(&dev_priv->drm, "found:\n");
5130                 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
5131         } else {
5132                 drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
5133                 drm_err(&dev_priv->drm, "expected:\n");
5134                 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
5135                 drm_err(&dev_priv->drm, "found:\n");
5136                 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
5137         }
5138 }
5139
5140 /* Returns the length up to and including the last differing byte */
5141 static size_t
5142 memcmp_diff_len(const u8 *a, const u8 *b, size_t len)
5143 {
5144         int i;
5145
5146         for (i = len - 1; i >= 0; i--) {
5147                 if (a[i] != b[i])
5148                         return i + 1;
5149         }
5150
5151         return 0;
5152 }
5153
5154 static void
5155 pipe_config_buffer_mismatch(struct drm_i915_private *dev_priv,
5156                             bool fastset, const char *name,
5157                             const u8 *a, const u8 *b, size_t len)
5158 {
5159         if (fastset) {
5160                 if (!drm_debug_enabled(DRM_UT_KMS))
5161                         return;
5162
5163                 /* only dump up to the last difference */
5164                 len = memcmp_diff_len(a, b, len);
5165
5166                 drm_dbg_kms(&dev_priv->drm,
5167                             "fastset mismatch in %s buffer\n", name);
5168                 print_hex_dump(KERN_DEBUG, "expected: ", DUMP_PREFIX_NONE,
5169                                16, 0, a, len, false);
5170                 print_hex_dump(KERN_DEBUG, "found: ", DUMP_PREFIX_NONE,
5171                                16, 0, b, len, false);
5172         } else {
5173                 /* only dump up to the last difference */
5174                 len = memcmp_diff_len(a, b, len);
5175
5176                 drm_err(&dev_priv->drm, "mismatch in %s buffer\n", name);
5177                 print_hex_dump(KERN_ERR, "expected: ", DUMP_PREFIX_NONE,
5178                                16, 0, a, len, false);
5179                 print_hex_dump(KERN_ERR, "found: ", DUMP_PREFIX_NONE,
5180                                16, 0, b, len, false);
5181         }
5182 }
5183
/*
 * Log a pipe config mismatch for field @name on @crtc. Fastset
 * mismatches are logged at KMS debug level; modeset-time mismatches
 * are logged as errors. @format/... describe the differing values.
 */
static void __printf(4, 5)
pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
		     const char *name, const char *format, ...)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	/* %pV below expands the caller's format string + args in place */
	vaf.fmt = format;
	vaf.va = &args;

	if (fastset)
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
			    crtc->base.base.id, crtc->base.name, name, &vaf);
	else
		drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
			crtc->base.base.id, crtc->base.name, name, &vaf);

	va_end(args);
}
5206
5207 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
5208 {
5209         if (dev_priv->params.fastboot != -1)
5210                 return dev_priv->params.fastboot;
5211
5212         /* Enable fastboot by default on Skylake and newer */
5213         if (DISPLAY_VER(dev_priv) >= 9)
5214                 return true;
5215
5216         /* Enable fastboot by default on VLV and CHV */
5217         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5218                 return true;
5219
5220         /* Disabled by default on all others */
5221         return false;
5222 }
5223
5224 bool
5225 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
5226                           const struct intel_crtc_state *pipe_config,
5227                           bool fastset)
5228 {
5229         struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
5230         struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
5231         bool ret = true;
5232         bool fixup_inherited = fastset &&
5233                 current_config->inherited && !pipe_config->inherited;
5234
5235         if (fixup_inherited && !fastboot_enabled(dev_priv)) {
5236                 drm_dbg_kms(&dev_priv->drm,
5237                             "initial modeset and fastboot not set\n");
5238                 ret = false;
5239         }
5240
5241 #define PIPE_CONF_CHECK_X(name) do { \
5242         if (current_config->name != pipe_config->name) { \
5243                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
5244                                      "(expected 0x%08x, found 0x%08x)", \
5245                                      current_config->name, \
5246                                      pipe_config->name); \
5247                 ret = false; \
5248         } \
5249 } while (0)
5250
5251 #define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \
5252         if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
5253                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
5254                                      "(expected 0x%08x, found 0x%08x)", \
5255                                      current_config->name & (mask), \
5256                                      pipe_config->name & (mask)); \
5257                 ret = false; \
5258         } \
5259 } while (0)
5260
5261 #define PIPE_CONF_CHECK_I(name) do { \
5262         if (current_config->name != pipe_config->name) { \
5263                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
5264                                      "(expected %i, found %i)", \
5265                                      current_config->name, \
5266                                      pipe_config->name); \
5267                 ret = false; \
5268         } \
5269 } while (0)
5270
5271 #define PIPE_CONF_CHECK_BOOL(name) do { \
5272         if (current_config->name != pipe_config->name) { \
5273                 pipe_config_mismatch(fastset, crtc,  __stringify(name), \
5274                                      "(expected %s, found %s)", \
5275                                      str_yes_no(current_config->name), \
5276                                      str_yes_no(pipe_config->name)); \
5277                 ret = false; \
5278         } \
5279 } while (0)
5280
5281 /*
5282  * Checks state where we only read out the enabling, but not the entire
5283  * state itself (like full infoframes or ELD for audio). These states
5284  * require a full modeset on bootup to fix up.
5285  */
5286 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
5287         if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
5288                 PIPE_CONF_CHECK_BOOL(name); \
5289         } else { \
5290                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
5291                                      "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
5292                                      str_yes_no(current_config->name), \
5293                                      str_yes_no(pipe_config->name)); \
5294                 ret = false; \
5295         } \
5296 } while (0)
5297
5298 #define PIPE_CONF_CHECK_P(name) do { \
5299         if (current_config->name != pipe_config->name) { \
5300                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
5301                                      "(expected %p, found %p)", \
5302                                      current_config->name, \
5303                                      pipe_config->name); \
5304                 ret = false; \
5305         } \
5306 } while (0)
5307
5308 #define PIPE_CONF_CHECK_M_N(name) do { \
5309         if (!intel_compare_link_m_n(&current_config->name, \
5310                                     &pipe_config->name)) { \
5311                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
5312                                      "(expected tu %i data %i/%i link %i/%i, " \
5313                                      "found tu %i, data %i/%i link %i/%i)", \
5314                                      current_config->name.tu, \
5315                                      current_config->name.data_m, \
5316                                      current_config->name.data_n, \
5317                                      current_config->name.link_m, \
5318                                      current_config->name.link_n, \
5319                                      pipe_config->name.tu, \
5320                                      pipe_config->name.data_m, \
5321                                      pipe_config->name.data_n, \
5322                                      pipe_config->name.link_m, \
5323                                      pipe_config->name.link_n); \
5324                 ret = false; \
5325         } \
5326 } while (0)
5327
5328 #define PIPE_CONF_CHECK_TIMINGS(name) do { \
5329         PIPE_CONF_CHECK_I(name.crtc_hdisplay); \
5330         PIPE_CONF_CHECK_I(name.crtc_htotal); \
5331         PIPE_CONF_CHECK_I(name.crtc_hblank_start); \
5332         PIPE_CONF_CHECK_I(name.crtc_hblank_end); \
5333         PIPE_CONF_CHECK_I(name.crtc_hsync_start); \
5334         PIPE_CONF_CHECK_I(name.crtc_hsync_end); \
5335         PIPE_CONF_CHECK_I(name.crtc_vdisplay); \
5336         PIPE_CONF_CHECK_I(name.crtc_vtotal); \
5337         PIPE_CONF_CHECK_I(name.crtc_vblank_start); \
5338         PIPE_CONF_CHECK_I(name.crtc_vblank_end); \
5339         PIPE_CONF_CHECK_I(name.crtc_vsync_start); \
5340         PIPE_CONF_CHECK_I(name.crtc_vsync_end); \
5341 } while (0)
5342
5343 #define PIPE_CONF_CHECK_RECT(name) do { \
5344         PIPE_CONF_CHECK_I(name.x1); \
5345         PIPE_CONF_CHECK_I(name.x2); \
5346         PIPE_CONF_CHECK_I(name.y1); \
5347         PIPE_CONF_CHECK_I(name.y2); \
5348 } while (0)
5349
5350 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
5351         if ((current_config->name ^ pipe_config->name) & (mask)) { \
5352                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
5353                                      "(%x) (expected %i, found %i)", \
5354                                      (mask), \
5355                                      current_config->name & (mask), \
5356                                      pipe_config->name & (mask)); \
5357                 ret = false; \
5358         } \
5359 } while (0)
5360
5361 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
5362         if (!intel_compare_infoframe(&current_config->infoframes.name, \
5363                                      &pipe_config->infoframes.name)) { \
5364                 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
5365                                                &current_config->infoframes.name, \
5366                                                &pipe_config->infoframes.name); \
5367                 ret = false; \
5368         } \
5369 } while (0)
5370
5371 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
5372         if (!current_config->has_psr && !pipe_config->has_psr && \
5373             !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
5374                                       &pipe_config->infoframes.name)) { \
5375                 pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
5376                                                 &current_config->infoframes.name, \
5377                                                 &pipe_config->infoframes.name); \
5378                 ret = false; \
5379         } \
5380 } while (0)
5381
5382 #define PIPE_CONF_CHECK_BUFFER(name, len) do { \
5383         BUILD_BUG_ON(sizeof(current_config->name) != (len)); \
5384         BUILD_BUG_ON(sizeof(pipe_config->name) != (len)); \
5385         if (!intel_compare_buffer(current_config->name, pipe_config->name, (len))) { \
5386                 pipe_config_buffer_mismatch(dev_priv, fastset, __stringify(name), \
5387                                             current_config->name, \
5388                                             pipe_config->name, \
5389                                             (len)); \
5390                 ret = false; \
5391         } \
5392 } while (0)
5393
5394 #define PIPE_CONF_CHECK_COLOR_LUT(lut, is_pre_csc_lut) do { \
5395         if (current_config->gamma_mode == pipe_config->gamma_mode && \
5396             !intel_color_lut_equal(current_config, \
5397                                    current_config->lut, pipe_config->lut, \
5398                                    is_pre_csc_lut)) {   \
5399                 pipe_config_mismatch(fastset, crtc, __stringify(lut), \
5400                                      "hw_state doesn't match sw_state"); \
5401                 ret = false; \
5402         } \
5403 } while (0)
5404
5405 #define PIPE_CONF_CHECK_CSC(name) do { \
5406         PIPE_CONF_CHECK_X(name.preoff[0]); \
5407         PIPE_CONF_CHECK_X(name.preoff[1]); \
5408         PIPE_CONF_CHECK_X(name.preoff[2]); \
5409         PIPE_CONF_CHECK_X(name.coeff[0]); \
5410         PIPE_CONF_CHECK_X(name.coeff[1]); \
5411         PIPE_CONF_CHECK_X(name.coeff[2]); \
5412         PIPE_CONF_CHECK_X(name.coeff[3]); \
5413         PIPE_CONF_CHECK_X(name.coeff[4]); \
5414         PIPE_CONF_CHECK_X(name.coeff[5]); \
5415         PIPE_CONF_CHECK_X(name.coeff[6]); \
5416         PIPE_CONF_CHECK_X(name.coeff[7]); \
5417         PIPE_CONF_CHECK_X(name.coeff[8]); \
5418         PIPE_CONF_CHECK_X(name.postoff[0]); \
5419         PIPE_CONF_CHECK_X(name.postoff[1]); \
5420         PIPE_CONF_CHECK_X(name.postoff[2]); \
5421 } while (0)
5422
5423 #define PIPE_CONF_QUIRK(quirk) \
5424         ((current_config->quirks | pipe_config->quirks) & (quirk))
5425
5426         PIPE_CONF_CHECK_I(hw.enable);
5427         PIPE_CONF_CHECK_I(hw.active);
5428
5429         PIPE_CONF_CHECK_I(cpu_transcoder);
5430         PIPE_CONF_CHECK_I(mst_master_transcoder);
5431
5432         PIPE_CONF_CHECK_BOOL(has_pch_encoder);
5433         PIPE_CONF_CHECK_I(fdi_lanes);
5434         PIPE_CONF_CHECK_M_N(fdi_m_n);
5435
5436         PIPE_CONF_CHECK_I(lane_count);
5437         PIPE_CONF_CHECK_X(lane_lat_optim_mask);
5438
5439         if (HAS_DOUBLE_BUFFERED_M_N(dev_priv)) {
5440                 if (!fastset || !pipe_config->seamless_m_n)
5441                         PIPE_CONF_CHECK_M_N(dp_m_n);
5442         } else {
5443                 PIPE_CONF_CHECK_M_N(dp_m_n);
5444                 PIPE_CONF_CHECK_M_N(dp_m2_n2);
5445         }
5446
5447         PIPE_CONF_CHECK_X(output_types);
5448
5449         PIPE_CONF_CHECK_I(framestart_delay);
5450         PIPE_CONF_CHECK_I(msa_timing_delay);
5451
5452         PIPE_CONF_CHECK_TIMINGS(hw.pipe_mode);
5453         PIPE_CONF_CHECK_TIMINGS(hw.adjusted_mode);
5454
5455         PIPE_CONF_CHECK_I(pixel_multiplier);
5456
5457         PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
5458                               DRM_MODE_FLAG_INTERLACE);
5459
5460         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
5461                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
5462                                       DRM_MODE_FLAG_PHSYNC);
5463                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
5464                                       DRM_MODE_FLAG_NHSYNC);
5465                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
5466                                       DRM_MODE_FLAG_PVSYNC);
5467                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
5468                                       DRM_MODE_FLAG_NVSYNC);
5469         }
5470
5471         PIPE_CONF_CHECK_I(output_format);
5472         PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
5473         if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
5474             IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5475                 PIPE_CONF_CHECK_BOOL(limited_color_range);
5476
5477         PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
5478         PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
5479         PIPE_CONF_CHECK_BOOL(has_infoframe);
5480         PIPE_CONF_CHECK_BOOL(fec_enable);
5481
5482         PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
5483         PIPE_CONF_CHECK_BUFFER(eld, MAX_ELD_BYTES);
5484
5485         PIPE_CONF_CHECK_X(gmch_pfit.control);
5486         /* pfit ratios are autocomputed by the hw on gen4+ */
5487         if (DISPLAY_VER(dev_priv) < 4)
5488                 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
5489         PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
5490
5491         /*
5492          * Changing the EDP transcoder input mux
5493          * (A_ONOFF vs. A_ON) requires a full modeset.
5494          */
5495         PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
5496
5497         if (!fastset) {
5498                 PIPE_CONF_CHECK_RECT(pipe_src);
5499
5500                 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
5501                 PIPE_CONF_CHECK_RECT(pch_pfit.dst);
5502
5503                 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
5504                 PIPE_CONF_CHECK_I(pixel_rate);
5505
5506                 PIPE_CONF_CHECK_X(gamma_mode);
5507                 if (IS_CHERRYVIEW(dev_priv))
5508                         PIPE_CONF_CHECK_X(cgm_mode);
5509                 else
5510                         PIPE_CONF_CHECK_X(csc_mode);
5511                 PIPE_CONF_CHECK_BOOL(gamma_enable);
5512                 PIPE_CONF_CHECK_BOOL(csc_enable);
5513
5514                 PIPE_CONF_CHECK_I(linetime);
5515                 PIPE_CONF_CHECK_I(ips_linetime);
5516
5517                 PIPE_CONF_CHECK_COLOR_LUT(pre_csc_lut, true);
5518                 PIPE_CONF_CHECK_COLOR_LUT(post_csc_lut, false);
5519
5520                 PIPE_CONF_CHECK_CSC(csc);
5521                 PIPE_CONF_CHECK_CSC(output_csc);
5522
5523                 if (current_config->active_planes) {
5524                         PIPE_CONF_CHECK_BOOL(has_psr);
5525                         PIPE_CONF_CHECK_BOOL(has_psr2);
5526                         PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
5527                         PIPE_CONF_CHECK_I(dc3co_exitline);
5528                 }
5529         }
5530
5531         PIPE_CONF_CHECK_BOOL(double_wide);
5532
5533         if (dev_priv->display.dpll.mgr) {
5534                 PIPE_CONF_CHECK_P(shared_dpll);
5535
5536                 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
5537                 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
5538                 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
5539                 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
5540                 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
5541                 PIPE_CONF_CHECK_X(dpll_hw_state.spll);
5542                 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
5543                 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
5544                 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
5545                 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
5546                 PIPE_CONF_CHECK_X(dpll_hw_state.div0);
5547                 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
5548                 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
5549                 PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
5550                 PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
5551                 PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
5552                 PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
5553                 PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
5554                 PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
5555                 PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
5556                 PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
5557                 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
5558                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
5559                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
5560                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
5561                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
5562                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
5563                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
5564                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
5565                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
5566                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
5567                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
5568         }
5569
5570         PIPE_CONF_CHECK_X(dsi_pll.ctrl);
5571         PIPE_CONF_CHECK_X(dsi_pll.div);
5572
5573         if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
5574                 PIPE_CONF_CHECK_I(pipe_bpp);
5575
5576         if (!fastset || !pipe_config->seamless_m_n) {
5577                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_clock);
5578                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_clock);
5579         }
5580         PIPE_CONF_CHECK_I(port_clock);
5581
5582         PIPE_CONF_CHECK_I(min_voltage_level);
5583
5584         if (current_config->has_psr || pipe_config->has_psr)
5585                 PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
5586                                             ~intel_hdmi_infoframe_enable(DP_SDP_VSC));
5587         else
5588                 PIPE_CONF_CHECK_X(infoframes.enable);
5589
5590         PIPE_CONF_CHECK_X(infoframes.gcp);
5591         PIPE_CONF_CHECK_INFOFRAME(avi);
5592         PIPE_CONF_CHECK_INFOFRAME(spd);
5593         PIPE_CONF_CHECK_INFOFRAME(hdmi);
5594         PIPE_CONF_CHECK_INFOFRAME(drm);
5595         PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
5596
5597         PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
5598         PIPE_CONF_CHECK_I(master_transcoder);
5599         PIPE_CONF_CHECK_X(bigjoiner_pipes);
5600
5601         PIPE_CONF_CHECK_I(dsc.compression_enable);
5602         PIPE_CONF_CHECK_I(dsc.dsc_split);
5603         PIPE_CONF_CHECK_I(dsc.compressed_bpp);
5604
5605         PIPE_CONF_CHECK_BOOL(splitter.enable);
5606         PIPE_CONF_CHECK_I(splitter.link_count);
5607         PIPE_CONF_CHECK_I(splitter.pixel_overlap);
5608
5609         if (!fastset)
5610                 PIPE_CONF_CHECK_BOOL(vrr.enable);
5611         PIPE_CONF_CHECK_I(vrr.vmin);
5612         PIPE_CONF_CHECK_I(vrr.vmax);
5613         PIPE_CONF_CHECK_I(vrr.flipline);
5614         PIPE_CONF_CHECK_I(vrr.pipeline_full);
5615         PIPE_CONF_CHECK_I(vrr.guardband);
5616
5617 #undef PIPE_CONF_CHECK_X
5618 #undef PIPE_CONF_CHECK_I
5619 #undef PIPE_CONF_CHECK_BOOL
5620 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
5621 #undef PIPE_CONF_CHECK_P
5622 #undef PIPE_CONF_CHECK_FLAGS
5623 #undef PIPE_CONF_CHECK_COLOR_LUT
5624 #undef PIPE_CONF_CHECK_TIMINGS
5625 #undef PIPE_CONF_CHECK_RECT
5626 #undef PIPE_CONF_QUIRK
5627
5628         return ret;
5629 }
5630
5631 static void
5632 intel_verify_planes(struct intel_atomic_state *state)
5633 {
5634         struct intel_plane *plane;
5635         const struct intel_plane_state *plane_state;
5636         int i;
5637
5638         for_each_new_intel_plane_in_state(state, plane,
5639                                           plane_state, i)
5640                 assert_plane(plane, plane_state->planar_slave ||
5641                              plane_state->uapi.visible);
5642 }
5643
5644 int intel_modeset_all_pipes(struct intel_atomic_state *state,
5645                             const char *reason)
5646 {
5647         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5648         struct intel_crtc *crtc;
5649
5650         /*
5651          * Add all pipes to the state, and force
5652          * a modeset on all the active ones.
5653          */
5654         for_each_intel_crtc(&dev_priv->drm, crtc) {
5655                 struct intel_crtc_state *crtc_state;
5656                 int ret;
5657
5658                 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
5659                 if (IS_ERR(crtc_state))
5660                         return PTR_ERR(crtc_state);
5661
5662                 if (!crtc_state->hw.active ||
5663                     intel_crtc_needs_modeset(crtc_state))
5664                         continue;
5665
5666                 drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] Full modeset due to %s\n",
5667                             crtc->base.base.id, crtc->base.name, reason);
5668
5669                 crtc_state->uapi.mode_changed = true;
5670                 crtc_state->update_pipe = false;
5671
5672                 ret = drm_atomic_add_affected_connectors(&state->base,
5673                                                          &crtc->base);
5674                 if (ret)
5675                         return ret;
5676
5677                 ret = intel_dp_mst_add_topology_state_for_crtc(state, crtc);
5678                 if (ret)
5679                         return ret;
5680
5681                 ret = intel_atomic_add_affected_planes(state, crtc);
5682                 if (ret)
5683                         return ret;
5684
5685                 crtc_state->update_planes |= crtc_state->active_planes;
5686                 crtc_state->async_flip_planes = 0;
5687                 crtc_state->do_async_flip = false;
5688         }
5689
5690         return 0;
5691 }
5692
/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 *
 * Returns 0 on success or a negative error code from
 * intel_atomic_get_crtc_state().
 */
static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled in during modeset */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->hw.active ||
		    !intel_crtc_needs_modeset(crtc_state))
			continue;

		/* Remember only the first two crtcs being enabled. */
		if (first_crtc_state) {
			other_crtc_state = crtc_state;
			break;
		} else {
			first_crtc_state = crtc_state;
			first_pipe = crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->base.dev, crtc) {
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		/* Default: no pipe to wait on. */
		crtc_state->hsw_workaround_pipe = INVALID_PIPE;

		if (!crtc_state->hw.active ||
		    intel_crtc_needs_modeset(crtc_state))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = crtc->pipe;
	}

	/*
	 * If exactly one pipe stays enabled, the newly enabled pipe waits
	 * on it; otherwise, when two pipes are enabled together, the
	 * second waits on the first.
	 */
	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
5753
5754 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
5755                            u8 active_pipes)
5756 {
5757         const struct intel_crtc_state *crtc_state;
5758         struct intel_crtc *crtc;
5759         int i;
5760
5761         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
5762                 if (crtc_state->hw.active)
5763                         active_pipes |= BIT(crtc->pipe);
5764                 else
5765                         active_pipes &= ~BIT(crtc->pipe);
5766         }
5767
5768         return active_pipes;
5769 }
5770
5771 static int intel_modeset_checks(struct intel_atomic_state *state)
5772 {
5773         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5774
5775         state->modeset = true;
5776
5777         if (IS_HASWELL(dev_priv))
5778                 return hsw_mode_set_planes_workaround(state);
5779
5780         return 0;
5781 }
5782
5783 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
5784                                      struct intel_crtc_state *new_crtc_state)
5785 {
5786         if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
5787                 return;
5788
5789         new_crtc_state->uapi.mode_changed = false;
5790         if (!intel_crtc_needs_modeset(new_crtc_state))
5791                 new_crtc_state->update_pipe = true;
5792 }
5793
5794 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
5795                                           struct intel_crtc *crtc,
5796                                           u8 plane_ids_mask)
5797 {
5798         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5799         struct intel_plane *plane;
5800
5801         for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
5802                 struct intel_plane_state *plane_state;
5803
5804                 if ((plane_ids_mask & BIT(plane->id)) == 0)
5805                         continue;
5806
5807                 plane_state = intel_atomic_get_plane_state(state, plane);
5808                 if (IS_ERR(plane_state))
5809                         return PTR_ERR(plane_state);
5810         }
5811
5812         return 0;
5813 }
5814
5815 int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
5816                                      struct intel_crtc *crtc)
5817 {
5818         const struct intel_crtc_state *old_crtc_state =
5819                 intel_atomic_get_old_crtc_state(state, crtc);
5820         const struct intel_crtc_state *new_crtc_state =
5821                 intel_atomic_get_new_crtc_state(state, crtc);
5822
5823         return intel_crtc_add_planes_to_state(state, crtc,
5824                                               old_crtc_state->enabled_planes |
5825                                               new_crtc_state->enabled_planes);
5826 }
5827
5828 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
5829 {
5830         /* See {hsw,vlv,ivb}_plane_ratio() */
5831         return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
5832                 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
5833                 IS_IVYBRIDGE(dev_priv);
5834 }
5835
5836 static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
5837                                            struct intel_crtc *crtc,
5838                                            struct intel_crtc *other)
5839 {
5840         const struct intel_plane_state *plane_state;
5841         struct intel_plane *plane;
5842         u8 plane_ids = 0;
5843         int i;
5844
5845         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
5846                 if (plane->pipe == crtc->pipe)
5847                         plane_ids |= BIT(plane->id);
5848         }
5849
5850         return intel_crtc_add_planes_to_state(state, other, plane_ids);
5851 }
5852
5853 static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
5854 {
5855         struct drm_i915_private *i915 = to_i915(state->base.dev);
5856         const struct intel_crtc_state *crtc_state;
5857         struct intel_crtc *crtc;
5858         int i;
5859
5860         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
5861                 struct intel_crtc *other;
5862
5863                 for_each_intel_crtc_in_pipe_mask(&i915->drm, other,
5864                                                  crtc_state->bigjoiner_pipes) {
5865                         int ret;
5866
5867                         if (crtc == other)
5868                                 continue;
5869
5870                         ret = intel_crtc_add_bigjoiner_planes(state, crtc, other);
5871                         if (ret)
5872                                 return ret;
5873                 }
5874         }
5875
5876         return 0;
5877 }
5878
/*
 * Run the plane-level atomic checks, after first pulling in any
 * additional planes that need checking (ICL linked planes, bigjoiner
 * mirror planes), then add further planes to the state on platforms
 * where the active plane count feeds the min cdclk computation.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_atomic_check_planes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i, ret;

	ret = icl_add_linked_planes(state);
	if (ret)
		return ret;

	ret = intel_bigjoiner_add_affected_planes(state);
	if (ret)
		return ret;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_atomic_check(state, plane);
		if (ret) {
			drm_dbg_atomic(&dev_priv->drm,
				       "[PLANE:%d:%s] atomic driver check failed\n",
				       plane->base.base.id, plane->base.name);
			return ret;
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		u8 old_active_planes, new_active_planes;

		ret = icl_check_nv12_planes(new_crtc_state);
		if (ret)
			return ret;

		/*
		 * On some platforms the number of active planes affects
		 * the planes' minimum cdclk calculation. Add such planes
		 * to the state before we compute the minimum cdclk.
		 */
		if (!active_planes_affects_min_cdclk(dev_priv))
			continue;

		/* The cursor plane is excluded from the count. */
		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

		/* Only a change in the *number* of active planes matters. */
		if (hweight8(old_active_planes) == hweight8(new_active_planes))
			continue;

		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
		if (ret)
			return ret;
	}

	return 0;
}
5935
5936 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
5937 {
5938         struct intel_crtc_state *crtc_state;
5939         struct intel_crtc *crtc;
5940         int i;
5941
5942         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
5943                 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
5944                 int ret;
5945
5946                 ret = intel_crtc_atomic_check(state, crtc);
5947                 if (ret) {
5948                         drm_dbg_atomic(&i915->drm,
5949                                        "[CRTC:%d:%s] atomic driver check failed\n",
5950                                        crtc->base.base.id, crtc->base.name);
5951                         return ret;
5952                 }
5953         }
5954
5955         return 0;
5956 }
5957
5958 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
5959                                                u8 transcoders)
5960 {
5961         const struct intel_crtc_state *new_crtc_state;
5962         struct intel_crtc *crtc;
5963         int i;
5964
5965         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
5966                 if (new_crtc_state->hw.enable &&
5967                     transcoders & BIT(new_crtc_state->cpu_transcoder) &&
5968                     intel_crtc_needs_modeset(new_crtc_state))
5969                         return true;
5970         }
5971
5972         return false;
5973 }
5974
5975 static bool intel_pipes_need_modeset(struct intel_atomic_state *state,
5976                                      u8 pipes)
5977 {
5978         const struct intel_crtc_state *new_crtc_state;
5979         struct intel_crtc *crtc;
5980         int i;
5981
5982         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
5983                 if (new_crtc_state->hw.enable &&
5984                     pipes & BIT(crtc->pipe) &&
5985                     intel_crtc_needs_modeset(new_crtc_state))
5986                         return true;
5987         }
5988
5989         return false;
5990 }
5991
/*
 * Validate the bigjoiner configuration of @master_crtc and pull each
 * slave crtc into the state, copying the master's modeset state to it.
 *
 * Returns 0 on success (including the no-bigjoiner case) or a negative
 * error code when the configuration is invalid or state acquisition fails.
 */
static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
					struct intel_crtc *master_crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *master_crtc_state =
		intel_atomic_get_new_crtc_state(state, master_crtc);
	struct intel_crtc *slave_crtc;

	/* Nothing to do unless this crtc is part of a bigjoiner config. */
	if (!master_crtc_state->bigjoiner_pipes)
		return 0;

	/* sanity check */
	if (drm_WARN_ON(&i915->drm,
			master_crtc->pipe != bigjoiner_master_pipe(master_crtc_state)))
		return -EINVAL;

	/* All requested pipes must be capable of bigjoiner on this platform. */
	if (master_crtc_state->bigjoiner_pipes & ~bigjoiner_pipes(i915)) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] Cannot act as big joiner master "
			    "(need 0x%x as pipes, only 0x%x possible)\n",
			    master_crtc->base.base.id, master_crtc->base.name,
			    master_crtc_state->bigjoiner_pipes, bigjoiner_pipes(i915));
		return -EINVAL;
	}

	for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
					 intel_crtc_bigjoiner_slave_pipes(master_crtc_state)) {
		struct intel_crtc_state *slave_crtc_state;
		int ret;

		slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave_crtc);
		if (IS_ERR(slave_crtc_state))
			return PTR_ERR(slave_crtc_state);

		/* master being enabled, slave was already configured? */
		if (slave_crtc_state->uapi.enable) {
			drm_dbg_kms(&i915->drm,
				    "[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
				    "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
				    slave_crtc->base.base.id, slave_crtc->base.name,
				    master_crtc->base.base.id, master_crtc->base.name);
			return -EINVAL;
		}

		/*
		 * The state copy logic assumes the master crtc gets processed
		 * before the slave crtc during the main compute_config loop.
		 * This works because the crtcs are created in pipe order,
		 * and the hardware requires master pipe < slave pipe as well.
		 * Should that change we need to rethink the logic.
		 */
		if (WARN_ON(drm_crtc_index(&master_crtc->base) >
			    drm_crtc_index(&slave_crtc->base)))
			return -EINVAL;

		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] Used as slave for big joiner master [CRTC:%d:%s]\n",
			    slave_crtc->base.base.id, slave_crtc->base.name,
			    master_crtc->base.base.id, master_crtc->base.name);

		/* Link the slave back to the same bigjoiner pipe set. */
		slave_crtc_state->bigjoiner_pipes =
			master_crtc_state->bigjoiner_pipes;

		ret = copy_bigjoiner_crtc_state_modeset(state, slave_crtc);
		if (ret)
			return ret;
	}

	return 0;
}
6062
6063 static void kill_bigjoiner_slave(struct intel_atomic_state *state,
6064                                  struct intel_crtc *master_crtc)
6065 {
6066         struct drm_i915_private *i915 = to_i915(state->base.dev);
6067         struct intel_crtc_state *master_crtc_state =
6068                 intel_atomic_get_new_crtc_state(state, master_crtc);
6069         struct intel_crtc *slave_crtc;
6070
6071         for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
6072                                          intel_crtc_bigjoiner_slave_pipes(master_crtc_state)) {
6073                 struct intel_crtc_state *slave_crtc_state =
6074                         intel_atomic_get_new_crtc_state(state, slave_crtc);
6075
6076                 slave_crtc_state->bigjoiner_pipes = 0;
6077
6078                 intel_crtc_copy_uapi_to_hw_state_modeset(state, slave_crtc);
6079         }
6080
6081         master_crtc_state->bigjoiner_pipes = 0;
6082 }
6083
6084 /**
6085  * DOC: asynchronous flip implementation
6086  *
6087  * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
6088  * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
6089  * Correspondingly, support is currently added for primary plane only.
6090  *
6091  * Async flip can only change the plane surface address, so anything else
6092  * changing is rejected from the intel_async_flip_check_hw() function.
6093  * Once this check is cleared, flip done interrupt is enabled using
6094  * the intel_crtc_enable_flip_done() function.
6095  *
6096  * As soon as the surface address register is written, flip done interrupt is
 * generated and the requested events are sent to the userspace in the interrupt
6098  * handler itself. The timestamp and sequence sent during the flip done event
6099  * correspond to the last vblank and have no relation to the actual time when
6100  * the flip done event was sent.
6101  */
6102 static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
6103                                        struct intel_crtc *crtc)
6104 {
6105         struct drm_i915_private *i915 = to_i915(state->base.dev);
6106         const struct intel_crtc_state *new_crtc_state =
6107                 intel_atomic_get_new_crtc_state(state, crtc);
6108         const struct intel_plane_state *old_plane_state;
6109         struct intel_plane_state *new_plane_state;
6110         struct intel_plane *plane;
6111         int i;
6112
6113         if (!new_crtc_state->uapi.async_flip)
6114                 return 0;
6115
6116         if (!new_crtc_state->uapi.active) {
6117                 drm_dbg_kms(&i915->drm,
6118                             "[CRTC:%d:%s] not active\n",
6119                             crtc->base.base.id, crtc->base.name);
6120                 return -EINVAL;
6121         }
6122
6123         if (intel_crtc_needs_modeset(new_crtc_state)) {
6124                 drm_dbg_kms(&i915->drm,
6125                             "[CRTC:%d:%s] modeset required\n",
6126                             crtc->base.base.id, crtc->base.name);
6127                 return -EINVAL;
6128         }
6129
6130         for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
6131                                              new_plane_state, i) {
6132                 if (plane->pipe != crtc->pipe)
6133                         continue;
6134
6135                 /*
6136                  * TODO: Async flip is only supported through the page flip IOCTL
6137                  * as of now. So support currently added for primary plane only.
6138                  * Support for other planes on platforms on which supports
6139                  * this(vlv/chv and icl+) should be added when async flip is
6140                  * enabled in the atomic IOCTL path.
6141                  */
6142                 if (!plane->async_flip) {
6143                         drm_dbg_kms(&i915->drm,
6144                                     "[PLANE:%d:%s] async flip not supported\n",
6145                                     plane->base.base.id, plane->base.name);
6146                         return -EINVAL;
6147                 }
6148
6149                 if (!old_plane_state->uapi.fb || !new_plane_state->uapi.fb) {
6150                         drm_dbg_kms(&i915->drm,
6151                                     "[PLANE:%d:%s] no old or new framebuffer\n",
6152                                     plane->base.base.id, plane->base.name);
6153                         return -EINVAL;
6154                 }
6155         }
6156
6157         return 0;
6158 }
6159
6160 static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct intel_crtc *crtc)
6161 {
6162         struct drm_i915_private *i915 = to_i915(state->base.dev);
6163         const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
6164         const struct intel_plane_state *new_plane_state, *old_plane_state;
6165         struct intel_plane *plane;
6166         int i;
6167
6168         old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
6169         new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
6170
6171         if (!new_crtc_state->uapi.async_flip)
6172                 return 0;
6173
6174         if (!new_crtc_state->hw.active) {
6175                 drm_dbg_kms(&i915->drm,
6176                             "[CRTC:%d:%s] not active\n",
6177                             crtc->base.base.id, crtc->base.name);
6178                 return -EINVAL;
6179         }
6180
6181         if (intel_crtc_needs_modeset(new_crtc_state)) {
6182                 drm_dbg_kms(&i915->drm,
6183                             "[CRTC:%d:%s] modeset required\n",
6184                             crtc->base.base.id, crtc->base.name);
6185                 return -EINVAL;
6186         }
6187
6188         if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
6189                 drm_dbg_kms(&i915->drm,
6190                             "[CRTC:%d:%s] Active planes cannot be in async flip\n",
6191                             crtc->base.base.id, crtc->base.name);
6192                 return -EINVAL;
6193         }
6194
6195         for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
6196                                              new_plane_state, i) {
6197                 if (plane->pipe != crtc->pipe)
6198                         continue;
6199
6200                 /*
6201                  * Only async flip capable planes should be in the state
6202                  * if we're really about to ask the hardware to perform
6203                  * an async flip. We should never get this far otherwise.
6204                  */
6205                 if (drm_WARN_ON(&i915->drm,
6206                                 new_crtc_state->do_async_flip && !plane->async_flip))
6207                         return -EINVAL;
6208
6209                 /*
6210                  * Only check async flip capable planes other planes
6211                  * may be involved in the initial commit due to
6212                  * the wm0/ddb optimization.
6213                  *
6214                  * TODO maybe should track which planes actually
6215                  * were requested to do the async flip...
6216                  */
6217                 if (!plane->async_flip)
6218                         continue;
6219
6220                 /*
6221                  * FIXME: This check is kept generic for all platforms.
6222                  * Need to verify this for all gen9 platforms to enable
6223                  * this selectively if required.
6224                  */
6225                 switch (new_plane_state->hw.fb->modifier) {
6226                 case I915_FORMAT_MOD_X_TILED:
6227                 case I915_FORMAT_MOD_Y_TILED:
6228                 case I915_FORMAT_MOD_Yf_TILED:
6229                 case I915_FORMAT_MOD_4_TILED:
6230                         break;
6231                 default:
6232                         drm_dbg_kms(&i915->drm,
6233                                     "[PLANE:%d:%s] Modifier does not support async flips\n",
6234                                     plane->base.base.id, plane->base.name);
6235                         return -EINVAL;
6236                 }
6237
6238                 if (new_plane_state->hw.fb->format->num_planes > 1) {
6239                         drm_dbg_kms(&i915->drm,
6240                                     "[PLANE:%d:%s] Planar formats do not support async flips\n",
6241                                     plane->base.base.id, plane->base.name);
6242                         return -EINVAL;
6243                 }
6244
6245                 if (old_plane_state->view.color_plane[0].mapping_stride !=
6246                     new_plane_state->view.color_plane[0].mapping_stride) {
6247                         drm_dbg_kms(&i915->drm,
6248                                     "[PLANE:%d:%s] Stride cannot be changed in async flip\n",
6249                                     plane->base.base.id, plane->base.name);
6250                         return -EINVAL;
6251                 }
6252
6253                 if (old_plane_state->hw.fb->modifier !=
6254                     new_plane_state->hw.fb->modifier) {
6255                         drm_dbg_kms(&i915->drm,
6256                                     "[PLANE:%d:%s] Modifier cannot be changed in async flip\n",
6257                                     plane->base.base.id, plane->base.name);
6258                         return -EINVAL;
6259                 }
6260
6261                 if (old_plane_state->hw.fb->format !=
6262                     new_plane_state->hw.fb->format) {
6263                         drm_dbg_kms(&i915->drm,
6264                                     "[PLANE:%d:%s] Pixel format cannot be changed in async flip\n",
6265                                     plane->base.base.id, plane->base.name);
6266                         return -EINVAL;
6267                 }
6268
6269                 if (old_plane_state->hw.rotation !=
6270                     new_plane_state->hw.rotation) {
6271                         drm_dbg_kms(&i915->drm,
6272                                     "[PLANE:%d:%s] Rotation cannot be changed in async flip\n",
6273                                     plane->base.base.id, plane->base.name);
6274                         return -EINVAL;
6275                 }
6276
6277                 if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
6278                     !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
6279                         drm_dbg_kms(&i915->drm,
6280                                     "[PLANE:%d:%s] Size/co-ordinates cannot be changed in async flip\n",
6281                                     plane->base.base.id, plane->base.name);
6282                         return -EINVAL;
6283                 }
6284
6285                 if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
6286                         drm_dbg_kms(&i915->drm,
6287                                     "[PLANES:%d:%s] Alpha value cannot be changed in async flip\n",
6288                                     plane->base.base.id, plane->base.name);
6289                         return -EINVAL;
6290                 }
6291
6292                 if (old_plane_state->hw.pixel_blend_mode !=
6293                     new_plane_state->hw.pixel_blend_mode) {
6294                         drm_dbg_kms(&i915->drm,
6295                                     "[PLANE:%d:%s] Pixel blend mode cannot be changed in async flip\n",
6296                                     plane->base.base.id, plane->base.name);
6297                         return -EINVAL;
6298                 }
6299
6300                 if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
6301                         drm_dbg_kms(&i915->drm,
6302                                     "[PLANE:%d:%s] Color encoding cannot be changed in async flip\n",
6303                                     plane->base.base.id, plane->base.name);
6304                         return -EINVAL;
6305                 }
6306
6307                 if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
6308                         drm_dbg_kms(&i915->drm,
6309                                     "[PLANE:%d:%s] Color range cannot be changed in async flip\n",
6310                                     plane->base.base.id, plane->base.name);
6311                         return -EINVAL;
6312                 }
6313
6314                 /* plane decryption is allow to change only in synchronous flips */
6315                 if (old_plane_state->decrypt != new_plane_state->decrypt) {
6316                         drm_dbg_kms(&i915->drm,
6317                                     "[PLANE:%d:%s] Decryption cannot be changed in async flip\n",
6318                                     plane->base.base.id, plane->base.name);
6319                         return -EINVAL;
6320                 }
6321         }
6322
6323         return 0;
6324 }
6325
6326 static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
6327 {
6328         struct drm_i915_private *i915 = to_i915(state->base.dev);
6329         struct intel_crtc_state *crtc_state;
6330         struct intel_crtc *crtc;
6331         u8 affected_pipes = 0;
6332         u8 modeset_pipes = 0;
6333         int i;
6334
6335         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
6336                 affected_pipes |= crtc_state->bigjoiner_pipes;
6337                 if (intel_crtc_needs_modeset(crtc_state))
6338                         modeset_pipes |= crtc_state->bigjoiner_pipes;
6339         }
6340
6341         for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, affected_pipes) {
6342                 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
6343                 if (IS_ERR(crtc_state))
6344                         return PTR_ERR(crtc_state);
6345         }
6346
6347         for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, modeset_pipes) {
6348                 int ret;
6349
6350                 crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
6351
6352                 crtc_state->uapi.mode_changed = true;
6353
6354                 ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
6355                 if (ret)
6356                         return ret;
6357
6358                 ret = intel_atomic_add_affected_planes(state, crtc);
6359                 if (ret)
6360                         return ret;
6361         }
6362
6363         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
6364                 /* Kill old bigjoiner link, we may re-establish afterwards */
6365                 if (intel_crtc_needs_modeset(crtc_state) &&
6366                     intel_crtc_is_bigjoiner_master(crtc_state))
6367                         kill_bigjoiner_slave(state, crtc);
6368         }
6369
6370         return 0;
6371 }
6372
6373 /**
6374  * intel_atomic_check - validate state object
6375  * @dev: drm device
6376  * @_state: state to validate
6377  */
6378 int intel_atomic_check(struct drm_device *dev,
6379                        struct drm_atomic_state *_state)
6380 {
6381         struct drm_i915_private *dev_priv = to_i915(dev);
6382         struct intel_atomic_state *state = to_intel_atomic_state(_state);
6383         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
6384         struct intel_crtc *crtc;
6385         int ret, i;
6386         bool any_ms = false;
6387
6388         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6389                                             new_crtc_state, i) {
6390                 /*
6391                  * crtc's state no longer considered to be inherited
6392                  * after the first userspace/client initiated commit.
6393                  */
6394                 if (!state->internal)
6395                         new_crtc_state->inherited = false;
6396
6397                 if (new_crtc_state->inherited != old_crtc_state->inherited)
6398                         new_crtc_state->uapi.mode_changed = true;
6399
6400                 if (new_crtc_state->uapi.scaling_filter !=
6401                     old_crtc_state->uapi.scaling_filter)
6402                         new_crtc_state->uapi.mode_changed = true;
6403         }
6404
6405         intel_vrr_check_modeset(state);
6406
6407         ret = drm_atomic_helper_check_modeset(dev, &state->base);
6408         if (ret)
6409                 goto fail;
6410
6411         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6412                 ret = intel_async_flip_check_uapi(state, crtc);
6413                 if (ret)
6414                         return ret;
6415         }
6416
6417         ret = intel_bigjoiner_add_affected_crtcs(state);
6418         if (ret)
6419                 goto fail;
6420
6421         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6422                                             new_crtc_state, i) {
6423                 if (!intel_crtc_needs_modeset(new_crtc_state)) {
6424                         if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
6425                                 copy_bigjoiner_crtc_state_nomodeset(state, crtc);
6426                         else
6427                                 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
6428                         continue;
6429                 }
6430
6431                 if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) {
6432                         drm_WARN_ON(&dev_priv->drm, new_crtc_state->uapi.enable);
6433                         continue;
6434                 }
6435
6436                 ret = intel_crtc_prepare_cleared_state(state, crtc);
6437                 if (ret)
6438                         goto fail;
6439
6440                 if (!new_crtc_state->hw.enable)
6441                         continue;
6442
6443                 ret = intel_modeset_pipe_config(state, crtc);
6444                 if (ret)
6445                         goto fail;
6446
6447                 ret = intel_atomic_check_bigjoiner(state, crtc);
6448                 if (ret)
6449                         goto fail;
6450         }
6451
6452         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6453                                             new_crtc_state, i) {
6454                 if (!intel_crtc_needs_modeset(new_crtc_state))
6455                         continue;
6456
6457                 if (new_crtc_state->hw.enable) {
6458                         ret = intel_modeset_pipe_config_late(state, crtc);
6459                         if (ret)
6460                                 goto fail;
6461                 }
6462
6463                 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
6464         }
6465
6466         /**
6467          * Check if fastset is allowed by external dependencies like other
6468          * pipes and transcoders.
6469          *
6470          * Right now it only forces a fullmodeset when the MST master
6471          * transcoder did not changed but the pipe of the master transcoder
6472          * needs a fullmodeset so all slaves also needs to do a fullmodeset or
6473          * in case of port synced crtcs, if one of the synced crtcs
6474          * needs a full modeset, all other synced crtcs should be
6475          * forced a full modeset.
6476          */
6477         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6478                 if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
6479                         continue;
6480
6481                 if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
6482                         enum transcoder master = new_crtc_state->mst_master_transcoder;
6483
6484                         if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
6485                                 new_crtc_state->uapi.mode_changed = true;
6486                                 new_crtc_state->update_pipe = false;
6487                         }
6488                 }
6489
6490                 if (is_trans_port_sync_mode(new_crtc_state)) {
6491                         u8 trans = new_crtc_state->sync_mode_slaves_mask;
6492
6493                         if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
6494                                 trans |= BIT(new_crtc_state->master_transcoder);
6495
6496                         if (intel_cpu_transcoders_need_modeset(state, trans)) {
6497                                 new_crtc_state->uapi.mode_changed = true;
6498                                 new_crtc_state->update_pipe = false;
6499                         }
6500                 }
6501
6502                 if (new_crtc_state->bigjoiner_pipes) {
6503                         if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes)) {
6504                                 new_crtc_state->uapi.mode_changed = true;
6505                                 new_crtc_state->update_pipe = false;
6506                         }
6507                 }
6508         }
6509
6510         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6511                                             new_crtc_state, i) {
6512                 if (!intel_crtc_needs_modeset(new_crtc_state))
6513                         continue;
6514
6515                 any_ms = true;
6516
6517                 intel_release_shared_dplls(state, crtc);
6518         }
6519
6520         if (any_ms && !check_digital_port_conflicts(state)) {
6521                 drm_dbg_kms(&dev_priv->drm,
6522                             "rejecting conflicting digital port configuration\n");
6523                 ret = -EINVAL;
6524                 goto fail;
6525         }
6526
6527         ret = drm_dp_mst_atomic_check(&state->base);
6528         if (ret)
6529                 goto fail;
6530
6531         ret = intel_atomic_check_planes(state);
6532         if (ret)
6533                 goto fail;
6534
6535         ret = intel_compute_global_watermarks(state);
6536         if (ret)
6537                 goto fail;
6538
6539         ret = intel_bw_atomic_check(state);
6540         if (ret)
6541                 goto fail;
6542
6543         ret = intel_cdclk_atomic_check(state, &any_ms);
6544         if (ret)
6545                 goto fail;
6546
6547         if (intel_any_crtc_needs_modeset(state))
6548                 any_ms = true;
6549
6550         if (any_ms) {
6551                 ret = intel_modeset_checks(state);
6552                 if (ret)
6553                         goto fail;
6554
6555                 ret = intel_modeset_calc_cdclk(state);
6556                 if (ret)
6557                         return ret;
6558         }
6559
6560         ret = intel_atomic_check_crtcs(state);
6561         if (ret)
6562                 goto fail;
6563
6564         ret = intel_fbc_atomic_check(state);
6565         if (ret)
6566                 goto fail;
6567
6568         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6569                                             new_crtc_state, i) {
6570                 intel_color_assert_luts(new_crtc_state);
6571
6572                 ret = intel_async_flip_check_hw(state, crtc);
6573                 if (ret)
6574                         goto fail;
6575
6576                 /* Either full modeset or fastset (or neither), never both */
6577                 drm_WARN_ON(&dev_priv->drm,
6578                             intel_crtc_needs_modeset(new_crtc_state) &&
6579                             intel_crtc_needs_fastset(new_crtc_state));
6580
6581                 if (!intel_crtc_needs_modeset(new_crtc_state) &&
6582                     !intel_crtc_needs_fastset(new_crtc_state))
6583                         continue;
6584
6585                 intel_crtc_state_dump(new_crtc_state, state,
6586                                       intel_crtc_needs_modeset(new_crtc_state) ?
6587                                       "modeset" : "fastset");
6588         }
6589
6590         return 0;
6591
6592  fail:
6593         if (ret == -EDEADLK)
6594                 return ret;
6595
6596         /*
6597          * FIXME would probably be nice to know which crtc specifically
6598          * caused the failure, in cases where we can pinpoint it.
6599          */
6600         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6601                                             new_crtc_state, i)
6602                 intel_crtc_state_dump(new_crtc_state, state, "failed");
6603
6604         return ret;
6605 }
6606
6607 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
6608 {
6609         struct intel_crtc_state *crtc_state;
6610         struct intel_crtc *crtc;
6611         int i, ret;
6612
6613         ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
6614         if (ret < 0)
6615                 return ret;
6616
6617         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
6618                 if (intel_crtc_needs_color_update(crtc_state))
6619                         intel_color_prepare_commit(crtc_state);
6620         }
6621
6622         return 0;
6623 }
6624
6625 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
6626                                   struct intel_crtc_state *crtc_state)
6627 {
6628         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6629
6630         if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes)
6631                 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
6632
6633         if (crtc_state->has_pch_encoder) {
6634                 enum pipe pch_transcoder =
6635                         intel_crtc_pch_transcoder(crtc);
6636
6637                 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
6638         }
6639 }
6640
/*
 * Apply the pipe-level updates permitted during a fastset (no full
 * modeset): pipe source size, panel fitter, linetime watermark and,
 * when seamless_m_n is set, the transcoder M1/N1 values.
 */
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */
	intel_set_pipe_src_size(new_crtc_state);

	/* on skylake this is done by detaching scalers */
	if (DISPLAY_VER(dev_priv) >= 9) {
		if (new_crtc_state->pch_pfit.enabled)
			skl_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		/* pre-skl: enable, re-program or disable the ilk-style pfit */
		if (new_crtc_state->pch_pfit.enabled)
			ilk_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ilk_pfit_disable(old_crtc_state);
	}

	/*
	 * The register is supposedly single buffered so perhaps
	 * not 100% correct to do this here. But SKL+ calculate
	 * this based on the adjust pixel rate so pfit changes do
	 * affect it and so it must be updated for fastsets.
	 * HSW/BDW only really need this here for fastboot, after
	 * that the value should not change without a full modeset.
	 */
	if (DISPLAY_VER(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		hsw_set_linetime_wm(new_crtc_state);

	/* Seamless M/N update: reprogram M1/N1 without a modeset. */
	if (new_crtc_state->seamless_m_n)
		intel_cpu_transcoder_set_m1_n1(crtc, new_crtc_state->cpu_transcoder,
					       &new_crtc_state->dp_m_n);
}
6684
/*
 * Per-pipe programming that must be done before the planes are
 * updated for this commit: color management arm, pipe misc,
 * fastset pipe updates, PSR2 manual tracking and watermarks.
 */
static void commit_pipe_pre_planes(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	/*
	 * During modesets pipe configuration was programmed as the
	 * CRTC was enabled.
	 */
	if (!modeset) {
		/* Arm the color management state prepared for this commit. */
		if (intel_crtc_needs_color_update(new_crtc_state))
			intel_color_commit_arm(new_crtc_state);

		if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
			bdw_set_pipe_misc(new_crtc_state);

		if (intel_crtc_needs_fastset(new_crtc_state))
			intel_pipe_fastset(old_crtc_state, new_crtc_state);
	}

	intel_psr2_program_trans_man_trk_ctl(new_crtc_state);

	intel_atomic_update_watermarks(state, crtc);
}
6714
6715 static void commit_pipe_post_planes(struct intel_atomic_state *state,
6716                                     struct intel_crtc *crtc)
6717 {
6718         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6719         const struct intel_crtc_state *new_crtc_state =
6720                 intel_atomic_get_new_crtc_state(state, crtc);
6721
6722         /*
6723          * Disable the scaler(s) after the plane(s) so that we don't
6724          * get a catastrophic underrun even if the two operations
6725          * end up happening in two different frames.
6726          */
6727         if (DISPLAY_VER(dev_priv) >= 9 &&
6728             !intel_crtc_needs_modeset(new_crtc_state))
6729                 skl_detach_scalers(new_crtc_state);
6730 }
6731
/*
 * Bring up a crtc that undergoes a full modeset in this commit;
 * no-op for crtcs that are only being updated.
 */
static void intel_enable_crtc(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!intel_crtc_needs_modeset(new_crtc_state))
		return;

	/* VRR will be enabled later, if required */
	intel_crtc_update_active_timings(new_crtc_state, false);

	/* Platform-specific crtc enable sequence via the display vtable. */
	dev_priv->display.funcs.display->crtc_enable(state, crtc);

	/*
	 * NOTE(review): bigjoiner slaves skip the pipe CRC re-enable —
	 * presumably handled through the master crtc; confirm.
	 */
	if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
		return;

	/* vblanks work again, re-enable pipe CRC. */
	intel_crtc_enable_pipe_crc(crtc);
}
6753
6754 static void intel_update_crtc(struct intel_atomic_state *state,
6755                               struct intel_crtc *crtc)
6756 {
6757         struct drm_i915_private *i915 = to_i915(state->base.dev);
6758         const struct intel_crtc_state *old_crtc_state =
6759                 intel_atomic_get_old_crtc_state(state, crtc);
6760         struct intel_crtc_state *new_crtc_state =
6761                 intel_atomic_get_new_crtc_state(state, crtc);
6762         bool modeset = intel_crtc_needs_modeset(new_crtc_state);
6763
6764         if (old_crtc_state->inherited ||
6765             intel_crtc_needs_modeset(new_crtc_state)) {
6766                 if (HAS_DPT(i915))
6767                         intel_dpt_configure(crtc);
6768         }
6769
6770         if (vrr_enabling(old_crtc_state, new_crtc_state)) {
6771                 intel_vrr_enable(new_crtc_state);
6772                 intel_crtc_update_active_timings(new_crtc_state,
6773                                                  new_crtc_state->vrr.enable);
6774         }
6775
6776         if (!modeset) {
6777                 if (new_crtc_state->preload_luts &&
6778                     intel_crtc_needs_color_update(new_crtc_state))
6779                         intel_color_load_luts(new_crtc_state);
6780
6781                 intel_pre_plane_update(state, crtc);
6782
6783                 if (intel_crtc_needs_fastset(new_crtc_state))
6784                         intel_encoders_update_pipe(state, crtc);
6785
6786                 if (DISPLAY_VER(i915) >= 11 &&
6787                     intel_crtc_needs_fastset(new_crtc_state))
6788                         icl_set_pipe_chicken(new_crtc_state);
6789         }
6790
6791         intel_fbc_update(state, crtc);
6792
6793         drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF));
6794
6795         if (!modeset &&
6796             intel_crtc_needs_color_update(new_crtc_state))
6797                 intel_color_commit_noarm(new_crtc_state);
6798
6799         intel_crtc_planes_update_noarm(state, crtc);
6800
6801         /* Perform vblank evasion around commit operation */
6802         intel_pipe_update_start(new_crtc_state);
6803
6804         commit_pipe_pre_planes(state, crtc);
6805
6806         intel_crtc_planes_update_arm(state, crtc);
6807
6808         commit_pipe_post_planes(state, crtc);
6809
6810         intel_pipe_update_end(new_crtc_state);
6811
6812         /*
6813          * We usually enable FIFO underrun interrupts as part of the
6814          * CRTC enable sequence during modesets.  But when we inherit a
6815          * valid pipe configuration from the BIOS we need to take care
6816          * of enabling them on the CRTC's first fastset.
6817          */
6818         if (intel_crtc_needs_fastset(new_crtc_state) && !modeset &&
6819             old_crtc_state->inherited)
6820                 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
6821 }
6822
6823 static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
6824                                           struct intel_crtc_state *old_crtc_state,
6825                                           struct intel_crtc_state *new_crtc_state,
6826                                           struct intel_crtc *crtc)
6827 {
6828         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6829
6830         /*
6831          * We need to disable pipe CRC before disabling the pipe,
6832          * or we race against vblank off.
6833          */
6834         intel_crtc_disable_pipe_crc(crtc);
6835
6836         dev_priv->display.funcs.display->crtc_disable(state, crtc);
6837         crtc->active = false;
6838         intel_fbc_disable(crtc);
6839
6840         if (!new_crtc_state->hw.active)
6841                 intel_initial_watermarks(state, crtc);
6842 }
6843
6844 static void intel_commit_modeset_disables(struct intel_atomic_state *state)
6845 {
6846         struct intel_crtc_state *new_crtc_state, *old_crtc_state;
6847         struct intel_crtc *crtc;
6848         u32 handled = 0;
6849         int i;
6850
6851         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6852                                             new_crtc_state, i) {
6853                 if (!intel_crtc_needs_modeset(new_crtc_state))
6854                         continue;
6855
6856                 if (!old_crtc_state->hw.active)
6857                         continue;
6858
6859                 intel_pre_plane_update(state, crtc);
6860                 intel_crtc_disable_planes(state, crtc);
6861         }
6862
6863         /* Only disable port sync and MST slaves */
6864         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6865                                             new_crtc_state, i) {
6866                 if (!intel_crtc_needs_modeset(new_crtc_state))
6867                         continue;
6868
6869                 if (!old_crtc_state->hw.active)
6870                         continue;
6871
6872                 /* In case of Transcoder port Sync master slave CRTCs can be
6873                  * assigned in any order and we need to make sure that
6874                  * slave CRTCs are disabled first and then master CRTC since
6875                  * Slave vblanks are masked till Master Vblanks.
6876                  */
6877                 if (!is_trans_port_sync_slave(old_crtc_state) &&
6878                     !intel_dp_mst_is_slave_trans(old_crtc_state) &&
6879                     !intel_crtc_is_bigjoiner_slave(old_crtc_state))
6880                         continue;
6881
6882                 intel_old_crtc_state_disables(state, old_crtc_state,
6883                                               new_crtc_state, crtc);
6884                 handled |= BIT(crtc->pipe);
6885         }
6886
6887         /* Disable everything else left on */
6888         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6889                                             new_crtc_state, i) {
6890                 if (!intel_crtc_needs_modeset(new_crtc_state) ||
6891                     (handled & BIT(crtc->pipe)))
6892                         continue;
6893
6894                 if (!old_crtc_state->hw.active)
6895                         continue;
6896
6897                 intel_old_crtc_state_disables(state, old_crtc_state,
6898                                               new_crtc_state, crtc);
6899         }
6900 }
6901
6902 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
6903 {
6904         struct intel_crtc_state *new_crtc_state;
6905         struct intel_crtc *crtc;
6906         int i;
6907
6908         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6909                 if (!new_crtc_state->hw.active)
6910                         continue;
6911
6912                 intel_enable_crtc(state, crtc);
6913                 intel_update_crtc(state, crtc);
6914         }
6915 }
6916
/*
 * skl+ variant of the modeset-enable step: pipes must be committed in an
 * order that keeps their DDB allocations from ever overlapping between
 * CRTC updates, otherwise we risk pipe underruns. Fastset pipes go
 * first (retrying until each one's new allocation is conflict-free),
 * then independent modeset pipes, then dependent ones (MST slaves,
 * port sync masters, bigjoiner masters), and finally the plane updates
 * for all newly enabled pipes.
 */
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
	u8 update_pipes = 0, modeset_pipes = 0;
	int i;

	/* Seed entries[] with the current (old) DDB of every fastset pipe. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if (!new_crtc_state->hw.active)
			continue;

		/* ignore allocations for crtc's that have been turned off. */
		if (!intel_crtc_needs_modeset(new_crtc_state)) {
			entries[pipe] = old_crtc_state->wm.skl.ddb;
			update_pipes |= BIT(pipe);
		} else {
			modeset_pipes |= BIT(pipe);
		}
	}

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other between CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 *
	 * So first lets enable all pipes that do not need a fullmodeset as
	 * those don't have any external dependency.
	 */
	while (update_pipes) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
						    new_crtc_state, i) {
			enum pipe pipe = crtc->pipe;

			if ((update_pipes & BIT(pipe)) == 0)
				continue;

			/*
			 * Defer this pipe while its new DDB still overlaps
			 * an allocation recorded in entries[]; the outer
			 * while loop retries it on a later pass.
			 */
			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
							entries, I915_MAX_PIPES, pipe))
				continue;

			entries[pipe] = new_crtc_state->wm.skl.ddb;
			update_pipes &= ~BIT(pipe);

			intel_update_crtc(state, crtc);

			/*
			 * If this is an already active pipe, it's DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
						 &old_crtc_state->wm.skl.ddb) &&
			    (update_pipes | modeset_pipes))
				intel_crtc_wait_for_next_vblank(crtc);
		}
	}

	/* Remember which pipes still need their plane update in the last loop. */
	update_pipes = modeset_pipes;

	/*
	 * Enable all pipes that needs a modeset and do not depends on other
	 * pipes
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
		    is_trans_port_sync_master(new_crtc_state) ||
		    intel_crtc_is_bigjoiner_master(new_crtc_state))
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Then we enable all remaining pipes that depend on other
	 * pipes: MST slaves and port sync masters, big joiner master
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Finally we do the plane updates/etc. for all pipes that got enabled.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((update_pipes & BIT(pipe)) == 0)
			continue;

		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
									entries, I915_MAX_PIPES, pipe));

		entries[pipe] = new_crtc_state->wm.skl.ddb;
		update_pipes &= ~BIT(pipe);

		intel_update_crtc(state, crtc);
	}

	/* Every pipe must have been handled by one of the loops above. */
	drm_WARN_ON(&dev_priv->drm, modeset_pipes);
	drm_WARN_ON(&dev_priv->drm, update_pipes);
}
7038
7039 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
7040 {
7041         struct intel_atomic_state *state, *next;
7042         struct llist_node *freed;
7043
7044         freed = llist_del_all(&dev_priv->display.atomic_helper.free_list);
7045         llist_for_each_entry_safe(state, next, freed, freed)
7046                 drm_atomic_state_put(&state->base);
7047 }
7048
7049 void intel_atomic_helper_free_state_worker(struct work_struct *work)
7050 {
7051         struct drm_i915_private *dev_priv =
7052                 container_of(work, typeof(*dev_priv), display.atomic_helper.free_work);
7053
7054         intel_atomic_helper_free_state(dev_priv);
7055 }
7056
/*
 * Sleep until the commit's ready fence signals, while also waking up if
 * a modeset-requiring GPU reset (I915_RESET_MODESET) is flagged. Two
 * wait queue entries are registered so that a wakeup on either source
 * ends the wait.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);
	for (;;) {
		/* Register on both queues before checking either condition. */
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
					      I915_RESET_MODESET),
				&wait_reset, TASK_UNINTERRUPTIBLE);


		/* Done when the fence has signaled or a modeset reset is pending. */
		if (i915_sw_fence_done(&intel_state->commit_ready) ||
		    test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
	finish_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
				  I915_RESET_MODESET),
		    &wait_reset);
}
7083
7084 static void intel_atomic_cleanup_work(struct work_struct *work)
7085 {
7086         struct intel_atomic_state *state =
7087                 container_of(work, struct intel_atomic_state, base.commit_work);
7088         struct drm_i915_private *i915 = to_i915(state->base.dev);
7089         struct intel_crtc_state *old_crtc_state;
7090         struct intel_crtc *crtc;
7091         int i;
7092
7093         for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i)
7094                 intel_color_cleanup_commit(old_crtc_state);
7095
7096         drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
7097         drm_atomic_helper_commit_cleanup_done(&state->base);
7098         drm_atomic_state_put(&state->base);
7099
7100         intel_atomic_helper_free_state(i915);
7101 }
7102
/*
 * Read back the fast clear color value from each framebuffer that has a
 * CCS clear color plane, caching it in the plane state (plane_state->ccval)
 * for use during the commit.
 */
static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_plane *plane;
	struct intel_plane_state *plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct drm_framebuffer *fb = plane_state->hw.fb;
		int cc_plane;
		int ret;

		if (!fb)
			continue;

		/* Skip framebuffers without a clear color plane. */
		cc_plane = intel_fb_rc_ccs_cc_plane(fb);
		if (cc_plane < 0)
			continue;

		/*
		 * The layout of the fast clear color value expected by HW
		 * (the DRM ABI requiring this value to be located in fb at
		 * offset 0 of cc plane, plane #2 previous generations or
		 * plane #1 for flat ccs):
		 * - 4 x 4 bytes per-channel value
		 *   (in surface type specific float/int format provided by the fb user)
		 * - 8 bytes native color value used by the display
		 *   (converted/written by GPU during a fast clear operation using the
		 *    above per-channel values)
		 *
		 * The commit's FB prepare hook already ensured that FB obj is pinned and the
		 * caller made sure that the object is synced wrt. the related color clear value
		 * GPU write on it.
		 */
		/* +16 skips the 4 x 4 byte per-channel values described above. */
		ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
						     fb->offsets[cc_plane] + 16,
						     &plane_state->ccval,
						     sizeof(plane_state->ccval));
		/* The above could only fail if the FB obj has an unexpected backing store type. */
		drm_WARN_ON(&i915->drm, ret);
	}
}
7145
/*
 * Perform the hardware portion of an atomic commit: wait for the ready
 * fence, disable outgoing pipes, reprogram clocks/watermarks/planes,
 * enable incoming pipes, wait for the flips, and finally hand the state
 * cleanup off to a highpri worker. Runs either inline (blocking commits)
 * or from a commit workqueue (see intel_atomic_commit()).
 */
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {};
	intel_wakeref_t wakeref = 0;
	int i;

	intel_atomic_commit_fence_wait(state);

	drm_atomic_helper_wait_for_dependencies(&state->base);
	drm_dp_mst_atomic_wait_for_dependencies(&state->base);

	/*
	 * During full modesets we write a lot of registers, wait
	 * for PLLs, etc. Doing that while DC states are enabled
	 * is not a good idea.
	 *
	 * During fastsets and other updates we also need to
	 * disable DC states due to the following scenario:
	 * 1. DC5 exit and PSR exit happen
	 * 2. Some or all _noarm() registers are written
	 * 3. Due to some long delay PSR is re-entered
	 * 4. DC5 entry -> DMC saves the already written new
	 *    _noarm() registers and the old not yet written
	 *    _arm() registers
	 * 5. DC5 exit -> DMC restores a mixture of old and
	 *    new register values and arms the update
	 * 6. PSR exit -> hardware latches a mixture of old and
	 *    new register values -> corrupted frame, or worse
	 * 7. New _arm() registers are finally written
	 * 8. Hardware finally latches a complete set of new
	 *    register values, and subsequent frames will be OK again
	 *
	 * Also note that due to the pipe CSC hardware issues on
	 * SKL/GLK DC states must remain off until the pipe CSC
	 * state readout has happened. Otherwise we risk corrupting
	 * the CSC latched register values with the readout (see
	 * skl_read_csc() and skl_color_commit_noarm()).
	 */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF);

	intel_atomic_prepare_plane_clear_colors(state);

	/*
	 * Grab the extra power domains for pipes being modeset/fastset;
	 * released via intel_modeset_put_crtc_power_domains() below.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (intel_crtc_needs_modeset(new_crtc_state) ||
		    intel_crtc_needs_fastset(new_crtc_state))
			intel_modeset_get_crtc_power_domains(new_crtc_state, &put_domains[crtc->pipe]);
	}

	intel_commit_modeset_disables(state);

	/* FIXME: Eventually get rid of our crtc->config pointer */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		crtc->config = new_crtc_state;

	if (state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);

		intel_set_cdclk_pre_plane_update(state);

		intel_modeset_verify_disabled(dev_priv, state);
	}

	intel_sagv_pre_plane_update(state);

	/* Complete the events for pipes that have now been disabled */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = intel_crtc_needs_modeset(new_crtc_state);

		/* Complete events for now disable pipes here. */
		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(&crtc->base,
						   new_crtc_state->uapi.event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->uapi.event = NULL;
		}
	}

	intel_encoders_update_prepare(state);

	intel_dbuf_pre_plane_update(state);
	intel_mbus_dbox_update(state);

	/* Arm flip-done interrupts for async flips before committing. */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->do_async_flip)
			intel_crtc_enable_flip_done(state, crtc);
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.funcs.display->commit_modeset_enables(state);

	if (state->modeset)
		intel_set_cdclk_post_plane_update(state);

	intel_wait_for_vblank_workers(state);

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchrously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need out special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, &state->base);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->do_async_flip)
			intel_crtc_disable_flip_done(state, crtc);
	}

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/*
		 * Gen2 reports pipe underruns whenever all planes are disabled.
		 * So re-enable underrun reporting after some planes get enabled.
		 *
		 * We do this before .optimize_watermarks() so that we have a
		 * chance of catching underruns with the intermediate watermarks
		 * vs. the new plane configuration.
		 */
		if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

		intel_optimize_watermarks(state, crtc);
	}

	intel_dbuf_post_plane_update(state);
	intel_psr_post_plane_update(state);

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(state, crtc);

		intel_modeset_put_crtc_power_domains(crtc, &put_domains[crtc->pipe]);

		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);

		/* Must be done after gamma readout due to HSW split gamma vs. IPS w/a */
		hsw_ips_post_update(state, crtc);

		/*
		 * Activate DRRS after state readout to avoid
		 * dp_m_n vs. dp_m2_n2 confusion on BDW+.
		 */
		intel_drrs_activate(new_crtc_state);

		/*
		 * DSB cleanup is done in cleanup_work aligning with framebuffer
		 * cleanup. So copy and reset the dsb structure to sync with
		 * commit_done and later do dsb cleanup in cleanup_work.
		 *
		 * FIXME get rid of this funny new->old swapping
		 */
		old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
	}

	/* Underruns don't always raise interrupts, so check manually */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);

	if (state->modeset)
		intel_verify_planes(state);

	intel_sagv_post_plane_update(state);

	drm_atomic_helper_commit_hw_done(&state->base);

	if (state->modeset) {
		/* As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
	}
	intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF, wakeref);
	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);

	/*
	 * Defer the cleanup of the old state to a separate worker to not
	 * impede the current task (userspace for blocking modesets) that
	 * are executed inline. For out-of-line asynchronous modesets/flips,
	 * deferring to a new worker seems overkill, but we would place a
	 * schedule point (cond_resched()) here anyway to keep latencies
	 * down.
	 */
	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
	queue_work(system_highpri_wq, &state->base.commit_work);
}
7350
7351 static void intel_atomic_commit_work(struct work_struct *work)
7352 {
7353         struct intel_atomic_state *state =
7354                 container_of(work, struct intel_atomic_state, base.commit_work);
7355
7356         intel_atomic_commit_tail(state);
7357 }
7358
7359 static int
7360 intel_atomic_commit_ready(struct i915_sw_fence *fence,
7361                           enum i915_sw_fence_notify notify)
7362 {
7363         struct intel_atomic_state *state =
7364                 container_of(fence, struct intel_atomic_state, commit_ready);
7365
7366         switch (notify) {
7367         case FENCE_COMPLETE:
7368                 /* we do blocking waits in the worker, nothing to do here */
7369                 break;
7370         case FENCE_FREE:
7371                 {
7372                         struct intel_atomic_helper *helper =
7373                                 &to_i915(state->base.dev)->display.atomic_helper;
7374
7375                         if (llist_add(&state->freed, &helper->free_list))
7376                                 schedule_work(&helper->free_work);
7377                         break;
7378                 }
7379         }
7380
7381         return NOTIFY_DONE;
7382 }
7383
7384 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
7385 {
7386         struct intel_plane_state *old_plane_state, *new_plane_state;
7387         struct intel_plane *plane;
7388         int i;
7389
7390         for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
7391                                              new_plane_state, i)
7392                 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
7393                                         to_intel_frontbuffer(new_plane_state->hw.fb),
7394                                         plane->frontbuffer_bit);
7395 }
7396
/*
 * Top-level atomic commit entry point: prepares the commit, swaps the
 * state in, and either runs the commit tail inline (blocking) or queues
 * it on the modeset/flip workqueue (nonblocking).
 *
 * Returns 0 on success or a negative error code; on error all acquired
 * resources (fence, runtime pm wakeref, plane state) are released.
 */
int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
			bool nonblock)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	/* Held across the commit; released in intel_atomic_commit_tail(). */
	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	drm_atomic_state_get(&state->base);
	i915_sw_fence_init(&state->commit_ready,
			   intel_atomic_commit_ready);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->base.legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(state);
	if (ret) {
		drm_dbg_atomic(&dev_priv->drm,
			       "Preparing state failed with %i\n", ret);
		i915_sw_fence_commit(&state->commit_ready);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}

	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
	if (!ret)
		ret = drm_atomic_helper_swap_state(&state->base, true);
	if (!ret)
		intel_atomic_swap_global_state(state);

	/* Unwind everything acquired so far if setup/swap failed. */
	if (ret) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		i915_sw_fence_commit(&state->commit_ready);

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			intel_color_cleanup_commit(new_crtc_state);

		drm_atomic_helper_cleanup_planes(dev, &state->base);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}
	intel_shared_dpll_swap_state(state);
	intel_atomic_track_fbs(state);

	/* Extra reference for the commit tail; put in intel_atomic_cleanup_work(). */
	drm_atomic_state_get(&state->base);
	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

	i915_sw_fence_commit(&state->commit_ready);
	if (nonblock && state->modeset) {
		queue_work(dev_priv->display.wq.modeset, &state->base.commit_work);
	} else if (nonblock) {
		queue_work(dev_priv->display.wq.flip, &state->base.commit_work);
	} else {
		/* Blocking commits run the tail inline, after pending modesets drain. */
		if (state->modeset)
			flush_workqueue(dev_priv->display.wq.modeset);
		intel_atomic_commit_tail(state);
	}

	return 0;
}
7486
7487 /**
7488  * intel_plane_destroy - destroy a plane
7489  * @plane: plane to destroy
7490  *
7491  * Common destruction function for all types of planes (primary, cursor,
7492  * sprite).
7493  */
7494 void intel_plane_destroy(struct drm_plane *plane)
7495 {
7496         drm_plane_cleanup(plane);
7497         kfree(to_intel_plane(plane));
7498 }
7499
7500 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
7501                                       struct drm_file *file)
7502 {
7503         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
7504         struct drm_crtc *drmmode_crtc;
7505         struct intel_crtc *crtc;
7506
7507         drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
7508         if (!drmmode_crtc)
7509                 return -ENOENT;
7510
7511         crtc = to_intel_crtc(drmmode_crtc);
7512         pipe_from_crtc_id->pipe = crtc->pipe;
7513
7514         return 0;
7515 }
7516
7517 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
7518 {
7519         struct drm_device *dev = encoder->base.dev;
7520         struct intel_encoder *source_encoder;
7521         u32 possible_clones = 0;
7522
7523         for_each_intel_encoder(dev, source_encoder) {
7524                 if (encoders_cloneable(encoder, source_encoder))
7525                         possible_clones |= drm_encoder_mask(&source_encoder->base);
7526         }
7527
7528         return possible_clones;
7529 }
7530
7531 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
7532 {
7533         struct drm_device *dev = encoder->base.dev;
7534         struct intel_crtc *crtc;
7535         u32 possible_crtcs = 0;
7536
7537         for_each_intel_crtc_in_pipe_mask(dev, crtc, encoder->pipe_mask)
7538                 possible_crtcs |= drm_crtc_mask(&crtc->base);
7539
7540         return possible_crtcs;
7541 }
7542
7543 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
7544 {
7545         if (!IS_MOBILE(dev_priv))
7546                 return false;
7547
7548         if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
7549                 return false;
7550
7551         if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
7552                 return false;
7553
7554         return true;
7555 }
7556
7557 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
7558 {
7559         if (DISPLAY_VER(dev_priv) >= 9)
7560                 return false;
7561
7562         if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
7563                 return false;
7564
7565         if (HAS_PCH_LPT_H(dev_priv) &&
7566             intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
7567                 return false;
7568
7569         /* DDI E can't be used if DDI A requires 4 lanes */
7570         if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
7571                 return false;
7572
7573         if (!dev_priv->display.vbt.int_crt_support)
7574                 return false;
7575
7576         return true;
7577 }
7578
/*
 * intel_setup_outputs - probe and register all display outputs (encoders)
 * @dev_priv: i915 device
 *
 * Walks the platform-specific list of possible output ports, probing
 * hardware straps/fuse registers and the VBT where needed, and registers
 * an encoder for each detected output. Afterwards it fills in each
 * encoder's possible_crtcs/possible_clones masks, sets up the PCH
 * reference clock, and moves panel connectors to the head of the
 * connector list.
 *
 * Ordering within each platform branch is significant (e.g. LVDS must be
 * initialized before eDP, see the comment in the PCH-split branch).
 */
void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_unlock_regs_wa(dev_priv);

	if (!HAS_DISPLAY(dev_priv))
		return;

	/*
	 * DDI-era platforms (MTL, DG2, ADL, TGL/DG1/RKL, ICL/JSL-EHL,
	 * GLK/BXT, SKL+): ports are known per platform, no strap probing.
	 */
	if (IS_METEORLAKE(dev_priv)) {
		/* TODO: initialize TC ports as well */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
	} else if (IS_DG2(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D_XELPD);
		intel_ddi_init(dev_priv, PORT_TC1);
	} else if (IS_ALDERLAKE_P(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
		intel_ddi_init(dev_priv, PORT_TC3);
		intel_ddi_init(dev_priv, PORT_TC4);
		icl_dsi_init(dev_priv);
	} else if (IS_ALDERLAKE_S(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
		intel_ddi_init(dev_priv, PORT_TC3);
		intel_ddi_init(dev_priv, PORT_TC4);
	} else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
		intel_ddi_init(dev_priv, PORT_TC3);
		intel_ddi_init(dev_priv, PORT_TC4);
		intel_ddi_init(dev_priv, PORT_TC5);
		intel_ddi_init(dev_priv, PORT_TC6);
		icl_dsi_init(dev_priv);
	} else if (IS_JSL_EHL(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		icl_dsi_init(dev_priv);
	} else if (DISPLAY_VER(dev_priv) == 11) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		intel_ddi_init(dev_priv, PORT_F);
		icl_dsi_init(dev_priv);
	} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		vlv_dsi_init(dev_priv);
	} else if (DISPLAY_VER(dev_priv) >= 9) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
	} else if (HAS_DDI(dev_priv)) {
		/* HSW/BDW: digital outputs detected via strap registers. */
		u32 found;

		if (intel_ddi_crt_present(dev_priv))
			intel_crt_init(dev_priv);

		/* Haswell uses DDI functions to detect digital outputs. */
		found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		if (found)
			intel_ddi_init(dev_priv, PORT_A);

		found = intel_de_read(dev_priv, SFUSE_STRAP);
		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev_priv, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev_priv, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev_priv, PORT_D);
		if (found & SFUSE_STRAP_DDIF_DETECTED)
			intel_ddi_init(dev_priv, PORT_F);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		/* ILK/SNB/IVB: outputs hang off the PCH. */
		int found;

		/*
		 * intel_edp_init_connector() depends on this completing first,
		 * to prevent the registration of both eDP and LVDS and the
		 * incorrect sharing of the PPS.
		 */
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);

		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

		if (ilk_has_edp_a(dev_priv))
			g4x_dp_init(dev_priv, DP_A, PORT_A);

		if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
				g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
			g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
			g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
			g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
			g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		if (IS_VALLEYVIEW(dev_priv) && dev_priv->display.vbt.int_crt_support)
			intel_crt_init(dev_priv);

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
		if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
		if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
				g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
			if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
				g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
		}

		vlv_dsi_init(dev_priv);
	} else if (IS_PINEVIEW(dev_priv)) {
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);
	} else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
		bool found = false;

		if (IS_MOBILE(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOB\n");
				g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				g4x_dp_init(dev_priv, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
		}

		if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOC\n");
				g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				g4x_dp_init(dev_priv, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
			g4x_dp_init(dev_priv, DP_D, PORT_D);

		if (SUPPORTS_TV(dev_priv))
			intel_tv_init(dev_priv);
	} else if (DISPLAY_VER(dev_priv) == 2) {
		if (IS_I85X(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);
		intel_dvo_init(dev_priv);
	}

	/* Now that all encoders exist, compute their crtc/clone masks. */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs =
			intel_encoder_possible_crtcs(encoder);
		encoder->base.possible_clones =
			intel_encoder_possible_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
7823
7824 static int max_dotclock(struct drm_i915_private *i915)
7825 {
7826         int max_dotclock = i915->max_dotclk_freq;
7827
7828         /* icl+ might use bigjoiner */
7829         if (DISPLAY_VER(i915) >= 11)
7830                 max_dotclock *= 2;
7831
7832         return max_dotclock;
7833 }
7834
/*
 * intel_mode_valid - validate a display mode against device-wide limits
 * @dev: DRM device
 * @mode: mode to validate
 *
 * Rejects mode flags the hardware never supports, clearly excessive
 * dotclocks, and timings outside the per-generation transcoder limits.
 * Per-connector and per-plane limits are checked elsewhere.
 */
enum drm_mode_status intel_mode_valid(struct drm_device *dev,
				      const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int hdisplay_max, htotal_max;
	int vdisplay_max, vtotal_max;

	/*
	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
	 * of DBLSCAN modes to the output's mode list when they detect
	 * the scaling mode property on the connector. And they don't
	 * ask the kernel to validate those modes in any way until
	 * modeset time at which point the client gets a protocol error.
	 * So in order to not upset those clients we silently ignore the
	 * DBLSCAN flag on such connectors. For other connectors we will
	 * reject modes with the DBLSCAN flag in encoder->compute_config().
	 * And we always reject DBLSCAN modes in connector->mode_valid()
	 * as we never want such modes on the connector's mode list.
	 */

	if (mode->vscan > 1)
		return MODE_NO_VSCAN;

	if (mode->flags & DRM_MODE_FLAG_HSKEW)
		return MODE_H_ILLEGAL;

	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
			   DRM_MODE_FLAG_NCSYNC |
			   DRM_MODE_FLAG_PCSYNC))
		return MODE_HSYNC;

	if (mode->flags & (DRM_MODE_FLAG_BCAST |
			   DRM_MODE_FLAG_PIXMUX |
			   DRM_MODE_FLAG_CLKDIV2))
		return MODE_BAD;

	/*
	 * Reject clearly excessive dotclocks early to
	 * avoid having to worry about huge integers later.
	 */
	if (mode->clock > max_dotclock(dev_priv))
		return MODE_CLOCK_HIGH;

	/* Transcoder timing limits */
	if (DISPLAY_VER(dev_priv) >= 11) {
		hdisplay_max = 16384;
		vdisplay_max = 8192;
		htotal_max = 16384;
		vtotal_max = 8192;
	} else if (DISPLAY_VER(dev_priv) >= 9 ||
		   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else if (DISPLAY_VER(dev_priv) >= 3) {
		hdisplay_max = 4096;
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else {
		hdisplay_max = 2048;
		vdisplay_max = 2048;
		htotal_max = 4096;
		vtotal_max = 4096;
	}

	/* Sync positions are bounded by the same totals as htotal/vtotal. */
	if (mode->hdisplay > hdisplay_max ||
	    mode->hsync_start > htotal_max ||
	    mode->hsync_end > htotal_max ||
	    mode->htotal > htotal_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > vdisplay_max ||
	    mode->vsync_start > vtotal_max ||
	    mode->vsync_end > vtotal_max ||
	    mode->vtotal > vtotal_max)
		return MODE_V_ILLEGAL;

	/* Minimum active size / blanking requirements per generation. */
	if (DISPLAY_VER(dev_priv) >= 5) {
		if (mode->hdisplay < 64 ||
		    mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 5)
			return MODE_V_ILLEGAL;
	} else {
		if (mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 3)
			return MODE_V_ILLEGAL;
	}

	/*
	 * Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
	    mode->hsync_start == mode->hdisplay)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}
7939
7940 enum drm_mode_status
7941 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
7942                                 const struct drm_display_mode *mode,
7943                                 bool bigjoiner)
7944 {
7945         int plane_width_max, plane_height_max;
7946
7947         /*
7948          * intel_mode_valid() should be
7949          * sufficient on older platforms.
7950          */
7951         if (DISPLAY_VER(dev_priv) < 9)
7952                 return MODE_OK;
7953
7954         /*
7955          * Most people will probably want a fullscreen
7956          * plane so let's not advertize modes that are
7957          * too big for that.
7958          */
7959         if (DISPLAY_VER(dev_priv) >= 11) {
7960                 plane_width_max = 5120 << bigjoiner;
7961                 plane_height_max = 4320;
7962         } else {
7963                 plane_width_max = 5120;
7964                 plane_height_max = 4096;
7965         }
7966
7967         if (mode->hdisplay > plane_width_max)
7968                 return MODE_H_ILLEGAL;
7969
7970         if (mode->vdisplay > plane_height_max)
7971                 return MODE_V_ILLEGAL;
7972
7973         return MODE_OK;
7974 }
7975
/* gen9+ (SKL and later): hsw-style pipe config/enable, skl commit ordering. */
static const struct intel_display_funcs skl_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = skl_commit_modeset_enables,
	.get_initial_plane_config = skl_get_initial_plane_config,
};
7983
/* Pre-gen9 DDI platforms (HSW/BDW): hsw crtc hooks, generic commit path. */
static const struct intel_display_funcs ddi_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};
7991
/* PCH-split platforms (ILK/SNB/IVB): ilk crtc hooks, generic commit path. */
static const struct intel_display_funcs pch_split_display_funcs = {
	.get_pipe_config = ilk_get_pipe_config,
	.crtc_enable = ilk_crtc_enable,
	.crtc_disable = ilk_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};
7999
/* VLV/CHV: i9xx pipe config/disable with a VLV-specific enable sequence. */
static const struct intel_display_funcs vlv_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = valleyview_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};
8007
/* Fallback for all remaining (older) platforms: plain i9xx hooks. */
static const struct intel_display_funcs i9xx_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = i9xx_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};
8015
8016 /**
8017  * intel_init_display_hooks - initialize the display modesetting hooks
8018  * @dev_priv: device private
8019  */
8020 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
8021 {
8022         if (DISPLAY_VER(dev_priv) >= 9) {
8023                 dev_priv->display.funcs.display = &skl_display_funcs;
8024         } else if (HAS_DDI(dev_priv)) {
8025                 dev_priv->display.funcs.display = &ddi_display_funcs;
8026         } else if (HAS_PCH_SPLIT(dev_priv)) {
8027                 dev_priv->display.funcs.display = &pch_split_display_funcs;
8028         } else if (IS_CHERRYVIEW(dev_priv) ||
8029                    IS_VALLEYVIEW(dev_priv)) {
8030                 dev_priv->display.funcs.display = &vlv_display_funcs;
8031         } else {
8032                 dev_priv->display.funcs.display = &i9xx_display_funcs;
8033         }
8034 }
8035
/*
 * intel_initial_commit - sanitize the BIOS-inherited state with a commit
 * @dev: DRM device
 *
 * Builds an atomic state containing every CRTC, forces LUT/connector
 * updates where needed on active CRTCs, and commits it. Uses the
 * standard drm_modeset_acquire deadlock-backoff retry dance.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

	state->acquire_ctx = &ctx;
	to_intel_atomic_state(state)->internal = true;

retry:
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			struct intel_encoder *encoder;

			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			/*
			 * Pull in the connectors of any encoder whose
			 * initial state fails its fastset check, so the
			 * commit can correct it.
			 */
			for_each_intel_encoder_mask(dev, encoder,
						    crtc_state->uapi.encoder_mask) {
				if (encoder->initial_fastset_check &&
				    !encoder->initial_fastset_check(encoder, crtc_state)) {
					ret = drm_atomic_add_affected_connectors(state,
										 &crtc->base);
					if (ret)
						goto out;
				}
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	/* Lock contention: drop state and locks, then retry from scratch. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
8106
/*
 * i830_enable_pipe - force-enable a pipe with a fixed 640x480@60 mode
 * @dev_priv: i915 device
 * @pipe: pipe to enable
 *
 * Used by the i830 "pipes must be on" quirk: programs transcoder
 * timings, FP/DPLL dividers and the pipe source size for a 640x480
 * VGA mode, then enables the transcoder and waits for the scanline
 * to start moving. The register write ordering and delays below are
 * required by the hardware; do not reorder.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
	enum transcoder cpu_transcoder = (enum transcoder)pipe;
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* Sanity-check the divider values actually give ~25.175 MHz. */
	drm_WARN_ON(&dev_priv->drm,
		    i9xx_calc_dpll_params(48000, &clock) != 25154);

	drm_dbg_kms(&dev_priv->drm,
		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		    pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	/* Standard 640x480@60 (800x525 total) CRT timings. */
	intel_de_write(dev_priv, TRANS_HTOTAL(cpu_transcoder),
		       HACTIVE(640 - 1) | HTOTAL(800 - 1));
	intel_de_write(dev_priv, TRANS_HBLANK(cpu_transcoder),
		       HBLANK_START(640 - 1) | HBLANK_END(800 - 1));
	intel_de_write(dev_priv, TRANS_HSYNC(cpu_transcoder),
		       HSYNC_START(656 - 1) | HSYNC_END(752 - 1));
	intel_de_write(dev_priv, TRANS_VTOTAL(cpu_transcoder),
		       VACTIVE(480 - 1) | VTOTAL(525 - 1));
	intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder),
		       VBLANK_START(480 - 1) | VBLANK_END(525 - 1));
	intel_de_write(dev_priv, TRANS_VSYNC(cpu_transcoder),
		       VSYNC_START(490 - 1) | VSYNC_END(492 - 1));
	intel_de_write(dev_priv, PIPESRC(pipe),
		       PIPESRC_WIDTH(640 - 1) | PIPESRC_HEIGHT(480 - 1));

	intel_de_write(dev_priv, FP0(pipe), fp);
	intel_de_write(dev_priv, FP1(pipe), fp);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		intel_de_write(dev_priv, DPLL(pipe), dpll);
		intel_de_posting_read(dev_priv, DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	intel_de_write(dev_priv, TRANSCONF(pipe), TRANSCONF_ENABLE);
	intel_de_posting_read(dev_priv, TRANSCONF(pipe));

	/* Confirm the pipe is actually scanning out. */
	intel_wait_for_pipe_scanline_moving(crtc);
}
8186
/*
 * i830_disable_pipe - force-disable a pipe enabled by the i830 quirk
 * @dev_priv: i915 device
 * @pipe: pipe to disable
 *
 * Counterpart of i830_enable_pipe(): disables the transcoder, waits for
 * the scanline to stop, then shuts down the DPLL (leaving VGA mode
 * disabled). Warns if any plane or cursor is still enabled, since
 * nothing should be scanning out of this pipe at this point.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
		    pipe_name(pipe));

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE_MASK);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE_MASK);

	intel_de_write(dev_priv, TRANSCONF(pipe), 0);
	intel_de_posting_read(dev_priv, TRANSCONF(pipe));

	/* The pipe must stop scanning before the DPLL may be shut off. */
	intel_wait_for_pipe_scanline_stopped(crtc);

	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}
8213
8214 void intel_hpd_poll_fini(struct drm_i915_private *i915)
8215 {
8216         struct intel_connector *connector;
8217         struct drm_connector_list_iter conn_iter;
8218
8219         /* Kill all the work that may have been queued by hpd. */
8220         drm_connector_list_iter_begin(&i915->drm, &conn_iter);
8221         for_each_intel_connector_iter(connector, &conn_iter) {
8222                 if (connector->modeset_retry_work.func)
8223                         cancel_work_sync(&connector->modeset_retry_work);
8224                 if (connector->hdcp.shim) {
8225                         cancel_delayed_work_sync(&connector->hdcp.check_work);
8226                         cancel_work_sync(&connector->hdcp.prop_work);
8227                 }
8228         }
8229         drm_connector_list_iter_end(&conn_iter);
8230 }
8231
8232 bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915)
8233 {
8234         return DISPLAY_VER(i915) >= 6 && i915_vtd_active(i915);
8235 }