Merge tag 'drm-intel-next-2021-01-04' of git://anongit.freedesktop.org/drm/drm-intel...
[platform/kernel/linux-starfive.git] / drivers / gpu / drm / i915 / display / intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/i2c.h>
28 #include <linux/input.h>
29 #include <linux/intel-iommu.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/dma-resv.h>
33 #include <linux/slab.h>
34
35 #include <drm/drm_atomic.h>
36 #include <drm/drm_atomic_helper.h>
37 #include <drm/drm_atomic_uapi.h>
38 #include <drm/drm_damage_helper.h>
39 #include <drm/drm_dp_helper.h>
40 #include <drm/drm_edid.h>
41 #include <drm/drm_fourcc.h>
42 #include <drm/drm_plane_helper.h>
43 #include <drm/drm_probe_helper.h>
44 #include <drm/drm_rect.h>
45
46 #include "display/intel_crt.h"
47 #include "display/intel_ddi.h"
48 #include "display/intel_display_debugfs.h"
49 #include "display/intel_dp.h"
50 #include "display/intel_dp_mst.h"
51 #include "display/intel_dpll_mgr.h"
52 #include "display/intel_dsi.h"
53 #include "display/intel_dvo.h"
54 #include "display/intel_gmbus.h"
55 #include "display/intel_hdmi.h"
56 #include "display/intel_lvds.h"
57 #include "display/intel_sdvo.h"
58 #include "display/intel_tv.h"
59 #include "display/intel_vdsc.h"
60
61 #include "gt/intel_rps.h"
62
63 #include "i915_drv.h"
64 #include "i915_trace.h"
65 #include "intel_acpi.h"
66 #include "intel_atomic.h"
67 #include "intel_atomic_plane.h"
68 #include "intel_bw.h"
69 #include "intel_cdclk.h"
70 #include "intel_color.h"
71 #include "intel_csr.h"
72 #include "intel_cursor.h"
73 #include "intel_display_types.h"
74 #include "intel_dp_link_training.h"
75 #include "intel_fbc.h"
76 #include "intel_fbdev.h"
77 #include "intel_fifo_underrun.h"
78 #include "intel_frontbuffer.h"
79 #include "intel_hdcp.h"
80 #include "intel_hotplug.h"
81 #include "intel_overlay.h"
82 #include "intel_pipe_crc.h"
83 #include "intel_pm.h"
84 #include "intel_psr.h"
85 #include "intel_quirks.h"
86 #include "intel_sideband.h"
87 #include "intel_sprite.h"
88 #include "intel_tc.h"
89 #include "intel_vga.h"
90 #include "i9xx_plane.h"
91
/* Forward declarations for helpers defined later in this file. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ilk_pch_clock_get(struct intel_crtc *crtc,
			      struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
118
/*
 * Legal ranges for the DPLL divisors on a given platform/output combo.
 * dot and vco are clock frequencies; the remaining members bound the
 * individual dividers of the clock equation.  The p2 divider is chosen
 * between p2_slow and p2_fast based on whether the target dot clock is
 * below dot_limit (for LVDS, based on single vs. dual channel instead —
 * see i9xx_select_p2_div()).
 */
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};
129
130 /* returns HPLL frequency in kHz */
131 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
132 {
133         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
134
135         /* Obtain SKU information */
136         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
137                 CCK_FUSE_HPLL_FREQ_MASK;
138
139         return vco_freq[hpll_freq] * 1000;
140 }
141
142 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
143                       const char *name, u32 reg, int ref_freq)
144 {
145         u32 val;
146         int divider;
147
148         val = vlv_cck_read(dev_priv, reg);
149         divider = val & CCK_FREQUENCY_VALUES;
150
151         drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
152                  (divider << CCK_FREQUENCY_STATUS_SHIFT),
153                  "%s change in progress\n", name);
154
155         return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
156 }
157
158 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
159                            const char *name, u32 reg)
160 {
161         int hpll;
162
163         vlv_cck_get(dev_priv);
164
165         if (dev_priv->hpll_freq == 0)
166                 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
167
168         hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
169
170         vlv_cck_put(dev_priv);
171
172         return hpll;
173 }
174
175 static void intel_update_czclk(struct drm_i915_private *dev_priv)
176 {
177         if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
178                 return;
179
180         dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
181                                                       CCK_CZ_CLOCK_CONTROL);
182
183         drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
184                 dev_priv->czclk_freq);
185 }
186
187 /* units of 100MHz */
188 static u32 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
189                                const struct intel_crtc_state *pipe_config)
190 {
191         if (HAS_DDI(dev_priv))
192                 return pipe_config->port_clock; /* SPLL */
193         else
194                 return dev_priv->fdi_pll_freq;
195 }
196
/* Gen2 (i8xx) DPLL limits for DAC (VGA) outputs. */
static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

/* Gen2 (i8xx) DPLL limits for DVO outputs; p2 is fixed at 4. */
static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

/* Gen2 (i8xx) DPLL limits for LVDS; p2 = 14 (single) / 7 (dual channel). */
static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};
235
/* Gen3/4 (i9xx) DPLL limits for SDVO outputs. */
static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* Gen3/4 (i9xx) DPLL limits for LVDS; p2 = 14 (single) / 7 (dual channel). */
static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};
261
262
/* G4x DPLL limits for SDVO outputs; p2 is fixed at 10. */
static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

/* G4x DPLL limits for HDMI outputs. */
static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* G4x DPLL limits for single-channel LVDS; p2 is fixed at 14. */
static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

/* G4x DPLL limits for dual-channel LVDS; p2 is fixed at 7. */
static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};
318
/* Pineview DPLL limits for SDVO outputs. */
static const struct intel_limit pnv_limits_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* Pineview DPLL limits for LVDS; m1 unused (combined m divider), p2 fixed. */
static const struct intel_limit pnv_limits_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};
346
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit ilk_limits_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* ILK/SNB single-channel LVDS limits (120MHz refclk); p2 fixed at 14. */
static const struct intel_limit ilk_limits_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* ILK/SNB dual-channel LVDS limits; p2 fixed at 7. */
static const struct intel_limit ilk_limits_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const struct intel_limit ilk_limits_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Dual-channel LVDS limits with a 100MHz refclk. */
static const struct intel_limit ilk_limits_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
417
static const struct intel_limit intel_limits_vlv = {
	 /*
	  * These are the data rate limits (measured in fast clocks)
	  * since those are the strictest limits we have. The fast
	  * clock and actual rate limits are more relaxed, so checking
	  * them would make no difference.
	  */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have.  The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* m2 values are stored with 22 fractional bits (hence the << 22) */
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
461
462 /* WA Display #0827: Gen9:all */
463 static void
464 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
465 {
466         if (enable)
467                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
468                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
469         else
470                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
471                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
472 }
473
474 /* Wa_2006604312:icl,ehl */
475 static void
476 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
477                        bool enable)
478 {
479         if (enable)
480                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
481                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
482         else
483                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
484                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
485 }
486
/*
 * A transcoder is a port sync slave iff a (valid) master transcoder has
 * been assigned to drive it.
 */
static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}
492
/* A transcoder is a port sync master iff it has at least one slave. */
static bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}
498
/* True if the crtc participates in port sync, as either master or slave. */
bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
		is_trans_port_sync_slave(crtc_state);
}
505
506 /*
507  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
508  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
509  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
510  * The helpers' return value is the rate of the clock that is fed to the
511  * display engine's pipe which can be the above fast dot clock rate or a
512  * divided-down version of it.
513  */
514 /* m1 is reserved as 0 in Pineview, n is a ring counter */
515 static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
516 {
517         clock->m = clock->m2 + 2;
518         clock->p = clock->p1 * clock->p2;
519         if (WARN_ON(clock->n == 0 || clock->p == 0))
520                 return 0;
521         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
522         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
523
524         return clock->dot;
525 }
526
527 static u32 i9xx_dpll_compute_m(struct dpll *dpll)
528 {
529         return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
530 }
531
532 static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
533 {
534         clock->m = i9xx_dpll_compute_m(clock);
535         clock->p = clock->p1 * clock->p2;
536         if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
537                 return 0;
538         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
539         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
540
541         return clock->dot;
542 }
543
544 static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
545 {
546         clock->m = clock->m1 * clock->m2;
547         clock->p = clock->p1 * clock->p2;
548         if (WARN_ON(clock->n == 0 || clock->p == 0))
549                 return 0;
550         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
551         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
552
553         return clock->dot / 5;
554 }
555
556 int chv_calc_dpll_params(int refclk, struct dpll *clock)
557 {
558         clock->m = clock->m1 * clock->m2;
559         clock->p = clock->p1 * clock->p2;
560         if (WARN_ON(clock->n == 0 || clock->p == 0))
561                 return 0;
562         clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
563                                            clock->n << 22);
564         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
565
566         return clock->dot / 5;
567 }
568
569 /*
570  * Returns whether the given set of divisors are valid for a given refclk with
571  * the given connectors.
572  */
573 static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
574                                const struct intel_limit *limit,
575                                const struct dpll *clock)
576 {
577         if (clock->n < limit->n.min || limit->n.max < clock->n)
578                 return false;
579         if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
580                 return false;
581         if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
582                 return false;
583         if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
584                 return false;
585
586         if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
587             !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
588                 if (clock->m1 <= clock->m2)
589                         return false;
590
591         if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
592             !IS_GEN9_LP(dev_priv)) {
593                 if (clock->p < limit->p.min || limit->p.max < clock->p)
594                         return false;
595                 if (clock->m < limit->m.min || limit->m.max < clock->m)
596                         return false;
597         }
598
599         if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
600                 return false;
601         /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
602          * connector, etc., rather than just a single range.
603          */
604         if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
605                 return false;
606
607         return true;
608 }
609
610 static int
611 i9xx_select_p2_div(const struct intel_limit *limit,
612                    const struct intel_crtc_state *crtc_state,
613                    int target)
614 {
615         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
616
617         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
618                 /*
619                  * For LVDS just rely on its current settings for dual-channel.
620                  * We haven't figured out how to reliably set up different
621                  * single/dual channel state, if we even can.
622                  */
623                 if (intel_is_dual_link_lvds(dev_priv))
624                         return limit->p2.p2_fast;
625                 else
626                         return limit->p2.p2_slow;
627         } else {
628                 if (target < limit->p2.dot_limit)
629                         return limit->p2.p2_slow;
630                 else
631                         return limit->p2.p2_fast;
632         }
633 }
634
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int err = target; /* smallest dot-clock deviation found so far */

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/* Brute-force search of the divisor space, keeping the candidate
	 * whose dot clock deviates least from the target. */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* these platforms require m1 > m2 (see
			 * intel_pll_is_valid()), so stop early */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* true iff some candidate improved on the initial error of 'target' */
	return (err != target);
}
692
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int err = target; /* smallest dot-clock deviation found so far */

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/* Same brute-force search as i9xx_find_best_dpll(), but without
	 * the m1 > m2 constraint (Pineview keeps m1 reserved as 0). */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* true iff some candidate improved on the initial error of 'target' */
	return (err != target);
}
748
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefere larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/* shrink the n search space:
						 * later candidates must not
						 * use a larger n */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	/* unlike i9xx/pnv, accepts any candidate within err_most tolerance */
	return found;
}
807
808 /*
809  * Check if the calculated PLL configuration is more optimal compared to the
810  * best configuration and error found so far. Return the calculated error.
811  */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const struct dpll *calculated_clock,
			       const struct dpll *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(to_i915(dev))) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	/* Guard the division below against a zero target frequency. */
	if (drm_WARN_ON_ONCE(dev, !target_freq))
		return false;

	/* Deviation of the computed dot clock from the target, in ppm. */
	*error_ppm = div_u64(1000000ULL *
				abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	/* Otherwise require a meaningful (>10 ppm) improvement. */
	return *error_ppm + 10 < best_error_ppm;
}
847
848 /*
849  * Returns a set of divisors for the desired target clock with the given
850  * refclk, or FALSE.  The returned values represent the clock equation:
851  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
852  */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	/* NOTE(review): match_clock is unused by this variant. */
	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					/* Solve for m2 given the other divisors. */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}
907
908 /*
909  * Returns a set of divisors for the desired target clock with the given
910  * refclk, or FALSE.  The returned values represent the clock equation:
911  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
912  */
913 static bool
914 chv_find_best_dpll(const struct intel_limit *limit,
915                    struct intel_crtc_state *crtc_state,
916                    int target, int refclk, struct dpll *match_clock,
917                    struct dpll *best_clock)
918 {
919         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
920         struct drm_device *dev = crtc->base.dev;
921         unsigned int best_error_ppm;
922         struct dpll clock;
923         u64 m2;
924         int found = false;
925
926         memset(best_clock, 0, sizeof(*best_clock));
927         best_error_ppm = 1000000;
928
929         /*
930          * Based on hardware doc, the n always set to 1, and m1 always
931          * set to 2.  If requires to support 200Mhz refclk, we need to
932          * revisit this because n may not 1 anymore.
933          */
934         clock.n = 1;
935         clock.m1 = 2;
936         target *= 5;    /* fast clock */
937
938         for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
939                 for (clock.p2 = limit->p2.p2_fast;
940                                 clock.p2 >= limit->p2.p2_slow;
941                                 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
942                         unsigned int error_ppm;
943
944                         clock.p = clock.p1 * clock.p2;
945
946                         m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
947                                                    refclk * clock.m1);
948
949                         if (m2 > INT_MAX/clock.m1)
950                                 continue;
951
952                         clock.m2 = m2;
953
954                         chv_calc_dpll_params(refclk, &clock);
955
956                         if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
957                                 continue;
958
959                         if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
960                                                 best_error_ppm, &error_ppm))
961                                 continue;
962
963                         *best_clock = clock;
964                         best_error_ppm = error_ppm;
965                         found = true;
966                 }
967         }
968
969         return found;
970 }
971
972 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
973                         struct dpll *best_clock)
974 {
975         int refclk = 100000;
976         const struct intel_limit *limit = &intel_limits_bxt;
977
978         return chv_find_best_dpll(limit, crtc_state,
979                                   crtc_state->port_clock, refclk,
980                                   NULL, best_clock);
981 }
982
/*
 * Sample the pipe's display scanline counter twice; if the value changed,
 * the pipe is actively scanning out.
 */
static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	/* Gen2 uses a narrower scanline field than later gens. */
	if (IS_GEN(dev_priv, 2))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = intel_de_read(dev_priv, reg) & line_mask;
	msleep(5);
	line2 = intel_de_read(dev_priv, reg) & line_mask;

	return line1 != line2;
}
1001
/*
 * Wait (up to 100 ms) for the pipe's scanline counter to either start or
 * stop moving, depending on @state; log an error on timeout.
 */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		drm_err(&dev_priv->drm,
			"pipe %c scanline %s wait timed out\n",
			pipe_name(pipe), onoff(state));
}
1013
/* Wait for the pipe's scanline counter to stop moving (pipe disabled). */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}
1018
/* Wait for the pipe's scanline counter to start moving (pipe enabled). */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
1023
/*
 * Wait for a pipe to fully shut down after it has been disabled. Gen4+
 * exposes a pipe-state bit in PIPECONF to poll; older gens are inferred
 * from the scanline counter stopping.
 */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, reg,
					    I965_PIPECONF_ACTIVE, 100))
			drm_WARN(&dev_priv->drm, 1,
				 "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}
1043
1044 /* Only for pre-ILK configs */
/* Assert the DPLL enable state for @pipe matches @state. Only for pre-ILK. */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = intel_de_read(dev_priv, DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
1057
1058 /* XXX: the dsi pll is shared between MIPI DSI ports */
/*
 * Assert the DSI PLL enable state matches @state. The PLL control lives
 * behind the CCK sideband, hence the get/put around the read.
 */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	vlv_cck_get(dev_priv);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	vlv_cck_put(dev_priv);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
1073
/* Assert the FDI TX enable state for @pipe matches @state. */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	if (HAS_DDI(dev_priv)) {
		/*
		 * DDI does not have a specific FDI_TX register.
		 *
		 * FDI is never fed from EDP transcoder
		 * so pipe->transcoder cast is fine here.
		 */
		enum transcoder cpu_transcoder = (enum transcoder)pipe;
		u32 val = intel_de_read(dev_priv,
					TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1100
/* Assert the FDI RX enable state for @pipe matches @state. */
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1115
/*
 * Assert the FDI TX PLL is enabled for @pipe. Skipped on platforms where
 * the PLL is always on (ILK) or managed by the DDI ports (HSW+).
 */
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN(dev_priv, 5))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
1132
/* Assert the FDI RX PLL enable state for @pipe matches @state. */
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
1145
/*
 * Assert that the panel power sequencer registers driving @pipe are
 * writable (panel off, or the unlock key written). The PPS instance and
 * the pipe it drives are located per-platform: via PP_ON_DELAYS port
 * select on PCH platforms, per-pipe on VLV/CHV, and LVDS-only otherwise.
 * Not applicable to DDI platforms.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		/* Map the PPS port select back to the pipe feeding that port. */
		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		drm_WARN_ON(&dev_priv->drm,
			    port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	/* Registers are writable when the panel is off or explicitly unlocked. */
	val = intel_de_read(dev_priv, pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
1203
/*
 * Assert the transcoder enable state matches @state. Reads PIPECONF only
 * if the transcoder's power domain is up; a powered-down transcoder is
 * treated as disabled.
 */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder),
			onoff(state), onoff(cur_state));
}
1231
/* Assert the plane's hardware enable state matches @state. */
static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)
1246
/* Assert every plane attached to @crtc is disabled in hardware. */
static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}
1255
/*
 * Assert vblank interrupts are disabled on @crtc. drm_crtc_vblank_get()
 * succeeding (returning 0) here means a vblank reference could still be
 * taken, which triggers the warning; drop the reference we just acquired.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
1261
/* Assert the PCH transcoder for @pipe is disabled. */
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}
1274
/*
 * Assert PCH DP port @port is not driven by @pipe; also check the IBX
 * quirk where a disabled port may still be latched to transcoder B.
 */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}
1292
/*
 * Assert PCH HDMI port @port is not driven by @pipe; also check the IBX
 * quirk where a disabled port may still be latched to transcoder B.
 */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}
1310
/* Assert no PCH port (DP, VGA, LVDS, HDMI/SDVO) is driven by @pipe. */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
1335
/*
 * Write the precomputed DPLL value and wait for the PLL to lock.
 * The udelay after the posting read gives the PLL time to spin up
 * before we poll the lock bit.
 */
static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
}
1349
/*
 * Enable the VLV DPLL for @crtc from the precomputed dpll_hw_state.
 * DPLL_MD is always programmed, even when the VCO itself stays off.
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	/* Only spin up the VCO when the computed state enables it. */
	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	intel_de_write(dev_priv, DPLL_MD(pipe),
		       pipe_config->dpll_hw_state.dpll_md);
	intel_de_posting_read(dev_priv, DPLL_MD(pipe));
}
1368
1369
/*
 * Enable the CHV DPLL: first re-enable the 10-bit display clock via the
 * DPIO sideband, then program the PLL and wait for it to lock.
 */
static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	vlv_dpio_get(dev_priv);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	vlv_dpio_put(dev_priv);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
}
1399
/*
 * Enable the CHV DPLL for @crtc from the precomputed dpll_hw_state,
 * including the DPLL_MD programming workaround for pipes B/C.
 */
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	/* Only spin up the PLL when the computed state enables it. */
	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		intel_de_write(dev_priv, DPLL_MD(PIPE_B),
			       pipe_config->dpll_hw_state.dpll_md);
		intel_de_write(dev_priv, CBR4_VLV, 0);
		/* Cache the value so readout can reconstruct the real DPLL_MD. */
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		drm_WARN_ON(&dev_priv->drm,
			    (intel_de_read(dev_priv, DPLL(PIPE_B)) &
			     DPLL_VGA_MODE_DIS) == 0);
	} else {
		intel_de_write(dev_priv, DPLL_MD(pipe),
			       pipe_config->dpll_hw_state.dpll_md);
		intel_de_posting_read(dev_priv, DPLL_MD(pipe));
	}
}
1440
1441 static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
1442 {
1443         if (IS_I830(dev_priv))
1444                 return false;
1445
1446         return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
1447 }
1448
/*
 * Enable the i9xx DPLL from the precomputed dpll_hw_state. The write
 * sequence (VGA-mode-enabled write first, repeated writes with warmup
 * delays after) follows hardware requirements and must not be reordered.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, reg, dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, reg, dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		intel_de_write(dev_priv, DPLL_MD(crtc->pipe),
			       crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		intel_de_write(dev_priv, reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		intel_de_write(dev_priv, reg, dpll);
		intel_de_posting_read(dev_priv, reg);
		udelay(150); /* wait for warmup */
	}
}
1494
/*
 * Disable the i9xx DPLL, leaving only VGA mode disable set. No-op on 830,
 * which keeps both pipes (and their PLLs) permanently enabled.
 */
static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}
1511
/*
 * Disable the VLV DPLL for @pipe, keeping the reference clock outputs
 * (and the CRI clock for pipes B/C, needed by eDP/DSI) running.
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, (enum transcoder)pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	intel_de_write(dev_priv, DPLL(pipe), val);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}
1527
/*
 * Disable the CHV DPLL for @pipe: keep the reference clocks running,
 * then gate the 10-bit display clock via the DPIO sideband.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, (enum transcoder)pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	intel_de_write(dev_priv, DPLL(pipe), val);
	intel_de_posting_read(dev_priv, DPLL(pipe));

	vlv_dpio_get(dev_priv);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	vlv_dpio_put(dev_priv);
}
1553
/*
 * vlv_wait_port_ready - wait for a VLV/CHV port to report ready
 * @dev_priv: i915 device
 * @dig_port: digital port to wait for
 * @expected_mask: ready bits we expect to see within the port's field
 *
 * Polls the per-port ready bits (ports B/C live in DPLL(0), port D in
 * DPIO_PHY_STATUS) and WARNs if they don't match within the timeout.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dig_port->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		/* port C's field sits 4 bits above port B's */
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG(); /* no other ports exist on these platforms */
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}
1587
/*
 * ilk_enable_pch_transcoder - enable the PCH transcoder for a pipe (ILK-family)
 * @crtc_state: state of the CRTC driving the transcoder
 *
 * Requires the shared DPLL and both FDI directions to already be enabled.
 * Copies BPC/interlace settings from the CPU pipe's PIPECONF so the PCH
 * transcoder matches, then enables it and waits for the enable to latch.
 */
static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		/*
		 * Workaround: Set the timing override bit
		 * before enabling the pch transcoder.
		 */
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(0);

		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	/* Mirror the CPU pipe's interlace mode */
	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
			pipe_name(pipe));
}
1654
/*
 * lpt_enable_pch_transcoder - enable the (single) LPT PCH transcoder
 * @dev_priv: i915 device
 * @cpu_transcoder: CPU transcoder feeding the PCH transcoder
 *
 * LPT has one PCH transcoder, hardwired behind pipe A's FDI. Applies the
 * timing-override workaround, copies the interlace mode from the CPU
 * transcoder's PIPECONF, then enables and waits for the enable to latch.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	/* Workaround: set timing override bit. */
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	/* Configure frame start delay to match the CPU */
	val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
	val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));

	/* Mirror the CPU transcoder's interlace mode */
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
				  TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
}
1686
/*
 * ilk_disable_pch_transcoder - disable a pipe's PCH transcoder (ILK-family)
 * @dev_priv: i915 device
 * @pipe: pipe whose PCH transcoder to disable
 *
 * Requires FDI and all PCH ports on the pipe to already be off. Clears
 * TRANS_ENABLE, waits for the transcoder to report disabled, then (on CPT)
 * undoes the timing-override workaround set at enable time.
 */
static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
				       enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
			pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		intel_de_write(dev_priv, reg, val);
	}
}
1717
/*
 * lpt_disable_pch_transcoder - disable the (single) LPT PCH transcoder
 * @dev_priv: i915 device
 *
 * Clears TRANS_ENABLE, waits for the transcoder to report disabled, then
 * undoes the timing-override workaround set in lpt_enable_pch_transcoder().
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
				    TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
}
1735
1736 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1737 {
1738         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1739
1740         if (HAS_PCH_LPT(dev_priv))
1741                 return PIPE_A;
1742         else
1743                 return crtc->pipe;
1744 }
1745
1746 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
1747 {
1748         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1749         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1750         u32 mode_flags = crtc->mode_flags;
1751
1752         /*
1753          * From Gen 11, In case of dsi cmd mode, frame counter wouldnt
1754          * have updated at the beginning of TE, if we want to use
1755          * the hw counter, then we would find it updated in only
1756          * the next TE, hence switching to sw counter.
1757          */
1758         if (mode_flags & (I915_MODE_FLAG_DSI_USE_TE0 | I915_MODE_FLAG_DSI_USE_TE1))
1759                 return 0;
1760
1761         /*
1762          * On i965gm the hardware frame counter reads
1763          * zero when the TV encoder is enabled :(
1764          */
1765         if (IS_I965GM(dev_priv) &&
1766             (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
1767                 return 0;
1768
1769         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1770                 return 0xffffffff; /* full 32 bit counter */
1771         else if (INTEL_GEN(dev_priv) >= 3)
1772                 return 0xffffff; /* only 24 bits of frame count */
1773         else
1774                 return 0; /* Gen2 doesn't have a hardware frame counter */
1775 }
1776
/*
 * intel_crtc_vblank_on - enable vblank processing for a CRTC
 * @crtc_state: state of the CRTC being enabled
 *
 * Programs the max hardware frame counter value into drm core (0 selects
 * the software counter) before turning vblanks on.
 */
void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	assert_vblank_disabled(&crtc->base);
	drm_crtc_set_max_vblank_count(&crtc->base,
				      intel_crtc_max_vblank_count(crtc_state));
	drm_crtc_vblank_on(&crtc->base);
}
1786
/*
 * intel_crtc_vblank_off - disable vblank processing for a CRTC
 * @crtc_state: state of the CRTC being disabled
 */
void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	drm_crtc_vblank_off(&crtc->base);
	assert_vblank_disabled(&crtc->base);
}
1794
/*
 * intel_enable_pipe - enable a display pipe
 * @new_crtc_state: target state of the CRTC being enabled
 *
 * Verifies all prerequisites (planes off, PLL/FDI running as appropriate
 * for the platform), sets PIPECONF_ENABLE, and when the hardware frame
 * counter is unusable waits for the scanline to start moving so the
 * software vblank counter gets sane timestamps.
 */
void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	trace_intel_pipe_enable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
1852
/*
 * intel_disable_pipe - disable a display pipe
 * @old_crtc_state: state the CRTC is being torn down from
 *
 * Requires all planes to already be disabled. Clears PIPECONF_ENABLE
 * (except on 830, where both pipes are always kept running) and waits
 * for the pipe to actually stop.
 */
void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	trace_intel_pipe_disable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return; /* already off */

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	intel_de_write(dev_priv, reg, val);
	/* only wait if we actually cleared the enable bit */
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
1892
/* GTT tile (page) size in bytes: 2KiB on gen2, 4KiB everywhere else. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	if (IS_GEN(dev_priv, 2))
		return 2048;

	return 4096;
}
1897
1898 static bool is_ccs_plane(const struct drm_framebuffer *fb, int plane)
1899 {
1900         if (!is_ccs_modifier(fb->modifier))
1901                 return false;
1902
1903         return plane >= fb->format->num_planes / 2;
1904 }
1905
1906 static bool is_gen12_ccs_modifier(u64 modifier)
1907 {
1908         return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
1909                modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
1910
1911 }
1912
1913 static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
1914 {
1915         return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane);
1916 }
1917
1918 static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
1919 {
1920         if (is_ccs_modifier(fb->modifier))
1921                 return is_ccs_plane(fb, plane);
1922
1923         return plane == 1;
1924 }
1925
1926 static int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane)
1927 {
1928         drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
1929                     (main_plane && main_plane >= fb->format->num_planes / 2));
1930
1931         return fb->format->num_planes / 2 + main_plane;
1932 }
1933
1934 static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
1935 {
1936         drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
1937                     ccs_plane < fb->format->num_planes / 2);
1938
1939         return ccs_plane - fb->format->num_planes / 2;
1940 }
1941
1942 int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
1943 {
1944         struct drm_i915_private *i915 = to_i915(fb->dev);
1945
1946         if (is_ccs_modifier(fb->modifier))
1947                 return main_to_ccs_plane(fb, main_plane);
1948         else if (INTEL_GEN(i915) < 11 &&
1949                  intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
1950                 return 1;
1951         else
1952                 return 0;
1953 }
1954
1955 bool
1956 intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
1957                                     uint64_t modifier)
1958 {
1959         return info->is_yuv &&
1960                info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
1961 }
1962
1963 static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb,
1964                                    int color_plane)
1965 {
1966         return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
1967                color_plane == 1;
1968 }
1969
/*
 * intel_tile_width_bytes - width of one tile row in bytes
 * @fb: framebuffer
 * @color_plane: color plane index (CCS planes have their own widths)
 *
 * Linear buffers are treated as one "tile" of the full GTT tile size.
 * The CCS cases intentionally fall through to their underlying tiling
 * when @color_plane is not a CCS plane.
 */
static unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_tile_size(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (IS_GEN(dev_priv, 2))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		/* pre-gen12 CCS planes use 128 byte rows */
		if (is_ccs_plane(fb, color_plane))
			return 128;
		fallthrough;
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		/* gen12 CCS planes use 64 byte rows */
		if (is_ccs_plane(fb, color_plane))
			return 64;
		fallthrough;
	case I915_FORMAT_MOD_Y_TILED:
		if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 128;
		fallthrough;
	case I915_FORMAT_MOD_Yf_TILED:
		/* Yf tile width depends on the pixel size */
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb->modifier);
		return cpp;
	}
}
2022
2023 static unsigned int
2024 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
2025 {
2026         if (is_gen12_ccs_plane(fb, color_plane))
2027                 return 1;
2028
2029         return intel_tile_size(to_i915(fb->dev)) /
2030                 intel_tile_width_bytes(fb, color_plane);
2031 }
2032
2033 /* Return the tile dimensions in pixel units */
2034 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
2035                             unsigned int *tile_width,
2036                             unsigned int *tile_height)
2037 {
2038         unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
2039         unsigned int cpp = fb->format->cpp[color_plane];
2040
2041         *tile_width = tile_width_bytes / cpp;
2042         *tile_height = intel_tile_height(fb, color_plane);
2043 }
2044
2045 static unsigned int intel_tile_row_size(const struct drm_framebuffer *fb,
2046                                         int color_plane)
2047 {
2048         unsigned int tile_width, tile_height;
2049
2050         intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2051
2052         return fb->pitches[color_plane] * tile_height;
2053 }
2054
/* Round @height up to a whole number of tiles for @color_plane. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int color_plane, unsigned int height)
{
	return ALIGN(height, intel_tile_height(fb, color_plane));
}
2063
2064 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
2065 {
2066         unsigned int size = 0;
2067         int i;
2068
2069         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
2070                 size += rot_info->plane[i].width * rot_info->plane[i].height;
2071
2072         return size;
2073 }
2074
2075 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
2076 {
2077         unsigned int size = 0;
2078         int i;
2079
2080         for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
2081                 size += rem_info->plane[i].width * rem_info->plane[i].height;
2082
2083         return size;
2084 }
2085
2086 static void
2087 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2088                         const struct drm_framebuffer *fb,
2089                         unsigned int rotation)
2090 {
2091         view->type = I915_GGTT_VIEW_NORMAL;
2092         if (drm_rotation_90_or_270(rotation)) {
2093                 view->type = I915_GGTT_VIEW_ROTATED;
2094                 view->rotated = to_intel_framebuffer(fb)->rot_info;
2095         }
2096 }
2097
/* Required GGTT alignment for the cursor surface, per platform. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return 16 * 1024;

	if (IS_I85X(dev_priv))
		return 256;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		return 32;

	return 4 * 1024;
}
2109
/* Required GGTT alignment for linear scanout surfaces, per platform. */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return 256 * 1024;

	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;

	if (INTEL_GEN(dev_priv) >= 4)
		return 4 * 1024;

	return 0;
}
2122
/*
 * intel_surf_alignment - required GGTT alignment for a scanout surface
 * @fb: framebuffer
 * @color_plane: color plane index
 *
 * AUX/CCS planes get only 4K alignment; otherwise the alignment depends
 * on the tiling modifier (with fallthroughs from the CCS variants to
 * their underlying tiling for non-UV planes).
 */
static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
					 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);

	/* AUX_DIST needs only 4K alignment */
	if ((INTEL_GEN(dev_priv) < 12 && is_aux_plane(fb, color_plane)) ||
	    is_ccs_plane(fb, color_plane))
		return 4096;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_GEN(dev_priv) >= 9)
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		/* semiplanar UV planes must be tile-row aligned */
		if (is_semiplanar_uv_plane(fb, color_plane))
			return intel_tile_row_size(fb, color_plane);
		fallthrough;
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
		return 16 * 1024;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED:
		if (INTEL_GEN(dev_priv) >= 12 &&
		    is_semiplanar_uv_plane(fb, color_plane))
			return intel_tile_row_size(fb, color_plane);
		fallthrough;
	case I915_FORMAT_MOD_Yf_TILED:
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb->modifier);
		return 0;
	}
}
2160
2161 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2162 {
2163         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2164         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2165
2166         return INTEL_GEN(dev_priv) < 4 ||
2167                 (plane->has_fbc &&
2168                  plane_state->view.type == I915_GGTT_VIEW_NORMAL);
2169 }
2170
/*
 * intel_pin_and_fence_fb_obj - pin a framebuffer for scanout
 * @fb: framebuffer to pin
 * @view: GGTT view to map it through
 * @uses_fence: whether to try installing a fence register
 * @out_flags: gets PLANE_HAS_FENCE set if a fence was installed
 *
 * Returns the pinned vma with an extra reference held (the i915_vma_get()
 * is only reached on success), or an ERR_PTR on failure. The runtime-pm
 * wakeref is only held across the pin/fence itself; see the comment below.
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   const struct i915_ggtt_view *view,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;

	if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
		return ERR_PTR(-EINVAL);

	/* alignment of the first (main) plane governs the whole pin */
	alignment = intel_surf_alignment(fb, 0);
	if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
		return ERR_PTR(-EINVAL);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	/*
	 * Valleyview is definitely limited to scanning out the first
	 * 512MiB. Lets presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	pinctl = 0;
	if (HAS_GMCH(dev_priv))
		pinctl |= PIN_MAPPABLE;

	vma = i915_gem_object_pin_to_display_plane(obj,
						   alignment, view, pinctl);
	if (IS_ERR(vma))
		goto err;

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		int ret;

		/*
		 * Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression.  For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
			/* pre-gen4 cannot scan out without a fence */
			i915_gem_object_unpin_from_display_plane(vma);
			vma = ERR_PTR(ret);
			goto err;
		}

		if (ret == 0 && vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	/* success: hand the caller an extra reference on the vma */
	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return vma;
}
2265
/*
 * intel_unpin_fb_vma - undo intel_pin_and_fence_fb_obj()
 * @vma: vma returned by intel_pin_and_fence_fb_obj()
 * @flags: the *out_flags value from the pin (checked for PLANE_HAS_FENCE)
 *
 * Releases the fence (if one was installed), the display-plane pin and
 * the extra vma reference, under the object lock.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	i915_gem_object_lock(vma->obj, NULL);
	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
	i915_gem_object_unlock(vma->obj);

	i915_vma_put(vma);
}
2276
2277 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2278                           unsigned int rotation)
2279 {
2280         if (drm_rotation_90_or_270(rotation))
2281                 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2282         else
2283                 return fb->pitches[color_plane];
2284 }
2285
2286 /*
2287  * Convert the x/y offsets into a linear offset.
2288  * Only valid with 0/180 degree rotation, which is fine since linear
2289  * offset is only used with linear buffers on pre-hsw and tiled buffers
2290  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2291  */
2292 u32 intel_fb_xy_to_linear(int x, int y,
2293                           const struct intel_plane_state *state,
2294                           int color_plane)
2295 {
2296         const struct drm_framebuffer *fb = state->hw.fb;
2297         unsigned int cpp = fb->format->cpp[color_plane];
2298         unsigned int pitch = state->color_plane[color_plane].stride;
2299
2300         return y * pitch + x * cpp;
2301 }
2302
2303 /*
2304  * Add the x/y offsets derived from fb->offsets[] to the user
2305  * specified plane src x/y offsets. The resulting x/y offsets
2306  * specify the start of scanout from the beginning of the gtt mapping.
2307  */
2308 void intel_add_fb_offsets(int *x, int *y,
2309                           const struct intel_plane_state *state,
2310                           int color_plane)
2311
2312 {
2313         *x += state->color_plane[color_plane].x;
2314         *y += state->color_plane[color_plane].y;
2315 }
2316
/*
 * intel_adjust_tile_offset - rebase a tile-aligned offset onto a smaller one
 * @x: in/out x offset in pixels
 * @y: in/out y offset in pixels
 * @tile_width: tile width in pixels
 * @tile_height: tile height in rows
 * @tile_size: tile size in bytes
 * @pitch_tiles: row pitch in tiles
 * @old_offset: current tile-aligned byte offset
 * @new_offset: target tile-aligned byte offset (must be <= @old_offset)
 *
 * Converts the byte distance between the two offsets into additional
 * x/y pixel offsets, so scanout from @new_offset shows the same pixels.
 * Returns @new_offset.
 */
static u32 intel_adjust_tile_offset(int *x, int *y,
				    unsigned int tile_width,
				    unsigned int tile_height,
				    unsigned int tile_size,
				    unsigned int pitch_tiles,
				    u32 old_offset,
				    u32 new_offset)
{
	unsigned int pitch_pixels = pitch_tiles * tile_width;
	unsigned int tiles;

	/* both offsets must be whole tiles, and we only move backwards */
	WARN_ON(old_offset & (tile_size - 1));
	WARN_ON(new_offset & (tile_size - 1));
	WARN_ON(new_offset > old_offset);

	tiles = (old_offset - new_offset) / tile_size;

	/* distribute the tile distance over full rows and columns */
	*y += tiles / pitch_tiles * tile_height;
	*x += tiles % pitch_tiles * tile_width;

	/* minimize x in case it got needlessly big */
	*y += *x / pitch_pixels * tile_height;
	*x %= pitch_pixels;

	return new_offset;
}
2343
2344 static bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane)
2345 {
2346         return fb->modifier == DRM_FORMAT_MOD_LINEAR ||
2347                is_gen12_ccs_plane(fb, color_plane);
2348 }
2349
/*
 * Move the byte difference between old_offset and new_offset
 * (new_offset <= old_offset) into the x/y offsets, honouring the
 * tiling layout of the given fb color plane. Returns new_offset.
 */
static u32 intel_adjust_aligned_offset(int *x, int *y,
                                       const struct drm_framebuffer *fb,
                                       int color_plane,
                                       unsigned int rotation,
                                       unsigned int pitch,
                                       u32 old_offset, u32 new_offset)
{
        struct drm_i915_private *dev_priv = to_i915(fb->dev);
        unsigned int cpp = fb->format->cpp[color_plane];

        drm_WARN_ON(&dev_priv->drm, new_offset > old_offset);

        if (!is_surface_linear(fb, color_plane)) {
                unsigned int tile_size, tile_width, tile_height;
                unsigned int pitch_tiles;

                tile_size = intel_tile_size(dev_priv);
                intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

                if (drm_rotation_90_or_270(rotation)) {
                        /* pitch is given in the rotated orientation here */
                        pitch_tiles = pitch / tile_height;
                        swap(tile_width, tile_height);
                } else {
                        pitch_tiles = pitch / (tile_width * cpp);
                }

                intel_adjust_tile_offset(x, y, tile_width, tile_height,
                                         tile_size, pitch_tiles,
                                         old_offset, new_offset);
        } else {
                /* linear: fold the byte delta straight into x/y */
                old_offset += *y * pitch + *x * cpp;

                *y = (old_offset - new_offset) / pitch;
                *x = ((old_offset - new_offset) - *y * pitch) / cpp;
        }

        return new_offset;
}
2388
2389 /*
2390  * Adjust the tile offset by moving the difference into
2391  * the x/y offsets.
2392  */
2393 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2394                                              const struct intel_plane_state *state,
2395                                              int color_plane,
2396                                              u32 old_offset, u32 new_offset)
2397 {
2398         return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
2399                                            state->hw.rotation,
2400                                            state->color_plane[color_plane].stride,
2401                                            old_offset, new_offset);
2402 }
2403
2404 /*
2405  * Computes the aligned offset to the base tile and adjusts
2406  * x, y. bytes per pixel is assumed to be a power-of-two.
2407  *
2408  * In the 90/270 rotated case, x and y are assumed
2409  * to be already rotated to match the rotated GTT view, and
2410  * pitch is the tile_height aligned framebuffer height.
2411  *
2412  * This function is used when computing the derived information
2413  * under intel_framebuffer, so using any of that information
2414  * here is not allowed. Anything under drm_framebuffer can be
2415  * used. This is why the user has to pass in the pitch since it
2416  * is specified in the rotated orientation.
2417  */
2418 static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
2419                                         int *x, int *y,
2420                                         const struct drm_framebuffer *fb,
2421                                         int color_plane,
2422                                         unsigned int pitch,
2423                                         unsigned int rotation,
2424                                         u32 alignment)
2425 {
2426         unsigned int cpp = fb->format->cpp[color_plane];
2427         u32 offset, offset_aligned;
2428
2429         if (!is_surface_linear(fb, color_plane)) {
2430                 unsigned int tile_size, tile_width, tile_height;
2431                 unsigned int tile_rows, tiles, pitch_tiles;
2432
2433                 tile_size = intel_tile_size(dev_priv);
2434                 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2435
2436                 if (drm_rotation_90_or_270(rotation)) {
2437                         pitch_tiles = pitch / tile_height;
2438                         swap(tile_width, tile_height);
2439                 } else {
2440                         pitch_tiles = pitch / (tile_width * cpp);
2441                 }
2442
2443                 tile_rows = *y / tile_height;
2444                 *y %= tile_height;
2445
2446                 tiles = *x / tile_width;
2447                 *x %= tile_width;
2448
2449                 offset = (tile_rows * pitch_tiles + tiles) * tile_size;
2450
2451                 offset_aligned = offset;
2452                 if (alignment)
2453                         offset_aligned = rounddown(offset_aligned, alignment);
2454
2455                 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2456                                          tile_size, pitch_tiles,
2457                                          offset, offset_aligned);
2458         } else {
2459                 offset = *y * pitch + *x * cpp;
2460                 offset_aligned = offset;
2461                 if (alignment) {
2462                         offset_aligned = rounddown(offset_aligned, alignment);
2463                         *y = (offset % alignment) / pitch;
2464                         *x = ((offset % alignment) - *y * pitch) / cpp;
2465                 } else {
2466                         *y = *x = 0;
2467                 }
2468         }
2469
2470         return offset_aligned;
2471 }
2472
/*
 * Compute the aligned surface offset for the given plane state's fb
 * color plane, adjusting x/y accordingly. Cursor planes have their
 * own alignment requirement; everything else uses the surface
 * alignment for the fb/plane combination.
 */
u32 intel_plane_compute_aligned_offset(int *x, int *y,
                                       const struct intel_plane_state *state,
                                       int color_plane)
{
        struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
        struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
        const struct drm_framebuffer *fb = state->hw.fb;
        unsigned int rotation = state->hw.rotation;
        int pitch = state->color_plane[color_plane].stride;
        u32 alignment;

        if (intel_plane->id == PLANE_CURSOR)
                alignment = intel_cursor_alignment(dev_priv);
        else
                alignment = intel_surf_alignment(fb, color_plane);

        return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
                                            pitch, rotation, alignment);
}
2492
/*
 * Convert the fb->offset[] into x/y offsets.
 *
 * Returns 0 on success, -EINVAL if the offset violates the required
 * alignment, or -ERANGE if offset + plane size would overflow u32.
 */
static int intel_fb_offset_to_xy(int *x, int *y,
                                 const struct drm_framebuffer *fb,
                                 int color_plane)
{
        struct drm_i915_private *dev_priv = to_i915(fb->dev);
        unsigned int height;
        u32 alignment;

        /*
         * Required offset alignment: tile row for gen12+ semiplanar UV
         * planes, tile size for other tiled layouts, none for linear.
         */
        if (INTEL_GEN(dev_priv) >= 12 &&
            is_semiplanar_uv_plane(fb, color_plane))
                alignment = intel_tile_row_size(fb, color_plane);
        else if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
                alignment = intel_tile_size(dev_priv);
        else
                alignment = 0;

        if (alignment != 0 && fb->offsets[color_plane] % alignment) {
                drm_dbg_kms(&dev_priv->drm,
                            "Misaligned offset 0x%08x for color plane %d\n",
                            fb->offsets[color_plane], color_plane);
                return -EINVAL;
        }

        height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
        height = ALIGN(height, intel_tile_height(fb, color_plane));

        /* Catch potential overflows early */
        if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
                            fb->offsets[color_plane])) {
                drm_dbg_kms(&dev_priv->drm,
                            "Bad offset 0x%08x or pitch %d for color plane %d\n",
                            fb->offsets[color_plane], fb->pitches[color_plane],
                            color_plane);
                return -ERANGE;
        }

        *x = 0;
        *y = 0;

        /* fold fb->offsets[] into the x/y output */
        intel_adjust_aligned_offset(x, y,
                                    fb, color_plane, DRM_MODE_ROTATE_0,
                                    fb->pitches[color_plane],
                                    fb->offsets[color_plane], 0);

        return 0;
}
2540
2541 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
2542 {
2543         switch (fb_modifier) {
2544         case I915_FORMAT_MOD_X_TILED:
2545                 return I915_TILING_X;
2546         case I915_FORMAT_MOD_Y_TILED:
2547         case I915_FORMAT_MOD_Y_TILED_CCS:
2548         case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
2549         case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
2550                 return I915_TILING_Y;
2551         default:
2552                 return I915_TILING_NONE;
2553         }
2554 }
2555
2556 /*
2557  * From the Sky Lake PRM:
2558  * "The Color Control Surface (CCS) contains the compression status of
2559  *  the cache-line pairs. The compression state of the cache-line pair
2560  *  is specified by 2 bits in the CCS. Each CCS cache-line represents
2561  *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
2562  *  cache-line-pairs. CCS is always Y tiled."
2563  *
2564  * Since cache line pairs refers to horizontally adjacent cache lines,
2565  * each cache line in the CCS corresponds to an area of 32x16 cache
2566  * lines on the main surface. Since each pixel is 4 bytes, this gives
2567  * us a ratio of one byte in the CCS for each 8x16 pixels in the
2568  * main surface.
2569  */
2570 static const struct drm_format_info skl_ccs_formats[] = {
2571         { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
2572           .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2573         { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
2574           .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2575         { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
2576           .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
2577         { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
2578           .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
2579 };
2580
2581 /*
2582  * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
2583  * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
2584  * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
2585  * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in
2586  * the main surface.
2587  */
2588 static const struct drm_format_info gen12_ccs_formats[] = {
2589         { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
2590           .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2591           .hsub = 1, .vsub = 1, },
2592         { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
2593           .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2594           .hsub = 1, .vsub = 1, },
2595         { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
2596           .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2597           .hsub = 1, .vsub = 1, .has_alpha = true },
2598         { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
2599           .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2600           .hsub = 1, .vsub = 1, .has_alpha = true },
2601         { .format = DRM_FORMAT_YUYV, .num_planes = 2,
2602           .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2603           .hsub = 2, .vsub = 1, .is_yuv = true },
2604         { .format = DRM_FORMAT_YVYU, .num_planes = 2,
2605           .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2606           .hsub = 2, .vsub = 1, .is_yuv = true },
2607         { .format = DRM_FORMAT_UYVY, .num_planes = 2,
2608           .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2609           .hsub = 2, .vsub = 1, .is_yuv = true },
2610         { .format = DRM_FORMAT_VYUY, .num_planes = 2,
2611           .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2612           .hsub = 2, .vsub = 1, .is_yuv = true },
2613         { .format = DRM_FORMAT_NV12, .num_planes = 4,
2614           .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
2615           .hsub = 2, .vsub = 2, .is_yuv = true },
2616         { .format = DRM_FORMAT_P010, .num_planes = 4,
2617           .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
2618           .hsub = 2, .vsub = 2, .is_yuv = true },
2619         { .format = DRM_FORMAT_P012, .num_planes = 4,
2620           .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
2621           .hsub = 2, .vsub = 2, .is_yuv = true },
2622         { .format = DRM_FORMAT_P016, .num_planes = 4,
2623           .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
2624           .hsub = 2, .vsub = 2, .is_yuv = true },
2625 };
2626
2627 static const struct drm_format_info *
2628 lookup_format_info(const struct drm_format_info formats[],
2629                    int num_formats, u32 format)
2630 {
2631         int i;
2632
2633         for (i = 0; i < num_formats; i++) {
2634                 if (formats[i].format == format)
2635                         return &formats[i];
2636         }
2637
2638         return NULL;
2639 }
2640
/*
 * Return the i915-specific format info for CCS modifiers, which carry
 * extra AUX color planes not described by the core format tables.
 * Returns NULL for any other modifier (no special format info needed).
 */
static const struct drm_format_info *
intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
{
        switch (cmd->modifier[0]) {
        case I915_FORMAT_MOD_Y_TILED_CCS:
        case I915_FORMAT_MOD_Yf_TILED_CCS:
                return lookup_format_info(skl_ccs_formats,
                                          ARRAY_SIZE(skl_ccs_formats),
                                          cmd->pixel_format);
        case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
        case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
                return lookup_format_info(gen12_ccs_formats,
                                          ARRAY_SIZE(gen12_ccs_formats),
                                          cmd->pixel_format);
        default:
                return NULL;
        }
}
2659
2660 bool is_ccs_modifier(u64 modifier)
2661 {
2662         return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
2663                modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
2664                modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2665                modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2666 }
2667
/*
 * Stride of a gen12 CCS AUX plane derived from the main plane pitch:
 * 64 bytes of AUX data per 512 bytes of main surface pitch (rounded
 * up), i.e. a 1:8 ratio computed in 64-byte units.
 */
static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
{
        return DIV_ROUND_UP(fb->pitches[ccs_to_main_plane(fb, ccs_plane)],
                            512) * 64;
}
2673
/*
 * Return the maximum fb stride supported by the hardware for the given
 * pixel format and modifier, or 0 if no CRTC is available to query.
 */
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
                              u32 pixel_format, u64 modifier)
{
        struct intel_crtc *crtc;
        struct intel_plane *plane;

        /*
         * We assume the primary plane for pipe A has
         * the highest stride limits of them all,
         * if in case pipe A is disabled, use the first pipe from pipe_mask.
         */
        crtc = intel_get_first_crtc(dev_priv);
        if (!crtc)
                return 0;

        plane = to_intel_plane(crtc->base.primary);

        return plane->max_stride(plane, pixel_format, modifier,
                                 DRM_MODE_ROTATE_0);
}
2694
/*
 * Maximum stride accepted for a framebuffer: for non-CCS modifiers on
 * gen4+ this exceeds the plane hardware limit (excess is handled via
 * remapping); for CCS, remapping is impossible so the plane limit
 * applies directly.
 */
static
u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
                        u32 pixel_format, u64 modifier)
{
        /*
         * Arbitrary limit for gen4+ chosen to match the
         * render engine max stride.
         *
         * The new CCS hash mode makes remapping impossible
         */
        if (!is_ccs_modifier(modifier)) {
                if (INTEL_GEN(dev_priv) >= 7)
                        return 256*1024;
                else if (INTEL_GEN(dev_priv) >= 4)
                        return 128*1024;
        }

        return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
}
2714
/*
 * Return the required stride alignment (in bytes) for the given fb
 * color plane: page size or 64 bytes for linear surfaces, otherwise
 * the tile width in bytes, possibly multiplied for CCS workarounds.
 */
static u32
intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
{
        struct drm_i915_private *dev_priv = to_i915(fb->dev);
        u32 tile_width;

        if (is_surface_linear(fb, color_plane)) {
                u32 max_stride = intel_plane_fb_max_stride(dev_priv,
                                                           fb->format->format,
                                                           fb->modifier);

                /*
                 * To make remapping with linear generally feasible
                 * we need the stride to be page aligned.
                 */
                if (fb->pitches[color_plane] > max_stride &&
                    !is_ccs_modifier(fb->modifier))
                        return intel_tile_size(dev_priv);
                else
                        return 64;
        }

        tile_width = intel_tile_width_bytes(fb, color_plane);
        if (is_ccs_modifier(fb->modifier)) {
                /*
                 * Display WA #0531: skl,bxt,kbl,glk
                 *
                 * Render decompression and plane width > 3840
                 * combined with horizontal panning requires the
                 * plane stride to be a multiple of 4. We'll just
                 * require the entire fb to accommodate that to avoid
                 * potential runtime errors at plane configuration time.
                 */
                if (IS_GEN(dev_priv, 9) && color_plane == 0 && fb->width > 3840)
                        tile_width *= 4;
                /*
                 * The main surface pitch must be padded to a multiple of four
                 * tile widths.
                 */
                else if (INTEL_GEN(dev_priv) >= 12)
                        tile_width *= 4;
        }
        return tile_width;
}
2759
/*
 * Can this plane's fb be remapped through an alternate GTT view?
 * Returns false for cursors, pre-gen4, CCS modifiers, and linear fbs
 * whose stride is not page aligned.
 */
bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
{
        struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        const struct drm_framebuffer *fb = plane_state->hw.fb;
        int i;

        /* We don't want to deal with remapping with cursors */
        if (plane->id == PLANE_CURSOR)
                return false;

        /*
         * The display engine limits already match/exceed the
         * render engine limits, so not much point in remapping.
         * Would also need to deal with the fence POT alignment
         * and gen2 2KiB GTT tile size.
         */
        if (INTEL_GEN(dev_priv) < 4)
                return false;

        /*
         * The new CCS hash mode isn't compatible with remapping as
         * the virtual address of the pages affects the compressed data.
         */
        if (is_ccs_modifier(fb->modifier))
                return false;

        /* Linear needs a page aligned stride for remapping */
        if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
                unsigned int alignment = intel_tile_size(dev_priv) - 1;

                for (i = 0; i < fb->format->num_planes; i++) {
                        if (fb->pitches[i] & alignment)
                                return false;
                }
        }

        return true;
}
2799
/*
 * Does this plane actually need GTT remapping? True only when the
 * plane is visible, remapping is possible, and the fb stride exceeds
 * what the plane hardware can scan out directly.
 */
static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
{
        struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
        const struct drm_framebuffer *fb = plane_state->hw.fb;
        unsigned int rotation = plane_state->hw.rotation;
        u32 stride, max_stride;

        /*
         * No remapping for invisible planes since we don't have
         * an actual source viewport to remap.
         */
        if (!plane_state->uapi.visible)
                return false;

        if (!intel_plane_can_remap(plane_state))
                return false;

        /*
         * FIXME: aux plane limits on gen9+ are
         * unclear in Bspec, for now no checking.
         */
        stride = intel_fb_pitch(fb, 0, rotation);
        max_stride = plane->max_stride(plane, fb->format->format,
                                       fb->modifier, rotation);

        return stride > max_stride;
}
2827
/*
 * Return the horizontal/vertical subsampling factors of @color_plane
 * relative to its main plane: 1x1 for the main plane itself, the
 * format's hsub/vsub for regular sub-planes, and a block-size derived
 * factor for gen12 CCS AUX planes.
 */
static void
intel_fb_plane_get_subsampling(int *hsub, int *vsub,
                               const struct drm_framebuffer *fb,
                               int color_plane)
{
        int main_plane;

        if (color_plane == 0) {
                *hsub = 1;
                *vsub = 1;

                return;
        }

        /*
         * TODO: Deduct the subsampling from the char block for all CCS
         * formats and planes.
         */
        if (!is_gen12_ccs_plane(fb, color_plane)) {
                *hsub = fb->format->hsub;
                *vsub = fb->format->vsub;

                return;
        }

        main_plane = ccs_to_main_plane(fb, color_plane);
        *hsub = drm_format_info_block_width(fb->format, color_plane) /
                drm_format_info_block_width(fb->format, main_plane);

        /*
         * The min stride check in the core framebuffer_check() function
         * assumes that format->hsub applies to every plane except for the
         * first plane. That's incorrect for the CCS AUX plane of the first
         * plane, but for the above check to pass we must define the block
         * width with that subsampling applied to it. Adjust the width here
         * accordingly, so we can calculate the actual subsampling factor.
         */
        if (main_plane == 0)
                *hsub *= fb->format->hsub;

        *vsub = 32;
}
/*
 * Validate that the intra-tile x/y offsets of a CCS plane match those
 * of its main surface (CCS has no x/y offset register of its own).
 * Returns 0 on success or for non-CCS planes, -EINVAL on mismatch.
 */
static int
intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
{
        struct drm_i915_private *i915 = to_i915(fb->dev);
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        int main_plane;
        int hsub, vsub;
        int tile_width, tile_height;
        int ccs_x, ccs_y;
        int main_x, main_y;

        if (!is_ccs_plane(fb, ccs_plane))
                return 0;

        intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height);
        intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);

        /* scale the CCS tile up to main surface coordinates */
        tile_width *= hsub;
        tile_height *= vsub;

        ccs_x = (x * hsub) % tile_width;
        ccs_y = (y * vsub) % tile_height;

        main_plane = ccs_to_main_plane(fb, ccs_plane);
        main_x = intel_fb->normal[main_plane].x % tile_width;
        main_y = intel_fb->normal[main_plane].y % tile_height;

        /*
         * CCS doesn't have its own x/y offset register, so the intra CCS tile
         * x/y offsets must match between CCS and the main surface.
         */
        if (main_x != ccs_x || main_y != ccs_y) {
                drm_dbg_kms(&i915->drm,
                              "Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
                              main_x, main_y,
                              ccs_x, ccs_y,
                              intel_fb->normal[main_plane].x,
                              intel_fb->normal[main_plane].y,
                              x, y);
                return -EINVAL;
        }

        return 0;
}
2914
2915 static void
2916 intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane)
2917 {
2918         int main_plane = is_ccs_plane(fb, color_plane) ?
2919                          ccs_to_main_plane(fb, color_plane) : 0;
2920         int main_hsub, main_vsub;
2921         int hsub, vsub;
2922
2923         intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, main_plane);
2924         intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane);
2925         *w = fb->width / main_hsub / hsub;
2926         *h = fb->height / main_vsub / vsub;
2927 }
2928
2929 /*
2930  * Setup the rotated view for an FB plane and return the size the GTT mapping
2931  * requires for this view.
2932  */
2933 static u32
2934 setup_fb_rotation(int plane, const struct intel_remapped_plane_info *plane_info,
2935                   u32 gtt_offset_rotated, int x, int y,
2936                   unsigned int width, unsigned int height,
2937                   unsigned int tile_size,
2938                   unsigned int tile_width, unsigned int tile_height,
2939                   struct drm_framebuffer *fb)
2940 {
2941         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2942         struct intel_rotation_info *rot_info = &intel_fb->rot_info;
2943         unsigned int pitch_tiles;
2944         struct drm_rect r;
2945
2946         /* Y or Yf modifiers required for 90/270 rotation */
2947         if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
2948             fb->modifier != I915_FORMAT_MOD_Yf_TILED)
2949                 return 0;
2950
2951         if (drm_WARN_ON(fb->dev, plane >= ARRAY_SIZE(rot_info->plane)))
2952                 return 0;
2953
2954         rot_info->plane[plane] = *plane_info;
2955
2956         intel_fb->rotated[plane].pitch = plane_info->height * tile_height;
2957
2958         /* rotate the x/y offsets to match the GTT view */
2959         drm_rect_init(&r, x, y, width, height);
2960         drm_rect_rotate(&r,
2961                         plane_info->width * tile_width,
2962                         plane_info->height * tile_height,
2963                         DRM_MODE_ROTATE_270);
2964         x = r.x1;
2965         y = r.y1;
2966
2967         /* rotate the tile dimensions to match the GTT view */
2968         pitch_tiles = intel_fb->rotated[plane].pitch / tile_height;
2969         swap(tile_width, tile_height);
2970
2971         /*
2972          * We only keep the x/y offsets, so push all of the
2973          * gtt offset into the x/y offsets.
2974          */
2975         intel_adjust_tile_offset(&x, &y,
2976                                  tile_width, tile_height,
2977                                  tile_size, pitch_tiles,
2978                                  gtt_offset_rotated * tile_size, 0);
2979
2980         /*
2981          * First pixel of the framebuffer from
2982          * the start of the rotated gtt mapping.
2983          */
2984         intel_fb->rotated[plane].x = x;
2985         intel_fb->rotated[plane].y = y;
2986
2987         return plane_info->width * plane_info->height;
2988 }
2989
2990 static int
2991 intel_fill_fb_info(struct drm_i915_private *dev_priv,
2992                    struct drm_framebuffer *fb)
2993 {
2994         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2995         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2996         u32 gtt_offset_rotated = 0;
2997         unsigned int max_size = 0;
2998         int i, num_planes = fb->format->num_planes;
2999         unsigned int tile_size = intel_tile_size(dev_priv);
3000
3001         for (i = 0; i < num_planes; i++) {
3002                 unsigned int width, height;
3003                 unsigned int cpp, size;
3004                 u32 offset;
3005                 int x, y;
3006                 int ret;
3007
3008                 cpp = fb->format->cpp[i];
3009                 intel_fb_plane_dims(&width, &height, fb, i);
3010
3011                 ret = intel_fb_offset_to_xy(&x, &y, fb, i);
3012                 if (ret) {
3013                         drm_dbg_kms(&dev_priv->drm,
3014                                     "bad fb plane %d offset: 0x%x\n",
3015                                     i, fb->offsets[i]);
3016                         return ret;
3017                 }
3018
3019                 ret = intel_fb_check_ccs_xy(fb, i, x, y);
3020                 if (ret)
3021                         return ret;
3022
3023                 /*
3024                  * The fence (if used) is aligned to the start of the object
3025                  * so having the framebuffer wrap around across the edge of the
3026                  * fenced region doesn't really work. We have no API to configure
3027                  * the fence start offset within the object (nor could we probably
3028                  * on gen2/3). So it's just easier if we just require that the
3029                  * fb layout agrees with the fence layout. We already check that the
3030                  * fb stride matches the fence stride elsewhere.
3031                  */
3032                 if (i == 0 && i915_gem_object_is_tiled(obj) &&
3033                     (x + width) * cpp > fb->pitches[i]) {
3034                         drm_dbg_kms(&dev_priv->drm,
3035                                     "bad fb plane %d offset: 0x%x\n",
3036                                      i, fb->offsets[i]);
3037                         return -EINVAL;
3038                 }
3039
3040                 /*
3041                  * First pixel of the framebuffer from
3042                  * the start of the normal gtt mapping.
3043                  */
3044                 intel_fb->normal[i].x = x;
3045                 intel_fb->normal[i].y = y;
3046
3047                 offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
3048                                                       fb->pitches[i],
3049                                                       DRM_MODE_ROTATE_0,
3050                                                       tile_size);
3051                 offset /= tile_size;
3052
3053                 if (!is_surface_linear(fb, i)) {
3054                         struct intel_remapped_plane_info plane_info;
3055                         unsigned int tile_width, tile_height;
3056
3057                         intel_tile_dims(fb, i, &tile_width, &tile_height);
3058
3059                         plane_info.offset = offset;
3060                         plane_info.stride = DIV_ROUND_UP(fb->pitches[i],
3061                                                          tile_width * cpp);
3062                         plane_info.width = DIV_ROUND_UP(x + width, tile_width);
3063                         plane_info.height = DIV_ROUND_UP(y + height,
3064                                                          tile_height);
3065
3066                         /* how many tiles does this plane need */
3067                         size = plane_info.stride * plane_info.height;
3068                         /*
3069                          * If the plane isn't horizontally tile aligned,
3070                          * we need one more tile.
3071                          */
3072                         if (x != 0)
3073                                 size++;
3074
3075                         gtt_offset_rotated +=
3076                                 setup_fb_rotation(i, &plane_info,
3077                                                   gtt_offset_rotated,
3078                                                   x, y, width, height,
3079                                                   tile_size,
3080                                                   tile_width, tile_height,
3081                                                   fb);
3082                 } else {
3083                         size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
3084                                             x * cpp, tile_size);
3085                 }
3086
3087                 /* how many tiles in total needed in the bo */
3088                 max_size = max(max_size, offset + size);
3089         }
3090
3091         if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
3092                 drm_dbg_kms(&dev_priv->drm,
3093                             "fb too big for bo (need %llu bytes, have %zu bytes)\n",
3094                             mul_u32_u32(max_size, tile_size), obj->base.size);
3095                 return -EINVAL;
3096         }
3097
3098         return 0;
3099 }
3100
/*
 * Build a rotated/remapped GGTT view for the plane and recompute the
 * per-color-plane x/y/stride state to match it.
 *
 * Overwrites plane_state->view and plane_state->color_plane[]. Also
 * rewrites plane_state->uapi.src to be relative to the new view.
 */
static void
intel_plane_remap_gtt(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *info = &plane_state->view.rotated;
	unsigned int rotation = plane_state->hw.rotation;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);
	unsigned int src_x, src_y;
	unsigned int src_w, src_h;
	u32 gtt_offset = 0;

	/*
	 * Start from a clean view; 90/270 rotation uses the rotated
	 * view type, everything else gets a plain remapped view.
	 */
	memset(&plane_state->view, 0, sizeof(plane_state->view));
	plane_state->view.type = drm_rotation_90_or_270(rotation) ?
		I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;

	/* src coordinates are in 16.16 fixed point */
	src_x = plane_state->uapi.src.x1 >> 16;
	src_y = plane_state->uapi.src.y1 >> 16;
	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
	src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

	/* CCS framebuffers are never expected to take this path */
	drm_WARN_ON(&dev_priv->drm, is_ccs_modifier(fb->modifier));

	/* Make src coordinates relative to the viewport */
	drm_rect_translate(&plane_state->uapi.src,
			   -(src_x << 16), -(src_y << 16));

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->uapi.src,
				src_w << 16, src_h << 16,
				DRM_MODE_ROTATE_270);

	for (i = 0; i < num_planes; i++) {
		/* chroma planes (i != 0) of planar formats are subsampled */
		unsigned int hsub = i ? fb->format->hsub : 1;
		unsigned int vsub = i ? fb->format->vsub : 1;
		unsigned int cpp = fb->format->cpp[i];
		unsigned int tile_width, tile_height;
		unsigned int width, height;
		unsigned int pitch_tiles;
		unsigned int x, y;
		u32 offset;

		intel_tile_dims(fb, i, &tile_width, &tile_height);

		x = src_x / hsub;
		y = src_y / vsub;
		width = src_w / hsub;
		height = src_h / vsub;

		/*
		 * First pixel of the src viewport from the
		 * start of the normal gtt mapping.
		 */
		x += intel_fb->normal[i].x;
		y += intel_fb->normal[i].y;

		/* tile-aligned start of the viewport, converted to tiles */
		offset = intel_compute_aligned_offset(dev_priv, &x, &y,
						      fb, i, fb->pitches[i],
						      DRM_MODE_ROTATE_0, tile_size);
		offset /= tile_size;

		drm_WARN_ON(&dev_priv->drm, i >= ARRAY_SIZE(info->plane));
		info->plane[i].offset = offset;
		info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
						     tile_width * cpp);
		info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
		info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			struct drm_rect r;

			/* rotate the x/y offsets to match the GTT view */
			drm_rect_init(&r, x, y, width, height);
			drm_rect_rotate(&r,
					info->plane[i].width * tile_width,
					info->plane[i].height * tile_height,
					DRM_MODE_ROTATE_270);
			x = r.x1;
			y = r.y1;

			/* in the rotated view consecutive rows step by tile columns */
			pitch_tiles = info->plane[i].height;
			plane_state->color_plane[i].stride = pitch_tiles * tile_height;

			/* rotate the tile dimensions to match the GTT view */
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = info->plane[i].width;
			plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
		}

		/*
		 * We only keep the x/y offsets, so push all of the
		 * gtt offset into the x/y offsets.
		 */
		intel_adjust_tile_offset(&x, &y,
					 tile_width, tile_height,
					 tile_size, pitch_tiles,
					 gtt_offset * tile_size, 0);

		/* next color plane starts right after this one in the view */
		gtt_offset += info->plane[i].width * info->plane[i].height;

		plane_state->color_plane[i].offset = 0;
		plane_state->color_plane[i].x = x;
		plane_state->color_plane[i].y = y;
	}
}
3211
3212 int
3213 intel_plane_compute_gtt(struct intel_plane_state *plane_state)
3214 {
3215         const struct intel_framebuffer *fb =
3216                 to_intel_framebuffer(plane_state->hw.fb);
3217         unsigned int rotation = plane_state->hw.rotation;
3218         int i, num_planes;
3219
3220         if (!fb)
3221                 return 0;
3222
3223         num_planes = fb->base.format->num_planes;
3224
3225         if (intel_plane_needs_remap(plane_state)) {
3226                 intel_plane_remap_gtt(plane_state);
3227
3228                 /*
3229                  * Sometimes even remapping can't overcome
3230                  * the stride limitations :( Can happen with
3231                  * big plane sizes and suitably misaligned
3232                  * offsets.
3233                  */
3234                 return intel_plane_check_stride(plane_state);
3235         }
3236
3237         intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);
3238
3239         for (i = 0; i < num_planes; i++) {
3240                 plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
3241                 plane_state->color_plane[i].offset = 0;
3242
3243                 if (drm_rotation_90_or_270(rotation)) {
3244                         plane_state->color_plane[i].x = fb->rotated[i].x;
3245                         plane_state->color_plane[i].y = fb->rotated[i].y;
3246                 } else {
3247                         plane_state->color_plane[i].x = fb->normal[i].x;
3248                         plane_state->color_plane[i].y = fb->normal[i].y;
3249                 }
3250         }
3251
3252         /* Rotate src coordinates to match rotated GTT view */
3253         if (drm_rotation_90_or_270(rotation))
3254                 drm_rect_rotate(&plane_state->uapi.src,
3255                                 fb->base.width << 16, fb->base.height << 16,
3256                                 DRM_MODE_ROTATE_270);
3257
3258         return intel_plane_check_stride(plane_state);
3259 }
3260
3261 static int i9xx_format_to_fourcc(int format)
3262 {
3263         switch (format) {
3264         case DISPPLANE_8BPP:
3265                 return DRM_FORMAT_C8;
3266         case DISPPLANE_BGRA555:
3267                 return DRM_FORMAT_ARGB1555;
3268         case DISPPLANE_BGRX555:
3269                 return DRM_FORMAT_XRGB1555;
3270         case DISPPLANE_BGRX565:
3271                 return DRM_FORMAT_RGB565;
3272         default:
3273         case DISPPLANE_BGRX888:
3274                 return DRM_FORMAT_XRGB8888;
3275         case DISPPLANE_RGBX888:
3276                 return DRM_FORMAT_XBGR8888;
3277         case DISPPLANE_BGRA888:
3278                 return DRM_FORMAT_ARGB8888;
3279         case DISPPLANE_RGBA888:
3280                 return DRM_FORMAT_ABGR8888;
3281         case DISPPLANE_BGRX101010:
3282                 return DRM_FORMAT_XRGB2101010;
3283         case DISPPLANE_RGBX101010:
3284                 return DRM_FORMAT_XBGR2101010;
3285         case DISPPLANE_BGRA101010:
3286                 return DRM_FORMAT_ARGB2101010;
3287         case DISPPLANE_RGBA101010:
3288                 return DRM_FORMAT_ABGR2101010;
3289         case DISPPLANE_RGBX161616:
3290                 return DRM_FORMAT_XBGR16161616F;
3291         }
3292 }
3293
3294 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
3295 {
3296         switch (format) {
3297         case PLANE_CTL_FORMAT_RGB_565:
3298                 return DRM_FORMAT_RGB565;
3299         case PLANE_CTL_FORMAT_NV12:
3300                 return DRM_FORMAT_NV12;
3301         case PLANE_CTL_FORMAT_XYUV:
3302                 return DRM_FORMAT_XYUV8888;
3303         case PLANE_CTL_FORMAT_P010:
3304                 return DRM_FORMAT_P010;
3305         case PLANE_CTL_FORMAT_P012:
3306                 return DRM_FORMAT_P012;
3307         case PLANE_CTL_FORMAT_P016:
3308                 return DRM_FORMAT_P016;
3309         case PLANE_CTL_FORMAT_Y210:
3310                 return DRM_FORMAT_Y210;
3311         case PLANE_CTL_FORMAT_Y212:
3312                 return DRM_FORMAT_Y212;
3313         case PLANE_CTL_FORMAT_Y216:
3314                 return DRM_FORMAT_Y216;
3315         case PLANE_CTL_FORMAT_Y410:
3316                 return DRM_FORMAT_XVYU2101010;
3317         case PLANE_CTL_FORMAT_Y412:
3318                 return DRM_FORMAT_XVYU12_16161616;
3319         case PLANE_CTL_FORMAT_Y416:
3320                 return DRM_FORMAT_XVYU16161616;
3321         default:
3322         case PLANE_CTL_FORMAT_XRGB_8888:
3323                 if (rgb_order) {
3324                         if (alpha)
3325                                 return DRM_FORMAT_ABGR8888;
3326                         else
3327                                 return DRM_FORMAT_XBGR8888;
3328                 } else {
3329                         if (alpha)
3330                                 return DRM_FORMAT_ARGB8888;
3331                         else
3332                                 return DRM_FORMAT_XRGB8888;
3333                 }
3334         case PLANE_CTL_FORMAT_XRGB_2101010:
3335                 if (rgb_order) {
3336                         if (alpha)
3337                                 return DRM_FORMAT_ABGR2101010;
3338                         else
3339                                 return DRM_FORMAT_XBGR2101010;
3340                 } else {
3341                         if (alpha)
3342                                 return DRM_FORMAT_ARGB2101010;
3343                         else
3344                                 return DRM_FORMAT_XRGB2101010;
3345                 }
3346         case PLANE_CTL_FORMAT_XRGB_16161616F:
3347                 if (rgb_order) {
3348                         if (alpha)
3349                                 return DRM_FORMAT_ABGR16161616F;
3350                         else
3351                                 return DRM_FORMAT_XBGR16161616F;
3352                 } else {
3353                         if (alpha)
3354                                 return DRM_FORMAT_ARGB16161616F;
3355                         else
3356                                 return DRM_FORMAT_XRGB16161616F;
3357                 }
3358         }
3359 }
3360
/*
 * Reconstruct a pinned GGTT vma for the framebuffer the BIOS/GOP left
 * enabled, backed by the preallocated range of stolen memory described
 * by @plane_config.
 *
 * Returns the pinned vma on success, NULL on any failure (callers
 * treat NULL as "no reusable initial FB").
 */
static struct i915_vma *
initial_plane_vma(struct drm_i915_private *i915,
		  struct intel_initial_plane_config *plane_config)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 base, size;

	if (plane_config->size == 0)
		return NULL;

	/* Expand the range to the GGTT minimum alignment */
	base = round_down(plane_config->base,
			  I915_GTT_MIN_ALIGNMENT);
	size = round_up(plane_config->base + plane_config->size,
			I915_GTT_MIN_ALIGNMENT);
	size -= base;

	/*
	 * If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features.
	 */
	if (size * 2 > i915->stolen_usable_size)
		return NULL;

	obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
	if (IS_ERR(obj))
		return NULL;

	/*
	 * Mark it WT ahead of time to avoid changing the
	 * cache_level during fbdev initialization. The
	 * unbind there would get stuck waiting for rcu.
	 */
	i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ?
					    I915_CACHE_WT : I915_CACHE_NONE);

	/* Carry the BIOS-programmed tiling mode and stride over to the object */
	switch (plane_config->tiling) {
	case I915_TILING_NONE:
		break;
	case I915_TILING_X:
	case I915_TILING_Y:
		obj->tiling_and_stride =
			plane_config->fb->base.pitches[0] |
			plane_config->tiling;
		break;
	default:
		MISSING_CASE(plane_config->tiling);
		goto err_obj;
	}

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma))
		goto err_obj;

	/* Pin at the exact GGTT offset the BIOS is scanning out from */
	if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
		goto err_obj;

	/* A tiled initial FB is only usable if it is fenceable */
	if (i915_gem_object_is_tiled(obj) &&
	    !i915_vma_is_map_and_fenceable(vma))
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return NULL;
}
3429
3430 static bool
3431 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
3432                               struct intel_initial_plane_config *plane_config)
3433 {
3434         struct drm_device *dev = crtc->base.dev;
3435         struct drm_i915_private *dev_priv = to_i915(dev);
3436         struct drm_mode_fb_cmd2 mode_cmd = { 0 };
3437         struct drm_framebuffer *fb = &plane_config->fb->base;
3438         struct i915_vma *vma;
3439
3440         switch (fb->modifier) {
3441         case DRM_FORMAT_MOD_LINEAR:
3442         case I915_FORMAT_MOD_X_TILED:
3443         case I915_FORMAT_MOD_Y_TILED:
3444                 break;
3445         default:
3446                 drm_dbg(&dev_priv->drm,
3447                         "Unsupported modifier for initial FB: 0x%llx\n",
3448                         fb->modifier);
3449                 return false;
3450         }
3451
3452         vma = initial_plane_vma(dev_priv, plane_config);
3453         if (!vma)
3454                 return false;
3455
3456         mode_cmd.pixel_format = fb->format->format;
3457         mode_cmd.width = fb->width;
3458         mode_cmd.height = fb->height;
3459         mode_cmd.pitches[0] = fb->pitches[0];
3460         mode_cmd.modifier[0] = fb->modifier;
3461         mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
3462
3463         if (intel_framebuffer_init(to_intel_framebuffer(fb),
3464                                    vma->obj, &mode_cmd)) {
3465                 drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
3466                 goto err_vma;
3467         }
3468
3469         plane_config->vma = vma;
3470         return true;
3471
3472 err_vma:
3473         i915_vma_put(vma);
3474         return false;
3475 }
3476
3477 static void
3478 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
3479                         struct intel_plane_state *plane_state,
3480                         bool visible)
3481 {
3482         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
3483
3484         plane_state->uapi.visible = visible;
3485
3486         if (visible)
3487                 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
3488         else
3489                 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
3490 }
3491
3492 static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
3493 {
3494         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3495         struct drm_plane *plane;
3496
3497         /*
3498          * Active_planes aliases if multiple "primary" or cursor planes
3499          * have been used on the same (or wrong) pipe. plane_mask uses
3500          * unique ids, hence we can use that to reconstruct active_planes.
3501          */
3502         crtc_state->enabled_planes = 0;
3503         crtc_state->active_planes = 0;
3504
3505         drm_for_each_plane_mask(plane, &dev_priv->drm,
3506                                 crtc_state->uapi.plane_mask) {
3507                 crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
3508                 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
3509         }
3510 }
3511
/*
 * Disable a plane outside of the atomic commit machinery and fix up
 * the current software state to match, clearing its visibility,
 * data-rate and cdclk bookkeeping. Used when state sanitization finds
 * a plane that must not stay enabled.
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
					 struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	intel_set_plane_visible(crtc_state, plane_state, false);
	fixup_plane_bitmasks(crtc_state);
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	/* primary plane going away; also turn off IPS */
	if (plane->id == PLANE_PRIMARY)
		hsw_disable_ips(crtc_state);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (IS_GEN(dev_priv, 2) && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_disable_plane(plane, crtc_state);
}
3556
/*
 * Take over the framebuffer the BIOS left on the primary plane of
 * @intel_crtc, either by wrapping the preallocated stolen memory or by
 * sharing an fb already reconstructed for another CRTC. If neither
 * works, the plane (and its bigjoiner slave's plane, if any) is
 * disabled instead.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(intel_crtc->base.state);
	struct drm_framebuffer *fb;
	struct i915_vma *vma;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		vma = plane_config->vma;
		goto valid_fb;
	}

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		struct intel_plane_state *state;

		if (c == &intel_crtc->base)
			continue;

		if (!to_intel_crtc_state(c->state)->uapi.active)
			continue;

		state = to_intel_plane_state(c->primary->state);
		if (!state->vma)
			continue;

		/* same GGTT base address => same BIOS fb, reuse it */
		if (intel_plane_ggtt_offset(state) == plane_config->base) {
			fb = state->hw.fb;
			vma = state->vma;
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB.  Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up.  The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	intel_plane_disable_noatomic(intel_crtc, intel_plane);
	if (crtc_state->bigjoiner) {
		struct intel_crtc *slave =
			crtc_state->bigjoiner_linked_crtc;
		intel_plane_disable_noatomic(slave, to_intel_plane(slave->base.primary));
	}

	return;

valid_fb:
	intel_state->hw.rotation = plane_config->rotation;
	intel_fill_fb_ggtt_view(&intel_state->view, fb,
				intel_state->hw.rotation);
	intel_state->color_plane[0].stride =
		intel_fb_pitch(fb, 0, intel_state->hw.rotation);

	/* the plane state now owns an extra pin + reference on the vma */
	__i915_vma_pin(vma);
	intel_state->vma = i915_vma_get(vma);
	if (intel_plane_uses_fence(intel_state) && i915_vma_pin_fence(vma) == 0)
		if (vma->fence)
			intel_state->flags |= PLANE_HAS_FENCE;

	/* full-fb src viewport, 16.16 fixed point */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->uapi.src = drm_plane_state_src(plane_state);
	intel_state->uapi.dst = drm_plane_state_dest(plane_state);

	if (plane_config->tiling)
		dev_priv->preserve_bios_swizzle = true;

	plane_state->fb = fb;
	drm_framebuffer_get(fb);

	plane_state->crtc = &intel_crtc->base;
	intel_plane_copy_uapi_to_hw_state(intel_state, intel_state,
					  intel_crtc);

	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
		  &to_intel_frontbuffer(fb)->bits);
}
3664
3665
/*
 * Try to line up the CCS AUX plane's x/y with the main surface's x/y.
 *
 * Walks the AUX plane offset downwards (one alignment step at a time,
 * accounting for subsampling) until its x/y coordinates match the
 * main surface's, or no further adjustment is possible.
 *
 * Returns true and updates color_plane[ccs_plane] on success, false
 * if no matching offset could be found.
 */
static bool
skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
			       int main_x, int main_y, u32 main_offset,
			       int ccs_plane)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int aux_x = plane_state->color_plane[ccs_plane].x;
	int aux_y = plane_state->color_plane[ccs_plane].y;
	u32 aux_offset = plane_state->color_plane[ccs_plane].offset;
	u32 alignment = intel_surf_alignment(fb, ccs_plane);
	int hsub;
	int vsub;

	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
	while (aux_offset >= main_offset && aux_y <= main_y) {
		int x, y;

		/* coordinates already line up */
		if (aux_x == main_x && aux_y == main_y)
			break;

		/* can't back up any further */
		if (aux_offset == 0)
			break;

		x = aux_x / hsub;
		y = aux_y / vsub;
		aux_offset = intel_plane_adjust_aligned_offset(&x, &y,
							       plane_state,
							       ccs_plane,
							       aux_offset,
							       aux_offset -
								alignment);
		/* back to main-surface coordinates, keeping the sub-sample remainder */
		aux_x = x * hsub + aux_x % hsub;
		aux_y = y * vsub + aux_y % vsub;
	}

	if (aux_x != main_x || aux_y != main_y)
		return false;

	plane_state->color_plane[ccs_plane].offset = aux_offset;
	plane_state->color_plane[ccs_plane].x = aux_x;
	plane_state->color_plane[ccs_plane].y = aux_y;

	return true;
}
3710
3711 unsigned int
3712 intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
3713 {
3714         int x = 0, y = 0;
3715
3716         intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3717                                           plane_state->color_plane[0].offset, 0);
3718
3719         return y;
3720 }
3721
3722 static int intel_plane_min_width(struct intel_plane *plane,
3723                                  const struct drm_framebuffer *fb,
3724                                  int color_plane,
3725                                  unsigned int rotation)
3726 {
3727         if (plane->min_width)
3728                 return plane->min_width(fb, color_plane, rotation);
3729         else
3730                 return 1;
3731 }
3732
3733 static int intel_plane_max_width(struct intel_plane *plane,
3734                                  const struct drm_framebuffer *fb,
3735                                  int color_plane,
3736                                  unsigned int rotation)
3737 {
3738         if (plane->max_width)
3739                 return plane->max_width(fb, color_plane, rotation);
3740         else
3741                 return INT_MAX;
3742 }
3743
3744 static int intel_plane_max_height(struct intel_plane *plane,
3745                                   const struct drm_framebuffer *fb,
3746                                   int color_plane,
3747                                   unsigned int rotation)
3748 {
3749         if (plane->max_height)
3750                 return plane->max_height(fb, color_plane, rotation);
3751         else
3752                 return INT_MAX;
3753 }
3754
/*
 * Validate the main (Y/RGB) surface size against the plane's limits and
 * compute an aligned surface offset plus x/y coordinates for it, honouring
 * the constraints the AUX/CCS surfaces place on the main surface.
 *
 * Returns 0 on success, -EINVAL if the size is out of range or no
 * suitable offset can be found.
 */
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	/* src coordinates are 16.16 fixed point */
	int x = plane_state->uapi.src.x1 >> 16;
	int y = plane_state->uapi.src.y1 >> 16;
	int w = drm_rect_width(&plane_state->uapi.src) >> 16;
	int h = drm_rect_height(&plane_state->uapi.src) >> 16;
	int min_width = intel_plane_min_width(plane, fb, 0, rotation);
	int max_width = intel_plane_max_width(plane, fb, 0, rotation);
	int max_height = intel_plane_max_height(plane, fb, 0, rotation);
	int aux_plane = intel_main_to_aux_plane(fb, 0);
	/* aux_offset is only meaningful when aux_plane != 0 (fb has an AUX plane) */
	u32 aux_offset = plane_state->color_plane[aux_plane].offset;
	u32 alignment, offset;

	if (w > max_width || w < min_width || h > max_height) {
		drm_dbg_kms(&dev_priv->drm,
			    "requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
			    w, h, min_width, max_width, max_height);
		return -EINVAL;
	}

	intel_add_fb_offsets(&x, &y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
	/* alignment is assumed to be a power of two (or 0) below */
	alignment = intel_surf_alignment(fb, 0);
	if (drm_WARN_ON(&dev_priv->drm, alignment && !is_power_of_2(alignment)))
		return -EINVAL;

	/*
	 * AUX surface offset is specified as the distance from the
	 * main surface offset, and it must be non-negative. Make
	 * sure that is what we will get.
	 */
	if (aux_plane && offset > aux_offset)
		offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
							   offset, aux_offset & ~(alignment - 1));

	/*
	 * When using an X-tiled surface, the plane blows up
	 * if the x offset + width exceed the stride.
	 *
	 * TODO: linear and Y-tiled seem fine, Yf untested,
	 */
	if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
		int cpp = fb->format->cpp[0];

		/* walk the offset back one alignment step at a time */
		while ((x + w) * cpp > plane_state->color_plane[0].stride) {
			if (offset == 0) {
				drm_dbg_kms(&dev_priv->drm,
					    "Unable to find suitable display surface offset due to X-tiling\n");
				return -EINVAL;
			}

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}
	}

	/*
	 * CCS AUX surface doesn't have its own x/y offsets, we must make sure
	 * they match with the main surface x/y offsets.
	 */
	if (is_ccs_modifier(fb->modifier)) {
		while (!skl_check_main_ccs_coordinates(plane_state, x, y,
						       offset, aux_plane)) {
			if (offset == 0)
				break;

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[aux_plane].x ||
		    y != plane_state->color_plane[aux_plane].y) {
			drm_dbg_kms(&dev_priv->drm,
				    "Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = x;
	plane_state->color_plane[0].y = y;

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate_to(&plane_state->uapi.src,
			      x << 16, y << 16);

	return 0;
}
3850
/*
 * Compute the aligned surface offset and x/y coordinates of the UV
 * (chroma) plane of a semiplanar YUV fb. The 16.16 fixed point src
 * coordinates are shifted by 17 instead of 16 to additionally divide
 * them by two for the chroma subsampling.
 *
 * Returns 0 on success, -EINVAL on failure.
 */
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	int uv_plane = 1;
	int max_width = intel_plane_max_width(plane, fb, uv_plane, rotation);
	int max_height = intel_plane_max_height(plane, fb, uv_plane, rotation);
	int x = plane_state->uapi.src.x1 >> 17;
	int y = plane_state->uapi.src.y1 >> 17;
	int w = drm_rect_width(&plane_state->uapi.src) >> 17;
	int h = drm_rect_height(&plane_state->uapi.src) >> 17;
	u32 offset;

	/* FIXME not quite sure how/if these apply to the chroma plane */
	if (w > max_width || h > max_height) {
		drm_dbg_kms(&i915->drm,
			    "CbCr source size %dx%d too big (limit %dx%d)\n",
			    w, h, max_width, max_height);
		return -EINVAL;
	}

	intel_add_fb_offsets(&x, &y, plane_state, uv_plane);
	offset = intel_plane_compute_aligned_offset(&x, &y,
						    plane_state, uv_plane);

	if (is_ccs_modifier(fb->modifier)) {
		int ccs_plane = main_to_ccs_plane(fb, uv_plane);
		u32 aux_offset = plane_state->color_plane[ccs_plane].offset;
		u32 alignment = intel_surf_alignment(fb, uv_plane);

		/* CCS offset is relative to the UV offset and must be non-negative */
		if (offset > aux_offset)
			offset = intel_plane_adjust_aligned_offset(&x, &y,
								   plane_state,
								   uv_plane,
								   offset,
								   aux_offset & ~(alignment - 1));

		/* walk the offset back until the CCS x/y coordinates line up */
		while (!skl_check_main_ccs_coordinates(plane_state, x, y,
						       offset, ccs_plane)) {
			if (offset == 0)
				break;

			offset = intel_plane_adjust_aligned_offset(&x, &y,
								   plane_state,
								   uv_plane,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[ccs_plane].x ||
		    y != plane_state->color_plane[ccs_plane].y) {
			drm_dbg_kms(&i915->drm,
				    "Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	plane_state->color_plane[uv_plane].offset = offset;
	plane_state->color_plane[uv_plane].x = x;
	plane_state->color_plane[uv_plane].y = y;

	return 0;
}
3915
/*
 * Compute the offset and x/y coordinates of every CCS (color control
 * surface) plane of the fb. The effective subsampling of a CCS plane
 * relative to the fb origin is the product of the corresponding main
 * plane's subsampling and the CCS plane's own subsampling factors.
 *
 * Always returns 0.
 */
static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int src_x = plane_state->uapi.src.x1 >> 16;
	int src_y = plane_state->uapi.src.y1 >> 16;
	u32 offset;
	int ccs_plane;

	for (ccs_plane = 0; ccs_plane < fb->format->num_planes; ccs_plane++) {
		int main_hsub, main_vsub;
		int hsub, vsub;
		int x, y;

		/* only the CCS planes are handled here */
		if (!is_ccs_plane(fb, ccs_plane))
			continue;

		intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb,
					       ccs_to_main_plane(fb, ccs_plane));
		intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);

		/* total subsampling = main plane subsampling * CCS subsampling */
		hsub *= main_hsub;
		vsub *= main_vsub;
		x = src_x / hsub;
		y = src_y / vsub;

		intel_add_fb_offsets(&x, &y, plane_state, ccs_plane);

		offset = intel_plane_compute_aligned_offset(&x, &y,
							    plane_state,
							    ccs_plane);

		plane_state->color_plane[ccs_plane].offset = offset;
		/*
		 * Store the x/y relative to the main plane's subsampling,
		 * preserving the sub-tile remainder of the src coordinate.
		 */
		plane_state->color_plane[ccs_plane].x = (x * hsub +
							 src_x % hsub) /
							main_hsub;
		plane_state->color_plane[ccs_plane].y = (y * vsub +
							 src_y % vsub) /
							main_vsub;
	}

	return 0;
}
3958
3959 int skl_check_plane_surface(struct intel_plane_state *plane_state)
3960 {
3961         const struct drm_framebuffer *fb = plane_state->hw.fb;
3962         int ret, i;
3963
3964         ret = intel_plane_compute_gtt(plane_state);
3965         if (ret)
3966                 return ret;
3967
3968         if (!plane_state->uapi.visible)
3969                 return 0;
3970
3971         /*
3972          * Handle the AUX surface first since the main surface setup depends on
3973          * it.
3974          */
3975         if (is_ccs_modifier(fb->modifier)) {
3976                 ret = skl_check_ccs_aux_surface(plane_state);
3977                 if (ret)
3978                         return ret;
3979         }
3980
3981         if (intel_format_info_is_yuv_semiplanar(fb->format,
3982                                                 fb->modifier)) {
3983                 ret = skl_check_nv12_aux_surface(plane_state);
3984                 if (ret)
3985                         return ret;
3986         }
3987
3988         for (i = fb->format->num_planes; i < ARRAY_SIZE(plane_state->color_plane); i++) {
3989                 plane_state->color_plane[i].offset = 0;
3990                 plane_state->color_plane[i].x = 0;
3991                 plane_state->color_plane[i].y = 0;
3992         }
3993
3994         ret = skl_check_main_surface(plane_state);
3995         if (ret)
3996                 return ret;
3997
3998         return 0;
3999 }
4000
4001 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
4002 {
4003         struct drm_device *dev = intel_crtc->base.dev;
4004         struct drm_i915_private *dev_priv = to_i915(dev);
4005         unsigned long irqflags;
4006
4007         spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
4008
4009         intel_de_write_fw(dev_priv, SKL_PS_CTRL(intel_crtc->pipe, id), 0);
4010         intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
4011         intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
4012
4013         spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
4014 }
4015
4016 /*
4017  * This function detaches (aka. unbinds) unused scalers in hardware
4018  */
4019 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
4020 {
4021         struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
4022         const struct intel_crtc_scaler_state *scaler_state =
4023                 &crtc_state->scaler_state;
4024         int i;
4025
4026         /* loop through and disable scalers that aren't in use */
4027         for (i = 0; i < intel_crtc->num_scalers; i++) {
4028                 if (!scaler_state->scalers[i].in_use)
4029                         skl_detach_scaler(intel_crtc, i);
4030         }
4031 }
4032
/*
 * Return the divisor that converts a byte stride into the unit the
 * hardware expects for the given fb plane and rotation.
 */
static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
					  int color_plane, unsigned int rotation)
{
	/*
	 * Linear buffers express the stride as a multiple of 64 byte
	 * chunks, tiled buffers express it in number of tiles. For
	 * 90/270 degree rotation the tile height is the relevant
	 * dimension instead of the tile row size.
	 */
	if (is_surface_linear(fb, color_plane))
		return 64;

	if (drm_rotation_90_or_270(rotation))
		return intel_tile_height(fb, color_plane);

	return intel_tile_width_bytes(fb, color_plane);
}
4047
4048 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
4049                      int color_plane)
4050 {
4051         const struct drm_framebuffer *fb = plane_state->hw.fb;
4052         unsigned int rotation = plane_state->hw.rotation;
4053         u32 stride = plane_state->color_plane[color_plane].stride;
4054
4055         if (color_plane >= fb->format->num_planes)
4056                 return 0;
4057
4058         return stride / skl_plane_stride_mult(fb, color_plane, rotation);
4059 }
4060
4061 static u32 skl_plane_ctl_format(u32 pixel_format)
4062 {
4063         switch (pixel_format) {
4064         case DRM_FORMAT_C8:
4065                 return PLANE_CTL_FORMAT_INDEXED;
4066         case DRM_FORMAT_RGB565:
4067                 return PLANE_CTL_FORMAT_RGB_565;
4068         case DRM_FORMAT_XBGR8888:
4069         case DRM_FORMAT_ABGR8888:
4070                 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
4071         case DRM_FORMAT_XRGB8888:
4072         case DRM_FORMAT_ARGB8888:
4073                 return PLANE_CTL_FORMAT_XRGB_8888;
4074         case DRM_FORMAT_XBGR2101010:
4075         case DRM_FORMAT_ABGR2101010:
4076                 return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
4077         case DRM_FORMAT_XRGB2101010:
4078         case DRM_FORMAT_ARGB2101010:
4079                 return PLANE_CTL_FORMAT_XRGB_2101010;
4080         case DRM_FORMAT_XBGR16161616F:
4081         case DRM_FORMAT_ABGR16161616F:
4082                 return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
4083         case DRM_FORMAT_XRGB16161616F:
4084         case DRM_FORMAT_ARGB16161616F:
4085                 return PLANE_CTL_FORMAT_XRGB_16161616F;
4086         case DRM_FORMAT_XYUV8888:
4087                 return PLANE_CTL_FORMAT_XYUV;
4088         case DRM_FORMAT_YUYV:
4089                 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
4090         case DRM_FORMAT_YVYU:
4091                 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
4092         case DRM_FORMAT_UYVY:
4093                 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
4094         case DRM_FORMAT_VYUY:
4095                 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
4096         case DRM_FORMAT_NV12:
4097                 return PLANE_CTL_FORMAT_NV12;
4098         case DRM_FORMAT_P010:
4099                 return PLANE_CTL_FORMAT_P010;
4100         case DRM_FORMAT_P012:
4101                 return PLANE_CTL_FORMAT_P012;
4102         case DRM_FORMAT_P016:
4103                 return PLANE_CTL_FORMAT_P016;
4104         case DRM_FORMAT_Y210:
4105                 return PLANE_CTL_FORMAT_Y210;
4106         case DRM_FORMAT_Y212:
4107                 return PLANE_CTL_FORMAT_Y212;
4108         case DRM_FORMAT_Y216:
4109                 return PLANE_CTL_FORMAT_Y216;
4110         case DRM_FORMAT_XVYU2101010:
4111                 return PLANE_CTL_FORMAT_Y410;
4112         case DRM_FORMAT_XVYU12_16161616:
4113                 return PLANE_CTL_FORMAT_Y412;
4114         case DRM_FORMAT_XVYU16161616:
4115                 return PLANE_CTL_FORMAT_Y416;
4116         default:
4117                 MISSING_CASE(pixel_format);
4118         }
4119
4120         return 0;
4121 }
4122
4123 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
4124 {
4125         if (!plane_state->hw.fb->format->has_alpha)
4126                 return PLANE_CTL_ALPHA_DISABLE;
4127
4128         switch (plane_state->hw.pixel_blend_mode) {
4129         case DRM_MODE_BLEND_PIXEL_NONE:
4130                 return PLANE_CTL_ALPHA_DISABLE;
4131         case DRM_MODE_BLEND_PREMULTI:
4132                 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
4133         case DRM_MODE_BLEND_COVERAGE:
4134                 return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
4135         default:
4136                 MISSING_CASE(plane_state->hw.pixel_blend_mode);
4137                 return PLANE_CTL_ALPHA_DISABLE;
4138         }
4139 }
4140
4141 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
4142 {
4143         if (!plane_state->hw.fb->format->has_alpha)
4144                 return PLANE_COLOR_ALPHA_DISABLE;
4145
4146         switch (plane_state->hw.pixel_blend_mode) {
4147         case DRM_MODE_BLEND_PIXEL_NONE:
4148                 return PLANE_COLOR_ALPHA_DISABLE;
4149         case DRM_MODE_BLEND_PREMULTI:
4150                 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
4151         case DRM_MODE_BLEND_COVERAGE:
4152                 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
4153         default:
4154                 MISSING_CASE(plane_state->hw.pixel_blend_mode);
4155                 return PLANE_COLOR_ALPHA_DISABLE;
4156         }
4157 }
4158
4159 static u32 skl_plane_ctl_tiling(u64 fb_modifier)
4160 {
4161         switch (fb_modifier) {
4162         case DRM_FORMAT_MOD_LINEAR:
4163                 break;
4164         case I915_FORMAT_MOD_X_TILED:
4165                 return PLANE_CTL_TILED_X;
4166         case I915_FORMAT_MOD_Y_TILED:
4167                 return PLANE_CTL_TILED_Y;
4168         case I915_FORMAT_MOD_Y_TILED_CCS:
4169                 return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
4170         case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
4171                 return PLANE_CTL_TILED_Y |
4172                        PLANE_CTL_RENDER_DECOMPRESSION_ENABLE |
4173                        PLANE_CTL_CLEAR_COLOR_DISABLE;
4174         case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
4175                 return PLANE_CTL_TILED_Y | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE;
4176         case I915_FORMAT_MOD_Yf_TILED:
4177                 return PLANE_CTL_TILED_YF;
4178         case I915_FORMAT_MOD_Yf_TILED_CCS:
4179                 return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
4180         default:
4181                 MISSING_CASE(fb_modifier);
4182         }
4183
4184         return 0;
4185 }
4186
4187 static u32 skl_plane_ctl_rotate(unsigned int rotate)
4188 {
4189         switch (rotate) {
4190         case DRM_MODE_ROTATE_0:
4191                 break;
4192         /*
4193          * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
4194          * while i915 HW rotation is clockwise, thats why this swapping.
4195          */
4196         case DRM_MODE_ROTATE_90:
4197                 return PLANE_CTL_ROTATE_270;
4198         case DRM_MODE_ROTATE_180:
4199                 return PLANE_CTL_ROTATE_180;
4200         case DRM_MODE_ROTATE_270:
4201                 return PLANE_CTL_ROTATE_90;
4202         default:
4203                 MISSING_CASE(rotate);
4204         }
4205
4206         return 0;
4207 }
4208
4209 static u32 cnl_plane_ctl_flip(unsigned int reflect)
4210 {
4211         switch (reflect) {
4212         case 0:
4213                 break;
4214         case DRM_MODE_REFLECT_X:
4215                 return PLANE_CTL_FLIP_HORIZONTAL;
4216         case DRM_MODE_REFLECT_Y:
4217         default:
4218                 MISSING_CASE(reflect);
4219         }
4220
4221         return 0;
4222 }
4223
4224 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
4225 {
4226         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4227         u32 plane_ctl = 0;
4228
4229         if (crtc_state->uapi.async_flip)
4230                 plane_ctl |= PLANE_CTL_ASYNC_FLIP;
4231
4232         if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4233                 return plane_ctl;
4234
4235         if (crtc_state->gamma_enable)
4236                 plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
4237
4238         if (crtc_state->csc_enable)
4239                 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
4240
4241         return plane_ctl;
4242 }
4243
/*
 * Compute the plane-dependent portion of the PLANE_CTL register value.
 *
 * On pre-GLK/gen10 hardware the alpha blend mode, plane gamma disable
 * and YUV->RGB CSC/range bits live in PLANE_CTL; on newer platforms
 * those are programmed via PLANE_COLOR_CTL instead (see
 * glk_plane_color_ctl()).
 */
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	u32 plane_ctl;

	plane_ctl = PLANE_CTL_ENABLE;

	if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
		plane_ctl |= skl_plane_ctl_alpha(plane_state);
		plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;

		if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;

		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
	}

	plane_ctl |= skl_plane_ctl_format(fb->format->format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
	plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);

	/* reflection (flip) is only handled on gen10+, see cnl_plane_ctl_flip() */
	if (INTEL_GEN(dev_priv) >= 10)
		plane_ctl |= cnl_plane_ctl_flip(rotation &
						DRM_MODE_REFLECT_MASK);

	/* destination color keying takes precedence over source keying */
	if (key->flags & I915_SET_COLORKEY_DESTINATION)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
	else if (key->flags & I915_SET_COLORKEY_SOURCE)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;

	return plane_ctl;
}
4282
4283 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
4284 {
4285         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4286         u32 plane_color_ctl = 0;
4287
4288         if (INTEL_GEN(dev_priv) >= 11)
4289                 return plane_color_ctl;
4290
4291         if (crtc_state->gamma_enable)
4292                 plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
4293
4294         if (crtc_state->csc_enable)
4295                 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
4296
4297         return plane_color_ctl;
4298 }
4299
/*
 * Compute the plane-dependent portion of the PLANE_COLOR_CTL register
 * value (GLK/gen10+ home of the blending/gamma/CSC bits that used to
 * live in PLANE_CTL, see skl_plane_ctl()).
 */
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
			const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	u32 plane_color_ctl = 0;

	plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
	plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);

	if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
		/* non-HDR planes use one of the fixed YUV->RGB CSC modes */
		switch (plane_state->hw.color_encoding) {
		case DRM_COLOR_YCBCR_BT709:
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
			break;
		case DRM_COLOR_YCBCR_BT2020:
			plane_color_ctl |=
				PLANE_COLOR_CSC_MODE_YUV2020_TO_RGB2020;
			break;
		default:
			/* BT.601 is the fallback for any other encoding */
			plane_color_ctl |=
				PLANE_COLOR_CSC_MODE_YUV601_TO_RGB601;
		}
		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
	} else if (fb->format->is_yuv) {
		/* HDR planes use the programmable input CSC instead */
		plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
	}

	return plane_color_ctl;
}
4333
/*
 * Re-program the display hardware and, if @state is non-NULL, commit
 * the atomic state that was duplicated before a GPU/display reset.
 *
 * Returns 0 on success or a negative error code from the commit.
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	intel_vga_redisable(to_i915(dev));

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	/*
	 * NOTE(review): -EDEADLK would indicate a lock contention problem
	 * in the commit even though the caller already holds the modeset
	 * locks via @ctx, hence the WARN.
	 */
	drm_WARN_ON(dev, ret == -EDEADLK);
	return ret;
}
4372
4373 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
4374 {
4375         return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
4376                 intel_has_gpu_reset(&dev_priv->gt));
4377 }
4378
/*
 * Prepare the display for a GPU reset: when the reset will clobber the
 * display (or modeset-vs-reset testing is forced), take all modeset
 * locks, duplicate the current atomic state for later restoration and
 * disable all CRTCs. Paired with intel_display_finish_reset(), which
 * restores the saved state and drops the locks.
 */
void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* reset doesn't touch the display */
	if (!dev_priv->params.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
	smp_mb__after_atomic();
	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Modeset potentially stuck, unbreaking through wedging\n");
		intel_gt_set_wedged(&dev_priv->gt);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	/* keep retrying until all modeset locks are acquired */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
			ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
			ret);
		drm_atomic_state_put(state);
		return;
	}

	/* stashed for intel_display_finish_reset() to restore */
	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
4441
/*
 * Undo intel_display_prepare_reset() after a GPU reset: restore the
 * display state that was duplicated before the reset (re-initializing
 * the display hardware first if the reset clobbered it) and drop the
 * modeset locks taken by the prepare step.
 */
void intel_display_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
		return;

	/* take ownership of the state stashed by the prepare step */
	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev_priv);
		intel_init_clock_gating(dev_priv);
		intel_hpd_init(dev_priv);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);

		intel_hpd_poll_disable(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	/* allow modesets blocked by the prepare step to proceed again */
	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}
4493
4494 static void icl_set_pipe_chicken(struct intel_crtc *crtc)
4495 {
4496         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4497         enum pipe pipe = crtc->pipe;
4498         u32 tmp;
4499
4500         tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
4501
4502         /*
4503          * Display WA #1153: icl
4504          * enable hardware to bypass the alpha math
4505          * and rounding for per-pixel values 00 and 0xff
4506          */
4507         tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
4508         /*
4509          * Display WA # 1605353570: icl
4510          * Set the pixel rounding bit to 1 for allowing
4511          * passthrough of Frame buffer pixels unmodified
4512          * across pipe
4513          */
4514         tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
4515         intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
4516 }
4517
/*
 * Switch the FDI link from a training pattern to the normal pixel
 * traffic pattern after link training has completed, on both the
 * CPU (TX) and PCH (RX) sides.
 */
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		/* IVB uses a different train-select field layout */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		/* CPT PCH uses its own train pattern field */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	intel_de_posting_read(dev_priv, reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		intel_de_write(dev_priv, reg,
			       intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
}
4558
/*
 * The FDI link training functions for ILK/Ibexpeak.
 *
 * Runs the two-stage FDI training handshake: pattern 1 until the RX side
 * reports bit lock, then pattern 2 until it reports symbol lock.  Each
 * stage polls FDI_RX_IIR up to five times and logs an error on timeout.
 */
static void ilk_fdi_link_train(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);
	intel_de_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR);
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll for bit lock (training pattern 1 complete) */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
			intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Poll for symbol lock (training pattern 2 complete) */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			intel_de_write(dev_priv, reg,
				       temp | FDI_RX_SYMBOL_LOCK);
			drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done\n");

}
4654
/*
 * Voltage-swing / pre-emphasis levels stepped through, in order, by the
 * SNB (gen6_fdi_link_train) and IVB manual (ivb_manual_fdi_link_train)
 * FDI training loops when the link fails to lock at the current level.
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
4661
/*
 * The FDI link training functions for SNB/Cougarpoint.
 *
 * Same two-stage handshake as ILK (pattern 1 -> bit lock, pattern 2 ->
 * symbol lock), but each stage additionally steps through up to four
 * voltage-swing/pre-emphasis levels from snb_b_fdi_train_param[],
 * polling FDI_RX_IIR five times per level before escalating.
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B: start from the lowest vswing/emphasis level */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	intel_de_write(dev_priv, FDI_RX_MISC(pipe),
		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Step through each vswing/emphasis level until bit lock is seen */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		/* retry < 5 means the inner loop saw bit lock */
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN(dev_priv, 6)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Step through each vswing/emphasis level until symbol lock is seen */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		/* retry < 5 means the inner loop saw symbol lock */
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}
4798
/*
 * Manual link training for Ivy Bridge A0 parts.
 *
 * IVB A0 cannot use automatic training (FDI_LINK_TRAIN_AUTO is cleared
 * below), so this walks each vswing/pre-emphasis level twice, fully
 * disabling and re-enabling TX/RX between attempts, and bails out via
 * train_done as soon as pattern 2 achieves symbol lock.
 */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
		    intel_de_read(dev_priv, FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* j/2: two attempts per vswing/emphasis level */
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

		intel_de_write(dev_priv, FDI_RX_MISC(pipe),
			       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			/* re-read in case lock arrived just after the first read */
			if (temp & FDI_RX_BIT_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done, level %i.\n",
					    i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		intel_de_write(dev_priv, reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			/* re-read in case lock arrived just after the first read */
			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done, level %i.\n",
					    i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}
4924
/*
 * Enable the PCH FDI RX PLL, switch the RX clock source from Rawclk to
 * PCDclk, and make sure the CPU FDI TX PLL is running (once enabled it
 * stays on for Ironlake).
 */
static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	/* BPC in FDI rx must match the value programmed in PIPECONF */
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(100);
	}
}
4961
/*
 * Disable the FDI PLLs: switch the PCH FDI RX back from PCDclk to
 * Rawclk, then turn off the CPU FDI TX PLL and finally the PCH FDI RX
 * PLL, waiting for the clocks to settle after each step.
 */
static void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);

	/* Disable PCH FDI RX PLL */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	intel_de_posting_read(dev_priv, reg);
	udelay(100);
}
4991
/*
 * Disable the CPU FDI TX and PCH FDI RX for the given crtc, and put
 * both link ends back into training pattern 1 so a subsequent enable
 * starts training from a known state.
 */
static void ilk_fdi_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(0x7 << 16);
	/* keep the RX BPC field in sync with PIPECONF while disabling */
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
			       FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);
}
5043
5044 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
5045 {
5046         struct drm_crtc *crtc;
5047         bool cleanup_done;
5048
5049         drm_for_each_crtc(crtc, &dev_priv->drm) {
5050                 struct drm_crtc_commit *commit;
5051                 spin_lock(&crtc->commit_lock);
5052                 commit = list_first_entry_or_null(&crtc->commit_list,
5053                                                   struct drm_crtc_commit, commit_entry);
5054                 cleanup_done = commit ?
5055                         try_wait_for_completion(&commit->cleanup_done) : true;
5056                 spin_unlock(&crtc->commit_lock);
5057
5058                 if (cleanup_done)
5059                         continue;
5060
5061                 drm_crtc_wait_one_vblank(crtc);
5062
5063                 return true;
5064         }
5065
5066         return false;
5067 }
5068
5069 void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
5070 {
5071         u32 temp;
5072
5073         intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
5074
5075         mutex_lock(&dev_priv->sb_lock);
5076
5077         temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
5078         temp |= SBI_SSCCTL_DISABLE;
5079         intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
5080
5081         mutex_unlock(&dev_priv->sb_lock);
5082 }
5083
/*
 * Program iCLKIP clock to the desired frequency.
 *
 * Decomposes the target pixel clock into the auxiliary divider (auxdiv),
 * integer divider (divsel) and phase increment (phaseinc) expected by
 * the SSCDIVINTPHASE6/SSCAUXDIV6 sideband registers, then re-enables the
 * modulator and ungates the pixel clock.  lpt_get_iclkip() performs the
 * inverse computation.
 */
static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* Gate the clock and disable the modulator before reprogramming */
	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		/* Split the overall divisor into integer part + phase increment */
		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		    ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
		    ~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	drm_dbg_kms(&dev_priv->drm,
		    "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
		    clock, auxdiv, divsel, phasedir, phaseinc);

	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
5159
5160 int lpt_get_iclkip(struct drm_i915_private *dev_priv)
5161 {
5162         u32 divsel, phaseinc, auxdiv;
5163         u32 iclk_virtual_root_freq = 172800 * 1000;
5164         u32 iclk_pi_range = 64;
5165         u32 desired_divisor;
5166         u32 temp;
5167
5168         if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
5169                 return 0;
5170
5171         mutex_lock(&dev_priv->sb_lock);
5172
5173         temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
5174         if (temp & SBI_SSCCTL_DISABLE) {
5175                 mutex_unlock(&dev_priv->sb_lock);
5176                 return 0;
5177         }
5178
5179         temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
5180         divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
5181                 SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
5182         phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
5183                 SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
5184
5185         temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
5186         auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
5187                 SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
5188
5189         mutex_unlock(&dev_priv->sb_lock);
5190
5191         desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
5192
5193         return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
5194                                  desired_divisor << auxdiv);
5195 }
5196
5197 static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
5198                                            enum pipe pch_transcoder)
5199 {
5200         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5201         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5202         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5203
5204         intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
5205                        intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
5206         intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
5207                        intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
5208         intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
5209                        intel_de_read(dev_priv, HSYNC(cpu_transcoder)));
5210
5211         intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
5212                        intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
5213         intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
5214                        intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
5215         intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
5216                        intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
5217         intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
5218                        intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
5219 }
5220
5221 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
5222 {
5223         u32 temp;
5224
5225         temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
5226         if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
5227                 return;
5228
5229         drm_WARN_ON(&dev_priv->drm,
5230                     intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
5231                     FDI_RX_ENABLE);
5232         drm_WARN_ON(&dev_priv->drm,
5233                     intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
5234                     FDI_RX_ENABLE);
5235
5236         temp &= ~FDI_BC_BIFURCATION_SELECT;
5237         if (enable)
5238                 temp |= FDI_BC_BIFURCATION_SELECT;
5239
5240         drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
5241                     enable ? "en" : "dis");
5242         intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
5243         intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
5244 }
5245
5246 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
5247 {
5248         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5249         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5250
5251         switch (crtc->pipe) {
5252         case PIPE_A:
5253                 break;
5254         case PIPE_B:
5255                 if (crtc_state->fdi_lanes > 2)
5256                         cpt_set_fdi_bc_bifurcation(dev_priv, false);
5257                 else
5258                         cpt_set_fdi_bc_bifurcation(dev_priv, true);
5259
5260                 break;
5261         case PIPE_C:
5262                 cpt_set_fdi_bc_bifurcation(dev_priv, true);
5263
5264                 break;
5265         default:
5266                 BUG();
5267         }
5268 }
5269
5270 /*
5271  * Finds the encoder associated with the given CRTC. This can only be
5272  * used when we know that the CRTC isn't feeding multiple encoders!
5273  */
5274 static struct intel_encoder *
5275 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
5276                            const struct intel_crtc_state *crtc_state)
5277 {
5278         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5279         const struct drm_connector_state *connector_state;
5280         const struct drm_connector *connector;
5281         struct intel_encoder *encoder = NULL;
5282         int num_encoders = 0;
5283         int i;
5284
5285         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5286                 if (connector_state->crtc != &crtc->base)
5287                         continue;
5288
5289                 encoder = to_intel_encoder(connector_state->best_encoder);
5290                 num_encoders++;
5291         }
5292
5293         drm_WARN(encoder->base.dev, num_encoders != 1,
5294                  "%d encoders for pipe %c\n",
5295                  num_encoders, pipe_name(crtc->pipe));
5296
5297         return encoder;
5298 }
5299
5300 /*
5301  * Enable PCH resources required for PCH ports:
5302  *   - PCH PLLs
5303  *   - FDI training & RX/TX
5304  *   - update transcoder timings
5305  *   - DP transcoding bits
5306  *   - transcoder
5307  */
5308 static void ilk_pch_enable(const struct intel_atomic_state *state,
5309                            const struct intel_crtc_state *crtc_state)
5310 {
5311         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5312         struct drm_device *dev = crtc->base.dev;
5313         struct drm_i915_private *dev_priv = to_i915(dev);
5314         enum pipe pipe = crtc->pipe;
5315         u32 temp;
5316
5317         assert_pch_transcoder_disabled(dev_priv, pipe);
5318
5319         if (IS_IVYBRIDGE(dev_priv))
5320                 ivb_update_fdi_bc_bifurcation(crtc_state);
5321
5322         /* Write the TU size bits before fdi link training, so that error
5323          * detection works. */
5324         intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
5325                        intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
5326
5327         /* For PCH output, training FDI link */
5328         dev_priv->display.fdi_link_train(crtc, crtc_state);
5329
5330         /* We need to program the right clock selection before writing the pixel
5331          * mutliplier into the DPLL. */
5332         if (HAS_PCH_CPT(dev_priv)) {
5333                 u32 sel;
5334
5335                 temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
5336                 temp |= TRANS_DPLL_ENABLE(pipe);
5337                 sel = TRANS_DPLLB_SEL(pipe);
5338                 if (crtc_state->shared_dpll ==
5339                     intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
5340                         temp |= sel;
5341                 else
5342                         temp &= ~sel;
5343                 intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
5344         }
5345
5346         /* XXX: pch pll's can be enabled any time before we enable the PCH
5347          * transcoder, and we actually should do this to not upset any PCH
5348          * transcoder that already use the clock when we share it.
5349          *
5350          * Note that enable_shared_dpll tries to do the right thing, but
5351          * get_shared_dpll unconditionally resets the pll - we need that to have
5352          * the right LVDS enable sequence. */
5353         intel_enable_shared_dpll(crtc_state);
5354
5355         /* set transcoder timing, panel must allow it */
5356         assert_panel_unlocked(dev_priv, pipe);
5357         ilk_pch_transcoder_set_timings(crtc_state, pipe);
5358
5359         intel_fdi_normal_train(crtc);
5360
5361         /* For PCH DP, enable TRANS_DP_CTL */
5362         if (HAS_PCH_CPT(dev_priv) &&
5363             intel_crtc_has_dp_encoder(crtc_state)) {
5364                 const struct drm_display_mode *adjusted_mode =
5365                         &crtc_state->hw.adjusted_mode;
5366                 u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
5367                 i915_reg_t reg = TRANS_DP_CTL(pipe);
5368                 enum port port;
5369
5370                 temp = intel_de_read(dev_priv, reg);
5371                 temp &= ~(TRANS_DP_PORT_SEL_MASK |
5372                           TRANS_DP_SYNC_MASK |
5373                           TRANS_DP_BPC_MASK);
5374                 temp |= TRANS_DP_OUTPUT_ENABLE;
5375                 temp |= bpc << 9; /* same format but at 11:9 */
5376
5377                 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
5378                         temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
5379                 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
5380                         temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
5381
5382                 port = intel_get_crtc_new_encoder(state, crtc_state)->port;
5383                 drm_WARN_ON(dev, port < PORT_B || port > PORT_D);
5384                 temp |= TRANS_DP_PORT_SEL(port);
5385
5386                 intel_de_write(dev_priv, reg, temp);
5387         }
5388
5389         ilk_enable_pch_transcoder(crtc_state);
5390 }
5391
/*
 * LPT variant of the PCH enable sequence: the transcoder is tied to
 * PIPE_A and is clocked via iCLKIP instead of a PCH PLL.
 */
void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

        assert_pch_transcoder_disabled(dev_priv, PIPE_A);

        /* Program the iCLKIP clock before enabling the transcoder. */
        lpt_program_iclkip(crtc_state);

        /* Set transcoder timing. */
        ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);

        lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
5407
/*
 * Sanity-check a completed modeset by sampling the pipe scanline
 * register (PIPEDSL): if it does not change within the wait windows,
 * the pipe is not running and we log an error.
 */
static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
                               enum pipe pipe)
{
        i915_reg_t dslreg = PIPEDSL(pipe);
        u32 temp;

        temp = intel_de_read(dev_priv, dslreg);
        udelay(500);
        if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
                /* Retry once before declaring the pipe stuck. */
                if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
                        drm_err(&dev_priv->drm,
                                "mode set failed: pipe %c stuck\n",
                                pipe_name(pipe));
        }
}
5423
5424 /*
5425  * The hardware phase 0.0 refers to the center of the pixel.
5426  * We want to start from the top/left edge which is phase
5427  * -0.5. That matches how the hardware calculates the scaling
5428  * factors (from top-left of the first pixel to bottom-right
5429  * of the last pixel, as opposed to the pixel centers).
5430  *
5431  * For 4:2:0 subsampled chroma planes we obviously have to
5432  * adjust that so that the chroma sample position lands in
5433  * the right spot.
5434  *
5435  * Note that for packed YCbCr 4:2:2 formats there is no way to
5436  * control chroma siting. The hardware simply replicates the
5437  * chroma samples for both of the luma samples, and thus we don't
5438  * actually get the expected MPEG2 chroma siting convention :(
5439  * The same behaviour is observed on pre-SKL platforms as well.
5440  *
5441  * Theory behind the formula (note that we ignore sub-pixel
5442  * source coordinates):
5443  * s = source sample position
5444  * d = destination sample position
5445  *
5446  * Downscaling 4:1:
5447  * -0.5
5448  * | 0.0
5449  * | |     1.5 (initial phase)
5450  * | |     |
5451  * v v     v
5452  * | s | s | s | s |
5453  * |       d       |
5454  *
5455  * Upscaling 1:4:
5456  * -0.5
5457  * | -0.375 (initial phase)
5458  * | |     0.0
5459  * | |     |
5460  * v v     v
5461  * |       s       |
5462  * | d | d | d | d |
5463  */
5464 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
5465 {
5466         int phase = -0x8000;
5467         u16 trip = 0;
5468
5469         if (chroma_cosited)
5470                 phase += (sub - 1) * 0x8000 / sub;
5471
5472         phase += scale / (2 * sub);
5473
5474         /*
5475          * Hardware initial phase limited to [-0.5:1.5].
5476          * Since the max hardware scale factor is 3.0, we
5477          * should never actually excdeed 1.0 here.
5478          */
5479         WARN_ON(phase < -0x8000 || phase > 0x18000);
5480
5481         if (phase < 0)
5482                 phase = 0x10000 + phase;
5483         else
5484                 trip = PS_PHASE_TRIP;
5485
5486         return ((phase >> 2) & PS_PHASE_MASK) | trip;
5487 }
5488
5489 #define SKL_MIN_SRC_W 8
5490 #define SKL_MAX_SRC_W 4096
5491 #define SKL_MIN_SRC_H 8
5492 #define SKL_MAX_SRC_H 4096
5493 #define SKL_MIN_DST_W 8
5494 #define SKL_MAX_DST_W 4096
5495 #define SKL_MIN_DST_H 8
5496 #define SKL_MAX_DST_H 4096
5497 #define ICL_MAX_SRC_W 5120
5498 #define ICL_MAX_SRC_H 4096
5499 #define ICL_MAX_DST_W 5120
5500 #define ICL_MAX_DST_H 4096
5501 #define SKL_MIN_YUV_420_SRC_W 16
5502 #define SKL_MIN_YUV_420_SRC_H 16
5503
/*
 * skl_update_scaler - stage allocation/freeing of a scaler in the crtc state
 * @crtc_state: crtc state to update
 * @force_detach: free the scaler regardless of need (user being disabled)
 * @scaler_user: bit index identifying the user (plane index or SKL_CRTC_INDEX)
 * @scaler_id: in/out; currently assigned scaler id, or -1 if none
 * @src_w, @src_h: source dimensions in pixels
 * @dst_w, @dst_h: destination dimensions in pixels
 * @format: fb format, or NULL for pipe (panel fitter) scaling
 * @modifier: fb modifier (only meaningful with @format)
 * @need_scaler: caller already determined a scaler is required
 *
 * Only scaler_users/scaler_id bookkeeping is updated here; the actual
 * register programming happens later in plane/panel-fitter code.
 *
 * Returns 0 on success, -EINVAL if the requested scaling is unsupported.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
                  unsigned int scaler_user, int *scaler_id,
                  int src_w, int src_h, int dst_w, int dst_h,
                  const struct drm_format_info *format,
                  u64 modifier, bool need_scaler)
{
        struct intel_crtc_scaler_state *scaler_state =
                &crtc_state->scaler_state;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
        const struct drm_display_mode *adjusted_mode =
                &crtc_state->hw.adjusted_mode;

        /*
         * Src coordinates are already rotated by 270 degrees for
         * the 90/270 degree plane rotation cases (to match the
         * GTT mapping), hence no need to account for rotation here.
         */
        if (src_w != dst_w || src_h != dst_h)
                need_scaler = true;

        /*
         * Scaling/fitting not supported in IF-ID mode in GEN9+
         * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
         * Once NV12 is enabled, handle it here while allocating scaler
         * for NV12.
         */
        if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
            need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
                drm_dbg_kms(&dev_priv->drm,
                            "Pipe/Plane scaling not supported with IF-ID mode\n");
                return -EINVAL;
        }

        /*
         * if plane is being disabled or scaler is no more required or force detach
         *  - free scaler binded to this plane/crtc
         *  - in order to do this, update crtc->scaler_usage
         *
         * Here scaler state in crtc_state is set free so that
         * scaler can be assigned to other user. Actual register
         * update to free the scaler is done in plane/panel-fit programming.
         * For this purpose crtc/plane_state->scaler_id isn't reset here.
         */
        if (force_detach || !need_scaler) {
                if (*scaler_id >= 0) {
                        scaler_state->scaler_users &= ~(1 << scaler_user);
                        scaler_state->scalers[*scaler_id].in_use = 0;

                        drm_dbg_kms(&dev_priv->drm,
                                    "scaler_user index %u.%u: "
                                    "Staged freeing scaler id %d scaler_users = 0x%x\n",
                                    intel_crtc->pipe, scaler_user, *scaler_id,
                                    scaler_state->scaler_users);
                        *scaler_id = -1;
                }
                return 0;
        }

        /* Planar YUV sources have a larger minimum size. */
        if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
            (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
                drm_dbg_kms(&dev_priv->drm,
                            "Planar YUV: src dimensions not met\n");
                return -EINVAL;
        }

        /* range checks; gen11+ allows a wider max source/destination */
        if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
            dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
            (INTEL_GEN(dev_priv) >= 11 &&
             (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
              dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
            (INTEL_GEN(dev_priv) < 11 &&
             (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
              dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
                drm_dbg_kms(&dev_priv->drm,
                            "scaler_user index %u.%u: src %ux%u dst %ux%u "
                            "size is out of scaler range\n",
                            intel_crtc->pipe, scaler_user, src_w, src_h,
                            dst_w, dst_h);
                return -EINVAL;
        }

        /* mark this plane as a scaler user in crtc_state */
        scaler_state->scaler_users |= (1 << scaler_user);
        drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
                    "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
                    intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
                    scaler_state->scaler_users);

        return 0;
}
5598
5599 static int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state)
5600 {
5601         const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
5602         int width, height;
5603
5604         if (crtc_state->pch_pfit.enabled) {
5605                 width = drm_rect_width(&crtc_state->pch_pfit.dst);
5606                 height = drm_rect_height(&crtc_state->pch_pfit.dst);
5607         } else {
5608                 width = pipe_mode->crtc_hdisplay;
5609                 height = pipe_mode->crtc_vdisplay;
5610         }
5611         return skl_update_scaler(crtc_state, !crtc_state->hw.active,
5612                                  SKL_CRTC_INDEX,
5613                                  &crtc_state->scaler_state.scaler_id,
5614                                  crtc_state->pipe_src_w, crtc_state->pipe_src_h,
5615                                  width, height, NULL, 0,
5616                                  crtc_state->pch_pfit.enabled);
5617 }
5618
5619 /**
5620  * skl_update_scaler_plane - Stages update to scaler state for a given plane.
5621  * @crtc_state: crtc's scaler state
5622  * @plane_state: atomic plane state to update
5623  *
5624  * Return
5625  *     0 - scaler_usage updated successfully
5626  *    error - requested scaling cannot be supported or other error condition
5627  */
5628 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
5629                                    struct intel_plane_state *plane_state)
5630 {
5631         struct intel_plane *intel_plane =
5632                 to_intel_plane(plane_state->uapi.plane);
5633         struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
5634         struct drm_framebuffer *fb = plane_state->hw.fb;
5635         int ret;
5636         bool force_detach = !fb || !plane_state->uapi.visible;
5637         bool need_scaler = false;
5638
5639         /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
5640         if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
5641             fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
5642                 need_scaler = true;
5643
5644         ret = skl_update_scaler(crtc_state, force_detach,
5645                                 drm_plane_index(&intel_plane->base),
5646                                 &plane_state->scaler_id,
5647                                 drm_rect_width(&plane_state->uapi.src) >> 16,
5648                                 drm_rect_height(&plane_state->uapi.src) >> 16,
5649                                 drm_rect_width(&plane_state->uapi.dst),
5650                                 drm_rect_height(&plane_state->uapi.dst),
5651                                 fb ? fb->format : NULL,
5652                                 fb ? fb->modifier : 0,
5653                                 need_scaler);
5654
5655         if (ret || plane_state->scaler_id < 0)
5656                 return ret;
5657
5658         /* check colorkey */
5659         if (plane_state->ckey.flags) {
5660                 drm_dbg_kms(&dev_priv->drm,
5661                             "[PLANE:%d:%s] scaling with color key not allowed",
5662                             intel_plane->base.base.id,
5663                             intel_plane->base.name);
5664                 return -EINVAL;
5665         }
5666
5667         /* Check src format */
5668         switch (fb->format->format) {
5669         case DRM_FORMAT_RGB565:
5670         case DRM_FORMAT_XBGR8888:
5671         case DRM_FORMAT_XRGB8888:
5672         case DRM_FORMAT_ABGR8888:
5673         case DRM_FORMAT_ARGB8888:
5674         case DRM_FORMAT_XRGB2101010:
5675         case DRM_FORMAT_XBGR2101010:
5676         case DRM_FORMAT_ARGB2101010:
5677         case DRM_FORMAT_ABGR2101010:
5678         case DRM_FORMAT_YUYV:
5679         case DRM_FORMAT_YVYU:
5680         case DRM_FORMAT_UYVY:
5681         case DRM_FORMAT_VYUY:
5682         case DRM_FORMAT_NV12:
5683         case DRM_FORMAT_XYUV8888:
5684         case DRM_FORMAT_P010:
5685         case DRM_FORMAT_P012:
5686         case DRM_FORMAT_P016:
5687         case DRM_FORMAT_Y210:
5688         case DRM_FORMAT_Y212:
5689         case DRM_FORMAT_Y216:
5690         case DRM_FORMAT_XVYU2101010:
5691         case DRM_FORMAT_XVYU12_16161616:
5692         case DRM_FORMAT_XVYU16161616:
5693                 break;
5694         case DRM_FORMAT_XBGR16161616F:
5695         case DRM_FORMAT_ABGR16161616F:
5696         case DRM_FORMAT_XRGB16161616F:
5697         case DRM_FORMAT_ARGB16161616F:
5698                 if (INTEL_GEN(dev_priv) >= 11)
5699                         break;
5700                 fallthrough;
5701         default:
5702                 drm_dbg_kms(&dev_priv->drm,
5703                             "[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
5704                             intel_plane->base.base.id, intel_plane->base.name,
5705                             fb->base.id, fb->format->format);
5706                 return -EINVAL;
5707         }
5708
5709         return 0;
5710 }
5711
5712 void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
5713 {
5714         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
5715         int i;
5716
5717         for (i = 0; i < crtc->num_scalers; i++)
5718                 skl_detach_scaler(crtc, i);
5719 }
5720
/* Map a linear coefficient index onto its filter tap number (0-6). */
static int cnl_coef_tap(int i)
{
        return i - (i / 7) * 7;
}
5725
5726 static u16 cnl_nearest_filter_coef(int t)
5727 {
5728         return t == 3 ? 0x0800 : 0x3000;
5729 }
5730
5731 /*
5732  *  Theory behind setting nearest-neighbor integer scaling:
5733  *
5734  *  17 phase of 7 taps requires 119 coefficients in 60 dwords per set.
5735  *  The letter represents the filter tap (D is the center tap) and the number
5736  *  represents the coefficient set for a phase (0-16).
5737  *
5738  *         +------------+------------------------+------------------------+
 *         |Index value |Data value coefficient 1|Data value coefficient 2|
5740  *         +------------+------------------------+------------------------+
5741  *         |   00h      |          B0            |          A0            |
5742  *         +------------+------------------------+------------------------+
5743  *         |   01h      |          D0            |          C0            |
5744  *         +------------+------------------------+------------------------+
5745  *         |   02h      |          F0            |          E0            |
5746  *         +------------+------------------------+------------------------+
5747  *         |   03h      |          A1            |          G0            |
5748  *         +------------+------------------------+------------------------+
5749  *         |   04h      |          C1            |          B1            |
5750  *         +------------+------------------------+------------------------+
5751  *         |   ...      |          ...           |          ...           |
5752  *         +------------+------------------------+------------------------+
5753  *         |   38h      |          B16           |          A16           |
5754  *         +------------+------------------------+------------------------+
5755  *         |   39h      |          D16           |          C16           |
5756  *         +------------+------------------------+------------------------+
 *         |   3Ah      |          F16           |          E16           |
5758  *         +------------+------------------------+------------------------+
5759  *         |   3Bh      |        Reserved        |          G16           |
5760  *         +------------+------------------------+------------------------+
5761  *
 *  To enable nearest-neighbor scaling:  program scaler coefficients with
5763  *  the center tap (Dxx) values set to 1 and all other values set to 0 as per
5764  *  SCALER_COEFFICIENT_FORMAT
5765  *
5766  */
5767
5768 static void cnl_program_nearest_filter_coefs(struct drm_i915_private *dev_priv,
5769                                              enum pipe pipe, int id, int set)
5770 {
5771         int i;
5772
5773         intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set),
5774                           PS_COEE_INDEX_AUTO_INC);
5775
5776         for (i = 0; i < 17 * 7; i += 2) {
5777                 u32 tmp;
5778                 int t;
5779
5780                 t = cnl_coef_tap(i);
5781                 tmp = cnl_nearest_filter_coef(t);
5782
5783                 t = cnl_coef_tap(i + 1);
5784                 tmp |= cnl_nearest_filter_coef(t) << 16;
5785
5786                 intel_de_write_fw(dev_priv, CNL_PS_COEF_DATA_SET(pipe, id, set),
5787                                   tmp);
5788         }
5789
5790         intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set), 0);
5791 }
5792
5793 inline u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
5794 {
5795         if (filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR) {
5796                 return (PS_FILTER_PROGRAMMED |
5797                         PS_Y_VERT_FILTER_SELECT(set) |
5798                         PS_Y_HORZ_FILTER_SELECT(set) |
5799                         PS_UV_VERT_FILTER_SELECT(set) |
5800                         PS_UV_HORZ_FILTER_SELECT(set));
5801         }
5802
5803         return PS_FILTER_MEDIUM;
5804 }
5805
5806 void skl_scaler_setup_filter(struct drm_i915_private *dev_priv, enum pipe pipe,
5807                              int id, int set, enum drm_scaling_filter filter)
5808 {
5809         switch (filter) {
5810         case DRM_SCALING_FILTER_DEFAULT:
5811                 break;
5812         case DRM_SCALING_FILTER_NEAREST_NEIGHBOR:
5813                 cnl_program_nearest_filter_coefs(dev_priv, pipe, id, set);
5814                 break;
5815         default:
5816                 MISSING_CASE(filter);
5817         }
5818 }
5819
/*
 * Enable the pipe scaler acting as the panel fitter (pch_pfit), using
 * the scaler id previously assigned in the crtc state.
 */
static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        const struct intel_crtc_scaler_state *scaler_state =
                &crtc_state->scaler_state;
        /* Source rect is the full pipe, in 16.16 fixed point. */
        struct drm_rect src = {
                .x2 = crtc_state->pipe_src_w << 16,
                .y2 = crtc_state->pipe_src_h << 16,
        };
        const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
        u16 uv_rgb_hphase, uv_rgb_vphase;
        enum pipe pipe = crtc->pipe;
        int width = drm_rect_width(dst);
        int height = drm_rect_height(dst);
        int x = dst->x1;
        int y = dst->y1;
        int hscale, vscale;
        unsigned long irqflags;
        int id;
        u32 ps_ctrl;

        if (!crtc_state->pch_pfit.enabled)
                return;

        /* A scaler must already have been assigned during atomic check. */
        if (drm_WARN_ON(&dev_priv->drm,
                        crtc_state->scaler_state.scaler_id < 0))
                return;

        hscale = drm_rect_calc_hscale(&src, dst, 0, INT_MAX);
        vscale = drm_rect_calc_vscale(&src, dst, 0, INT_MAX);

        /* Initial phases for non-subsampled (sub=1), non-co-sited data. */
        uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
        uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);

        id = scaler_state->scaler_id;

        ps_ctrl = skl_scaler_get_filter_select(crtc_state->hw.scaling_filter, 0);
        ps_ctrl |=  PS_SCALER_EN | scaler_state->scalers[id].mode;

        /* The raw (_fw) register writes below are done under the uncore lock. */
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        skl_scaler_setup_filter(dev_priv, pipe, id, 0,
                                crtc_state->hw.scaling_filter);

        intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), ps_ctrl);

        intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
                          PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
        intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
                          PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
        intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
                          x << 16 | y);
        intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
                          width << 16 | height);

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
5878
5879 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
5880 {
5881         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5882         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5883         const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
5884         enum pipe pipe = crtc->pipe;
5885         int width = drm_rect_width(dst);
5886         int height = drm_rect_height(dst);
5887         int x = dst->x1;
5888         int y = dst->y1;
5889
5890         if (!crtc_state->pch_pfit.enabled)
5891                 return;
5892
5893         /* Force use of hard-coded filter coefficients
5894          * as some pre-programmed values are broken,
5895          * e.g. x201.
5896          */
5897         if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
5898                 intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
5899                                PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
5900         else
5901                 intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
5902                                PF_FILTER_MED_3x3);
5903         intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
5904         intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
5905 }
5906
/*
 * hsw_enable_ips - enable IPS if the crtc state asks for it
 * @crtc_state: state of the crtc being enabled
 *
 * On BDW this goes through the pcode mailbox; on HSW IPS_CTL is written
 * directly and we wait for the enable bit to stick.
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!crtc_state->ips_enabled)
                return;

        /*
         * We can only enable IPS after we enable a plane and wait for a vblank
         * This function is called from post_plane_update, which is run after
         * a vblank wait.
         */
        drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

        if (IS_BROADWELL(dev_priv)) {
                drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
                                                         IPS_ENABLE | IPS_PCODE_CONTROL));
                /* Quoting Art Runyan: "its not safe to expect any particular
                 * value in IPS_CTL bit 31 after enabling IPS through the
                 * mailbox." Moreover, the mailbox may return a bogus state,
                 * so we need to just enable it and continue on.
                 */
        } else {
                intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
                /* The bit only becomes 1 in the next vblank, so this wait here
                 * is essentially intel_wait_for_vblank. If we don't have this
                 * and don't wait for vblanks until the end of crtc_enable, then
                 * the HW state readout code will complain that the expected
                 * IPS_CTL value is not the one we read. */
                if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
                        drm_err(&dev_priv->drm,
                                "Timed out waiting for IPS enable\n");
        }
}
5943
/*
 * hsw_disable_ips - disable IPS if the crtc state has it enabled
 * @crtc_state: state of the crtc being disabled
 *
 * Mirrors hsw_enable_ips: BDW via the pcode mailbox, HSW via IPS_CTL.
 * Finishes with a vblank wait before any plane can be disabled.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!crtc_state->ips_enabled)
                return;

        if (IS_BROADWELL(dev_priv)) {
                drm_WARN_ON(dev,
                            sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
                /*
                 * Wait for PCODE to finish disabling IPS. The BSpec specified
                 * 42ms timeout value leads to occasional timeouts so use 100ms
                 * instead.
                 */
                if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
                        drm_err(&dev_priv->drm,
                                "Timed out waiting for IPS disable\n");
        } else {
                intel_de_write(dev_priv, IPS_CTL, 0);
                intel_de_posting_read(dev_priv, IPS_CTL);
        }

        /* We need to wait for a vblank before we can disable the plane. */
        intel_wait_for_vblank(dev_priv, crtc->pipe);
}
5972
5973 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
5974 {
5975         if (intel_crtc->overlay)
5976                 (void) intel_overlay_switch_off(intel_crtc->overlay);
5977
5978         /* Let userspace switch the overlay on again. In most cases userspace
5979          * has to recompute where to put it anyway.
5980          */
5981 }
5982
5983 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
5984                                        const struct intel_crtc_state *new_crtc_state)
5985 {
5986         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
5987         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5988
5989         if (!old_crtc_state->ips_enabled)
5990                 return false;
5991
5992         if (intel_crtc_needs_modeset(new_crtc_state))
5993                 return true;
5994
5995         /*
5996          * Workaround : Do not read or write the pipe palette/gamma data while
5997          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5998          *
5999          * Disable IPS before we program the LUT.
6000          */
6001         if (IS_HASWELL(dev_priv) &&
6002             (new_crtc_state->uapi.color_mgmt_changed ||
6003              new_crtc_state->update_pipe) &&
6004             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
6005                 return true;
6006
6007         return !new_crtc_state->ips_enabled;
6008 }
6009
6010 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
6011                                        const struct intel_crtc_state *new_crtc_state)
6012 {
6013         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
6014         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6015
6016         if (!new_crtc_state->ips_enabled)
6017                 return false;
6018
6019         if (intel_crtc_needs_modeset(new_crtc_state))
6020                 return true;
6021
6022         /*
6023          * Workaround : Do not read or write the pipe palette/gamma data while
6024          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
6025          *
6026          * Re-enable IPS after the LUT has been programmed.
6027          */
6028         if (IS_HASWELL(dev_priv) &&
6029             (new_crtc_state->uapi.color_mgmt_changed ||
6030              new_crtc_state->update_pipe) &&
6031             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
6032                 return true;
6033
6034         /*
6035          * We can't read out IPS on broadwell, assume the worst and
6036          * forcibly enable IPS on the first fastset.
6037          */
6038         if (new_crtc_state->update_pipe && old_crtc_state->inherited)
6039                 return true;
6040
6041         return !old_crtc_state->ips_enabled;
6042 }
6043
6044 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
6045 {
6046         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
6047
6048         if (!crtc_state->nv12_planes)
6049                 return false;
6050
6051         /* WA Display #0827: Gen9:all */
6052         if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
6053                 return true;
6054
6055         return false;
6056 }
6057
6058 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
6059 {
6060         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
6061
6062         /* Wa_2006604312:icl,ehl */
6063         if (crtc_state->scaler_state.scaler_users > 0 && IS_GEN(dev_priv, 11))
6064                 return true;
6065
6066         return false;
6067 }
6068
6069 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
6070                             const struct intel_crtc_state *new_crtc_state)
6071 {
6072         return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
6073                 new_crtc_state->active_planes;
6074 }
6075
6076 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
6077                              const struct intel_crtc_state *new_crtc_state)
6078 {
6079         return old_crtc_state->active_planes &&
6080                 (!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
6081 }
6082
/*
 * Post-plane-update work for @crtc: flush frontbuffer tracking,
 * perform the post-vblank watermark update, re-enable IPS/FBC and
 * drop the workarounds that the new plane configuration no longer
 * needs.
 */
static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	/* Legacy (non-atomic) watermark update, post-vblank phase. */
	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(crtc);

	/* Re-enable IPS now that the LUT/plane update has happened. */
	if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
		hsw_enable_ips(new_crtc_state);

	intel_fbc_post_update(state, crtc);

	/* WA Display #0827: drop once no NV12 planes remain. */
	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	/* Wa_2006604312: drop once no scalers remain in use. */
	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);
}
6111
6112 static void skl_disable_async_flip_wa(struct intel_atomic_state *state,
6113                                       struct intel_crtc *crtc,
6114                                       const struct intel_crtc_state *new_crtc_state)
6115 {
6116         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6117         struct intel_plane *plane;
6118         struct intel_plane_state *new_plane_state;
6119         int i;
6120
6121         for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
6122                 u32 update_mask = new_crtc_state->update_planes;
6123                 u32 plane_ctl, surf_addr;
6124                 enum plane_id plane_id;
6125                 unsigned long irqflags;
6126                 enum pipe pipe;
6127
6128                 if (crtc->pipe != plane->pipe ||
6129                     !(update_mask & BIT(plane->id)))
6130                         continue;
6131
6132                 plane_id = plane->id;
6133                 pipe = plane->pipe;
6134
6135                 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
6136                 plane_ctl = intel_de_read_fw(dev_priv, PLANE_CTL(pipe, plane_id));
6137                 surf_addr = intel_de_read_fw(dev_priv, PLANE_SURF(pipe, plane_id));
6138
6139                 plane_ctl &= ~PLANE_CTL_ASYNC_FLIP;
6140
6141                 intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
6142                 intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), surf_addr);
6143                 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
6144         }
6145
6146         intel_wait_for_vblank(dev_priv, crtc->pipe);
6147 }
6148
/*
 * Pre-plane-update work for @crtc: disable IPS/FBC where required,
 * arm the display workarounds the new configuration needs, program
 * the pre-vblank ("intermediate") watermarks and suppress spurious
 * underrun reporting before planes get disabled.
 */
static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	/* IPS must be off before the LUT/plane update (see the helper). */
	if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
		hsw_disable_ips(old_crtc_state);

	/* FBC pre-update may require a vblank wait before continuing. */
	if (intel_fbc_pre_update(state, crtc))
		intel_wait_for_vblank(dev_priv, pipe);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl,ehl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling.  LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (old_crtc_state->hw.active &&
	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!intel_crtc_needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks.  For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (dev_priv->display.initial_watermarks)
			dev_priv->display.initial_watermarks(state, crtc);
		else if (new_crtc_state->update_wm_pre)
			intel_update_watermarks(crtc);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * WA for platforms where async address update enable bit
	 * is double buffered and only latched at start of vblank.
	 */
	if (old_crtc_state->uapi.async_flip &&
	    !new_crtc_state->uapi.async_flip &&
	    IS_GEN_RANGE(dev_priv, 9, 10))
		skl_disable_async_flip_wa(state, crtc, new_crtc_state);
}
6244
6245 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
6246                                       struct intel_crtc *crtc)
6247 {
6248         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6249         const struct intel_crtc_state *new_crtc_state =
6250                 intel_atomic_get_new_crtc_state(state, crtc);
6251         unsigned int update_mask = new_crtc_state->update_planes;
6252         const struct intel_plane_state *old_plane_state;
6253         struct intel_plane *plane;
6254         unsigned fb_bits = 0;
6255         int i;
6256
6257         intel_crtc_dpms_overlay_disable(crtc);
6258
6259         for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
6260                 if (crtc->pipe != plane->pipe ||
6261                     !(update_mask & BIT(plane->id)))
6262                         continue;
6263
6264                 intel_disable_plane(plane, new_crtc_state);
6265
6266                 if (old_plane_state->uapi.visible)
6267                         fb_bits |= plane->frontbuffer_bit;
6268         }
6269
6270         intel_frontbuffer_flip(dev_priv, fb_bits);
6271 }
6272
6273 /*
6274  * intel_connector_primary_encoder - get the primary encoder for a connector
6275  * @connector: connector for which to return the encoder
6276  *
6277  * Returns the primary encoder for a connector. There is a 1:1 mapping from
6278  * all connectors to their encoder, except for DP-MST connectors which have
6279  * both a virtual and a primary encoder. These DP-MST primary encoders can be
6280  * pointed to by as many DP-MST connectors as there are pipes.
6281  */
6282 static struct intel_encoder *
6283 intel_connector_primary_encoder(struct intel_connector *connector)
6284 {
6285         struct intel_encoder *encoder;
6286
6287         if (connector->mst_port)
6288                 return &dp_to_dig_port(connector->mst_port)->base;
6289
6290         encoder = intel_attached_encoder(connector);
6291         drm_WARN_ON(connector->base.dev, !encoder);
6292
6293         return encoder;
6294 }
6295
6296 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
6297 {
6298         struct drm_connector_state *new_conn_state;
6299         struct drm_connector *connector;
6300         int i;
6301
6302         for_each_new_connector_in_state(&state->base, connector, new_conn_state,
6303                                         i) {
6304                 struct intel_connector *intel_connector;
6305                 struct intel_encoder *encoder;
6306                 struct intel_crtc *crtc;
6307
6308                 if (!intel_connector_needs_modeset(state, connector))
6309                         continue;
6310
6311                 intel_connector = to_intel_connector(connector);
6312                 encoder = intel_connector_primary_encoder(intel_connector);
6313                 if (!encoder->update_prepare)
6314                         continue;
6315
6316                 crtc = new_conn_state->crtc ?
6317                         to_intel_crtc(new_conn_state->crtc) : NULL;
6318                 encoder->update_prepare(state, encoder, crtc);
6319         }
6320 }
6321
6322 static void intel_encoders_update_complete(struct intel_atomic_state *state)
6323 {
6324         struct drm_connector_state *new_conn_state;
6325         struct drm_connector *connector;
6326         int i;
6327
6328         for_each_new_connector_in_state(&state->base, connector, new_conn_state,
6329                                         i) {
6330                 struct intel_connector *intel_connector;
6331                 struct intel_encoder *encoder;
6332                 struct intel_crtc *crtc;
6333
6334                 if (!intel_connector_needs_modeset(state, connector))
6335                         continue;
6336
6337                 intel_connector = to_intel_connector(connector);
6338                 encoder = intel_connector_primary_encoder(intel_connector);
6339                 if (!encoder->update_complete)
6340                         continue;
6341
6342                 crtc = new_conn_state->crtc ?
6343                         to_intel_crtc(new_conn_state->crtc) : NULL;
6344                 encoder->update_complete(state, encoder, crtc);
6345         }
6346 }
6347
6348 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
6349                                           struct intel_crtc *crtc)
6350 {
6351         const struct intel_crtc_state *crtc_state =
6352                 intel_atomic_get_new_crtc_state(state, crtc);
6353         const struct drm_connector_state *conn_state;
6354         struct drm_connector *conn;
6355         int i;
6356
6357         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6358                 struct intel_encoder *encoder =
6359                         to_intel_encoder(conn_state->best_encoder);
6360
6361                 if (conn_state->crtc != &crtc->base)
6362                         continue;
6363
6364                 if (encoder->pre_pll_enable)
6365                         encoder->pre_pll_enable(state, encoder,
6366                                                 crtc_state, conn_state);
6367         }
6368 }
6369
6370 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
6371                                       struct intel_crtc *crtc)
6372 {
6373         const struct intel_crtc_state *crtc_state =
6374                 intel_atomic_get_new_crtc_state(state, crtc);
6375         const struct drm_connector_state *conn_state;
6376         struct drm_connector *conn;
6377         int i;
6378
6379         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6380                 struct intel_encoder *encoder =
6381                         to_intel_encoder(conn_state->best_encoder);
6382
6383                 if (conn_state->crtc != &crtc->base)
6384                         continue;
6385
6386                 if (encoder->pre_enable)
6387                         encoder->pre_enable(state, encoder,
6388                                             crtc_state, conn_state);
6389         }
6390 }
6391
6392 static void intel_encoders_enable(struct intel_atomic_state *state,
6393                                   struct intel_crtc *crtc)
6394 {
6395         const struct intel_crtc_state *crtc_state =
6396                 intel_atomic_get_new_crtc_state(state, crtc);
6397         const struct drm_connector_state *conn_state;
6398         struct drm_connector *conn;
6399         int i;
6400
6401         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6402                 struct intel_encoder *encoder =
6403                         to_intel_encoder(conn_state->best_encoder);
6404
6405                 if (conn_state->crtc != &crtc->base)
6406                         continue;
6407
6408                 if (encoder->enable)
6409                         encoder->enable(state, encoder,
6410                                         crtc_state, conn_state);
6411                 intel_opregion_notify_encoder(encoder, true);
6412         }
6413 }
6414
6415 static void intel_encoders_disable(struct intel_atomic_state *state,
6416                                    struct intel_crtc *crtc)
6417 {
6418         const struct intel_crtc_state *old_crtc_state =
6419                 intel_atomic_get_old_crtc_state(state, crtc);
6420         const struct drm_connector_state *old_conn_state;
6421         struct drm_connector *conn;
6422         int i;
6423
6424         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6425                 struct intel_encoder *encoder =
6426                         to_intel_encoder(old_conn_state->best_encoder);
6427
6428                 if (old_conn_state->crtc != &crtc->base)
6429                         continue;
6430
6431                 intel_opregion_notify_encoder(encoder, false);
6432                 if (encoder->disable)
6433                         encoder->disable(state, encoder,
6434                                          old_crtc_state, old_conn_state);
6435         }
6436 }
6437
6438 static void intel_encoders_post_disable(struct intel_atomic_state *state,
6439                                         struct intel_crtc *crtc)
6440 {
6441         const struct intel_crtc_state *old_crtc_state =
6442                 intel_atomic_get_old_crtc_state(state, crtc);
6443         const struct drm_connector_state *old_conn_state;
6444         struct drm_connector *conn;
6445         int i;
6446
6447         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6448                 struct intel_encoder *encoder =
6449                         to_intel_encoder(old_conn_state->best_encoder);
6450
6451                 if (old_conn_state->crtc != &crtc->base)
6452                         continue;
6453
6454                 if (encoder->post_disable)
6455                         encoder->post_disable(state, encoder,
6456                                               old_crtc_state, old_conn_state);
6457         }
6458 }
6459
6460 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
6461                                             struct intel_crtc *crtc)
6462 {
6463         const struct intel_crtc_state *old_crtc_state =
6464                 intel_atomic_get_old_crtc_state(state, crtc);
6465         const struct drm_connector_state *old_conn_state;
6466         struct drm_connector *conn;
6467         int i;
6468
6469         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6470                 struct intel_encoder *encoder =
6471                         to_intel_encoder(old_conn_state->best_encoder);
6472
6473                 if (old_conn_state->crtc != &crtc->base)
6474                         continue;
6475
6476                 if (encoder->post_pll_disable)
6477                         encoder->post_pll_disable(state, encoder,
6478                                                   old_crtc_state, old_conn_state);
6479         }
6480 }
6481
6482 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
6483                                        struct intel_crtc *crtc)
6484 {
6485         const struct intel_crtc_state *crtc_state =
6486                 intel_atomic_get_new_crtc_state(state, crtc);
6487         const struct drm_connector_state *conn_state;
6488         struct drm_connector *conn;
6489         int i;
6490
6491         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6492                 struct intel_encoder *encoder =
6493                         to_intel_encoder(conn_state->best_encoder);
6494
6495                 if (conn_state->crtc != &crtc->base)
6496                         continue;
6497
6498                 if (encoder->update_pipe)
6499                         encoder->update_pipe(state, encoder,
6500                                              crtc_state, conn_state);
6501         }
6502 }
6503
6504 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
6505 {
6506         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6507         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
6508
6509         plane->disable_plane(plane, crtc_state);
6510 }
6511
/*
 * Pipe enable sequence for ILK-style (FDI/PCH based) platforms:
 * program timings, M/N values and pipeconf, run the encoder
 * pre-enable hooks, bring up FDI/PCH where a PCH encoder is present,
 * load the LUTs, and finally enable the pipe and encoders. The order
 * of the steps below is significant.
 */
static void ilk_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Enabling an already-active crtc is a driver bug. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (new_crtc_state->has_pch_encoder)
		intel_prepare_shared_dpll(new_crtc_state);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	/* FDI link M/N values only apply with a PCH encoder. */
	if (new_crtc_state->has_pch_encoder)
		intel_cpu_transcoder_set_m_n(new_crtc_state,
					     &new_crtc_state->fdi_m_n, NULL);

	ilk_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_encoders_pre_enable(state, crtc);

	if (new_crtc_state->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ilk_fdi_pll_enable(new_crtc_state);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		ilk_pch_enable(state, new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev_priv, pipe);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (new_crtc_state->has_pch_encoder) {
		intel_wait_for_vblank(dev_priv, pipe);
		intel_wait_for_vblank(dev_priv, pipe);
	}
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
6603
6604 /* IPS only exists on ULT machines and is tied to pipe A. */
6605 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
6606 {
6607         return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
6608 }
6609
6610 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
6611                                             enum pipe pipe, bool apply)
6612 {
6613         u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
6614         u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
6615
6616         if (apply)
6617                 val |= mask;
6618         else
6619                 val &= ~mask;
6620
6621         intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
6622 }
6623
6624 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
6625 {
6626         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6627         enum pipe pipe = crtc->pipe;
6628         u32 val;
6629
6630         val = MBUS_DBOX_A_CREDIT(2);
6631
6632         if (INTEL_GEN(dev_priv) >= 12) {
6633                 val |= MBUS_DBOX_BW_CREDIT(2);
6634                 val |= MBUS_DBOX_B_CREDIT(12);
6635         } else {
6636                 val |= MBUS_DBOX_BW_CREDIT(1);
6637                 val |= MBUS_DBOX_B_CREDIT(8);
6638         }
6639
6640         intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
6641 }
6642
6643 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
6644 {
6645         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6646         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6647
6648         intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
6649                        HSW_LINETIME(crtc_state->linetime) |
6650                        HSW_IPS_LINETIME(crtc_state->ips_linetime));
6651 }
6652
6653 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
6654 {
6655         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6656         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6657         i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
6658         u32 val;
6659
6660         val = intel_de_read(dev_priv, reg);
6661         val &= ~HSW_FRAME_START_DELAY_MASK;
6662         val |= HSW_FRAME_START_DELAY(0);
6663         intel_de_write(dev_priv, reg, val);
6664 }
6665
/*
 * Bigjoiner pre-enable: resolve the master crtc/encoder for
 * @crtc_state and enable VDSC; when called for the slave pipe this
 * also runs the master's PLL and encoder pre-enable steps.
 */
static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
					 const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *master = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_crtc_state *master_crtc_state;
	struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	struct intel_encoder *encoder = NULL;
	int i;

	/* A slave pipe defers to its linked master crtc. */
	if (crtc_state->bigjoiner_slave)
		master = crtc_state->bigjoiner_linked_crtc;

	master_crtc_state = intel_atomic_get_new_crtc_state(state, master);

	/* Find the encoder feeding the master pipe. */
	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		if (conn_state->crtc != &master->base)
			continue;

		encoder = to_intel_encoder(conn_state->best_encoder);
		break;
	}

	if (!crtc_state->bigjoiner_slave) {
		/* need to enable VDSC, which we skipped in pre-enable */
		intel_dsc_enable(encoder, crtc_state);
	} else {
		/*
		 * Enable sequence steps 1-7 on bigjoiner master
		 */
		intel_encoders_pre_pll_enable(state, master);
		intel_enable_shared_dpll(master_crtc_state);
		intel_encoders_pre_enable(state, master);

		/* and DSC on slave */
		intel_dsc_enable(NULL, crtc_state);
	}
}
6704
/*
 * Pipe enable sequence for HSW+ (DDI) platforms. On bigjoiner
 * configurations the PLL/encoder pre-enable steps are handled by
 * icl_ddi_bigjoiner_pre_enable() instead of being run directly here.
 * The order of the steps below is significant.
 */
static void hsw_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	bool psl_clkgate_wa;

	/* Enabling an already-active crtc is a driver bug. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	if (!new_crtc_state->bigjoiner) {
		intel_encoders_pre_pll_enable(state, crtc);

		if (new_crtc_state->shared_dpll)
			intel_enable_shared_dpll(new_crtc_state);

		intel_encoders_pre_enable(state, crtc);
	} else {
		icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
	}

	intel_set_pipe_src_size(new_crtc_state);
	if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
		bdw_set_pipemisc(new_crtc_state);

	/* Transcoder timings are not programmed here for DSI or a bigjoiner slave. */
	if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder)) {
		intel_set_transcoder_timings(new_crtc_state);

		if (cpu_transcoder != TRANSCODER_EDP)
			intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
				       new_crtc_state->pixel_multiplier - 1);

		if (new_crtc_state->has_pch_encoder)
			intel_cpu_transcoder_set_m_n(new_crtc_state,
						     &new_crtc_state->fdi_m_n, NULL);

		hsw_set_frame_start_delay(new_crtc_state);
	}

	if (!transcoder_is_dsi(cpu_transcoder))
		hsw_set_pipeconf(new_crtc_state);

	crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
	psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
		new_crtc_state->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (INTEL_GEN(dev_priv) >= 9)
		skl_pfit_enable(new_crtc_state);
	else
		ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
	if (INTEL_GEN(dev_priv) < 9)
		intel_disable_primary_plane(new_crtc_state);

	hsw_set_linetime_wm(new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_pipe_mbus_enable(crtc);

	if (new_crtc_state->bigjoiner_slave) {
		trace_intel_pipe_enable(crtc);
		intel_crtc_vblank_on(new_crtc_state);
	}

	intel_encoders_enable(state, crtc);

	/* The WA above requires a vblank before clock gating is restored. */
	if (psl_clkgate_wa) {
		intel_wait_for_vblank(dev_priv, pipe);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
	}
}
6804
/*
 * Disable the PCH panel fitter for the pipe of @old_crtc_state,
 * clearing its control and window position/size registers.
 */
void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* To avoid upsetting the power well on haswell only disable the pfit if
	 * it's in use. The hw state code will make sure we get this right. */
	if (!old_crtc_state->pch_pfit.enabled)
		return;

	/* Clear control first, then the window registers. */
	intel_de_write(dev_priv, PF_CTL(pipe), 0);
	intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
	intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
}
6820
/*
 * Full disable sequence for ILK-style (PCH based) pipes: encoders,
 * vblank, pipe, panel fitter, FDI and finally the PCH transcoder/PLL
 * plumbing, with FIFO underrun reporting suppressed throughout.
 */
static void ilk_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_pipe(old_crtc_state);

	ilk_pfit_disable(old_crtc_state);

	/* FDI must be torn down before the post-disable encoder hooks run. */
	if (old_crtc_state->has_pch_encoder)
		ilk_fdi_disable(crtc);

	intel_encoders_post_disable(state, crtc);

	if (old_crtc_state->has_pch_encoder) {
		ilk_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev_priv)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = intel_de_read(dev_priv, reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			intel_de_write(dev_priv, reg, temp);

			/* disable DPLL_SEL */
			temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
		}

		ilk_fdi_pll_disable(crtc);
	}

	/* Re-arm underrun reporting now that everything is off. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
6877
/*
 * HSW+ CRTC disable: only the encoder (post-)disable hooks are invoked
 * here — on these platforms the rest of the teardown is driven from
 * those hooks (see FIXME below).
 */
static void hsw_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	/*
	 * FIXME collapse everything to one hook.
	 * Need care with mst->ddi interactions.
	 */
	intel_encoders_disable(state, crtc);
	intel_encoders_post_disable(state, crtc);
}
6888
/*
 * Program and enable the GMCH panel fitter per @crtc_state. Must only
 * be called while the pipe is disabled (asserted below).
 */
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* Nothing to do if the state doesn't want the fitter enabled. */
	if (!crtc_state->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	/* Ratios must be written before the control (enable) register. */
	intel_de_write(dev_priv, PFIT_PGM_RATIOS,
		       crtc_state->gmch_pfit.pgm_ratios);
	intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
}
6913
6914 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
6915 {
6916         if (phy == PHY_NONE)
6917                 return false;
6918         else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
6919                 return phy <= PHY_D;
6920         else if (IS_JSL_EHL(dev_priv))
6921                 return phy <= PHY_C;
6922         else if (INTEL_GEN(dev_priv) >= 11)
6923                 return phy <= PHY_B;
6924         else
6925                 return false;
6926 }
6927
6928 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
6929 {
6930         if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
6931                 return false;
6932         else if (INTEL_GEN(dev_priv) >= 12)
6933                 return phy >= PHY_D && phy <= PHY_I;
6934         else if (INTEL_GEN(dev_priv) >= 11 && !IS_JSL_EHL(dev_priv))
6935                 return phy >= PHY_C && phy <= PHY_F;
6936         else
6937                 return false;
6938 }
6939
6940 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
6941 {
6942         if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
6943                 return PHY_C + port - PORT_TC1;
6944         else if (IS_JSL_EHL(i915) && port == PORT_D)
6945                 return PHY_A;
6946
6947         return PHY_A + port - PORT_A;
6948 }
6949
6950 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
6951 {
6952         if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
6953                 return TC_PORT_NONE;
6954
6955         if (INTEL_GEN(dev_priv) >= 12)
6956                 return TC_PORT_1 + port - PORT_TC1;
6957         else
6958                 return TC_PORT_1 + port - PORT_C;
6959 }
6960
/*
 * Map a DDI port to the power domain covering its lanes. Unknown
 * ports fall back to POWER_DOMAIN_PORT_OTHER (with a MISSING_CASE
 * warning).
 */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	case PORT_F:
		return POWER_DOMAIN_PORT_DDI_F_LANES;
	case PORT_G:
		return POWER_DOMAIN_PORT_DDI_G_LANES;
	case PORT_H:
		return POWER_DOMAIN_PORT_DDI_H_LANES;
	case PORT_I:
		return POWER_DOMAIN_PORT_DDI_I_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}
6987
/*
 * Return the AUX power domain for @dig_port. Type-C ports operating
 * in Thunderbolt alt mode use the dedicated *_TBT domains; everything
 * else falls through to the legacy AUX mapping.
 */
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_tc(dev_priv, phy) &&
	    dig_port->tc_mode == TC_PORT_TBT_ALT) {
		switch (dig_port->aux_ch) {
		case AUX_CH_C:
			return POWER_DOMAIN_AUX_C_TBT;
		case AUX_CH_D:
			return POWER_DOMAIN_AUX_D_TBT;
		case AUX_CH_E:
			return POWER_DOMAIN_AUX_E_TBT;
		case AUX_CH_F:
			return POWER_DOMAIN_AUX_F_TBT;
		case AUX_CH_G:
			return POWER_DOMAIN_AUX_G_TBT;
		case AUX_CH_H:
			return POWER_DOMAIN_AUX_H_TBT;
		case AUX_CH_I:
			return POWER_DOMAIN_AUX_I_TBT;
		default:
			MISSING_CASE(dig_port->aux_ch);
			/* Arbitrary fallback after the warning above. */
			return POWER_DOMAIN_AUX_C_TBT;
		}
	}

	return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
}
7019
7020 /*
7021  * Converts aux_ch to power_domain without caring about TBT ports for that use
7022  * intel_aux_power_domain()
7023  */
7024 enum intel_display_power_domain
7025 intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
7026 {
7027         switch (aux_ch) {
7028         case AUX_CH_A:
7029                 return POWER_DOMAIN_AUX_A;
7030         case AUX_CH_B:
7031                 return POWER_DOMAIN_AUX_B;
7032         case AUX_CH_C:
7033                 return POWER_DOMAIN_AUX_C;
7034         case AUX_CH_D:
7035                 return POWER_DOMAIN_AUX_D;
7036         case AUX_CH_E:
7037                 return POWER_DOMAIN_AUX_E;
7038         case AUX_CH_F:
7039                 return POWER_DOMAIN_AUX_F;
7040         case AUX_CH_G:
7041                 return POWER_DOMAIN_AUX_G;
7042         case AUX_CH_H:
7043                 return POWER_DOMAIN_AUX_H;
7044         case AUX_CH_I:
7045                 return POWER_DOMAIN_AUX_I;
7046         default:
7047                 MISSING_CASE(aux_ch);
7048                 return POWER_DOMAIN_AUX_A;
7049         }
7050 }
7051
7052 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
7053 {
7054         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7055         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7056         struct drm_encoder *encoder;
7057         enum pipe pipe = crtc->pipe;
7058         u64 mask;
7059         enum transcoder transcoder = crtc_state->cpu_transcoder;
7060
7061         if (!crtc_state->hw.active)
7062                 return 0;
7063
7064         mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
7065         mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
7066         if (crtc_state->pch_pfit.enabled ||
7067             crtc_state->pch_pfit.force_thru)
7068                 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
7069
7070         drm_for_each_encoder_mask(encoder, &dev_priv->drm,
7071                                   crtc_state->uapi.encoder_mask) {
7072                 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
7073
7074                 mask |= BIT_ULL(intel_encoder->power_domain);
7075         }
7076
7077         if (HAS_DDI(dev_priv) && crtc_state->has_audio)
7078                 mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
7079
7080         if (crtc_state->shared_dpll)
7081                 mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
7082
7083         if (crtc_state->dsc.compression_enable)
7084                 mask |= BIT_ULL(intel_dsc_power_domain(crtc_state));
7085
7086         return mask;
7087 }
7088
7089 static u64
7090 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
7091 {
7092         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7093         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7094         enum intel_display_power_domain domain;
7095         u64 domains, new_domains, old_domains;
7096
7097         domains = get_crtc_power_domains(crtc_state);
7098
7099         new_domains = domains & ~crtc->enabled_power_domains.mask;
7100         old_domains = crtc->enabled_power_domains.mask & ~domains;
7101
7102         for_each_power_domain(domain, new_domains)
7103                 intel_display_power_get_in_set(dev_priv,
7104                                                &crtc->enabled_power_domains,
7105                                                domain);
7106
7107         return old_domains;
7108 }
7109
/*
 * Release the power domains in @domains previously acquired for @crtc
 * (the stale mask returned by modeset_get_crtc_power_domains()).
 */
static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
					   u64 domains)
{
	intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
					    &crtc->enabled_power_domains,
					    domains);
}
7117
/*
 * Full modeset enable sequence for VLV/CHV pipes: timings, pipeconf,
 * PLLs, panel fitter, LUTs, watermarks, pipe and finally the encoders.
 * The statement order follows the required hardware enable sequence.
 */
static void valleyview_crtc_enable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	/* CHV pipe B blends with a legacy canvas; program it up front. */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(state, crtc);

	/* CHV and VLV have different PLL programming sequences. */
	if (IS_CHERRYVIEW(dev_priv)) {
		chv_prepare_pll(crtc, new_crtc_state);
		chv_enable_pll(crtc, new_crtc_state);
	} else {
		vlv_prepare_pll(crtc, new_crtc_state);
		vlv_enable_pll(crtc, new_crtc_state);
	}

	intel_encoders_pre_enable(state, crtc);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}
7172
/* Program the FP0/FP1 PLL divisor registers for the pipe of @crtc_state. */
static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	intel_de_write(dev_priv, FP0(crtc->pipe),
		       crtc_state->dpll_hw_state.fp0);
	intel_de_write(dev_priv, FP1(crtc->pipe),
		       crtc_state->dpll_hw_state.fp1);
}
7183
/*
 * Full modeset enable sequence for gen2-4 (i9xx) pipes: PLL dividers,
 * timings, pipeconf, PLL, panel fitter, LUTs, watermarks, pipe and
 * encoders. The statement order follows the hardware enable sequence.
 */
static void i9xx_crtc_enable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	i9xx_set_pll_dividers(new_crtc_state);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	/* Gen2 has no FIFO underrun reporting (see also the tail of this fn). */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(state, crtc);

	i9xx_enable_pll(crtc, new_crtc_state);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	else
		intel_update_watermarks(crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	/* prevents spurious underruns */
	if (IS_GEN(dev_priv, 2))
		intel_wait_for_vblank(dev_priv, pipe);
}
7235
/*
 * Disable the GMCH panel fitter, if it was enabled in
 * @old_crtc_state. Must only be called with the pipe disabled.
 */
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!old_crtc_state->gmch_pfit.control)
		return;

	assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);

	drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
		    intel_de_read(dev_priv, PFIT_CONTROL));
	intel_de_write(dev_priv, PFIT_CONTROL, 0);
}
7250
/*
 * Full disable sequence for gen2-4 (i9xx) pipes: encoders, vblank,
 * pipe, panel fitter and the PLL, with the various gen2/VLV/CHV/i830
 * quirks handled along the way.
 */
static void i9xx_crtc_disable(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN(dev_priv, 2))
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_pipe(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(state, crtc);

	/* DSI drives its own PLL; leave it alone here. */
	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(state, crtc);

	/* Gen2 has no FIFO underrun reporting to disarm. */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(crtc);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
7297
/*
 * Force a CRTC off outside of a normal atomic commit (used when
 * sanitizing inherited hardware state). Disables the planes and the
 * pipe, then scrubs all software state tracking — crtc state, encoder
 * links, power domains, cdclk/dbuf/bw bookkeeping — to match.
 */
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_bw_state *bw_state =
		to_intel_bw_state(dev_priv->bw_obj.state);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane *plane;
	struct drm_atomic_state *state;
	struct intel_crtc_state *temp_crtc_state;
	enum pipe pipe = crtc->pipe;
	int ret;

	if (!crtc_state->hw.active)
		return;

	/* Turn off every visible plane before touching the pipe. */
	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->uapi.visible)
			intel_plane_disable_noatomic(crtc, plane);
	}

	/* Build a throwaway atomic state just to drive the disable hook. */
	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (!state) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to disable [CRTC:%d:%s], out of memory",
			    crtc->base.base.id, crtc->base.name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
	ret = drm_atomic_add_affected_connectors(state, &crtc->base);

	drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);

	dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);

	drm_atomic_state_put(state);

	drm_dbg_kms(&dev_priv->drm,
		    "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		    crtc->base.base.id, crtc->base.name);

	crtc->active = false;
	crtc->base.enabled = false;

	/* Scrub the uapi and hw state to reflect the disabled CRTC. */
	drm_WARN_ON(&dev_priv->drm,
		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
	crtc_state->uapi.active = false;
	crtc_state->uapi.connector_mask = 0;
	crtc_state->uapi.encoder_mask = 0;
	intel_crtc_free_hw_state(crtc_state);
	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));

	for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(crtc);
	intel_update_watermarks(crtc);
	intel_disable_shared_dpll(crtc_state);

	intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);

	/* Clear this pipe out of the global cdclk/dbuf/bw bookkeeping. */
	dev_priv->active_pipes &= ~BIT(pipe);
	cdclk_state->min_cdclk[pipe] = 0;
	cdclk_state->min_voltage_level[pipe] = 0;
	cdclk_state->active_pipes &= ~BIT(pipe);

	dbuf_state->active_pipes &= ~BIT(pipe);

	bw_state->data_rate[pipe] = 0;
	bw_state->num_active_planes[pipe] = 0;
}
7382
7383 /*
7384  * turn all crtc's off, but do not adjust state
7385  * This has to be paired with a call to intel_modeset_setup_hw_state.
7386  */
7387 int intel_display_suspend(struct drm_device *dev)
7388 {
7389         struct drm_i915_private *dev_priv = to_i915(dev);
7390         struct drm_atomic_state *state;
7391         int ret;
7392
7393         state = drm_atomic_helper_suspend(dev);
7394         ret = PTR_ERR_OR_ZERO(state);
7395         if (ret)
7396                 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
7397                         ret);
7398         else
7399                 dev_priv->modeset_restore_state = state;
7400         return ret;
7401 }
7402
/*
 * Common encoder .destroy hook: tear down the base encoder and free
 * the containing intel_encoder.
 */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}
7410
/* Cross check the actual hw state with our own modeset state tracking (and its
 * internal consistency). */
static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.base.id, connector->base.name);

	if (connector->get_hw_state(connector)) {
		/* HW says the connector is on: crtc/encoder links must agree. */
		struct intel_encoder *encoder = intel_attached_encoder(connector);

		I915_STATE_WARN(!crtc_state,
			 "connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->hw.active,
				"connector is active, but attached crtc isn't\n");

		/* MST encoders are cross-checked elsewhere; skip them here. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		/* HW says off: software state must not claim otherwise. */
		I915_STATE_WARN(crtc_state && crtc_state->hw.active,
				"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
7449
7450 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
7451 {
7452         if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
7453                 return crtc_state->fdi_lanes;
7454
7455         return 0;
7456 }
7457
/*
 * Validate the FDI lane count in @pipe_config against per-platform
 * limits and against the lanes consumed by the other pipes that share
 * FDI bandwidth (relevant on 3-pipe IVB). Returns 0 on success, a
 * negative errno (-EINVAL or a deadlock/alloc error from grabbing the
 * other pipe's state) otherwise.
 */
static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	drm_dbg_kms(&dev_priv->drm,
		    "checking fdi config on pipe %c, lanes %i\n",
		    pipe_name(pipe), pipe_config->fdi_lanes);
	/* 4 lanes is the absolute hardware maximum. */
	if (pipe_config->fdi_lanes > 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "invalid fdi lane config on pipe %c: %i lanes\n",
			    pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on haswell, required: %i lanes\n",
				    pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* With only two pipes there is no FDI lane sharing to check. */
	if (INTEL_NUM_PIPES(dev_priv) == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		/* Pipe B taking >2 lanes is only OK if pipe C uses none. */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "invalid shared fdi lane config on pipe %c: %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on pipe %c: required %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		/* Pipe C shares lanes with pipe B; B must leave some free. */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
7535
/* Sentinel return value asking the caller to redo the compute pass. */
#define RETRY 1
/*
 * Compute the FDI lane count and M/N values for @pipe_config. If the
 * requested bpp doesn't fit the FDI link bandwidth, the pipe bpp is
 * reduced (flagging bw_constrained) and RETRY is returned so the
 * caller recomputes the config with the lower bpp.
 */
static int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
				  struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(i915, pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
				      pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false, false);

	ret = ilk_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
	/* Deadlocks must be propagated for the atomic backoff dance. */
	if (ret == -EDEADLK)
		return ret;

	/* Over budget: drop bpp (never below 6 bpc) and try again. */
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		drm_dbg_kms(&i915->drm,
			    "fdi link bw constraint, reducing pipe bpp to %i\n",
			    pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return ret;
}
7586
7587 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
7588 {
7589         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7590         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7591
7592         /* IPS only exists on ULT machines and is tied to pipe A. */
7593         if (!hsw_crtc_supports_ips(crtc))
7594                 return false;
7595
7596         if (!dev_priv->params.enable_ips)
7597                 return false;
7598
7599         if (crtc_state->pipe_bpp > 24)
7600                 return false;
7601
7602         /*
7603          * We compare against max which means we must take
7604          * the increased cdclk requirement into account when
7605          * calculating the new cdclk.
7606          *
7607          * Should measure whether using a lower cdclk w/o IPS
7608          */
7609         if (IS_BROADWELL(dev_priv) &&
7610             crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
7611                 return false;
7612
7613         return true;
7614 }
7615
7616 static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
7617 {
7618         struct drm_i915_private *dev_priv =
7619                 to_i915(crtc_state->uapi.crtc->dev);
7620         struct intel_atomic_state *state =
7621                 to_intel_atomic_state(crtc_state->uapi.state);
7622
7623         crtc_state->ips_enabled = false;
7624
7625         if (!hsw_crtc_state_ips_capable(crtc_state))
7626                 return 0;
7627
7628         /*
7629          * When IPS gets enabled, the pipe CRC changes. Since IPS gets
7630          * enabled and disabled dynamically based on package C states,
7631          * user space can't make reliable use of the CRCs, so let's just
7632          * completely disable it.
7633          */
7634         if (crtc_state->crc_enabled)
7635                 return 0;
7636
7637         /* IPS should be fine as long as at least one plane is enabled. */
7638         if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
7639                 return 0;
7640
7641         if (IS_BROADWELL(dev_priv)) {
7642                 const struct intel_cdclk_state *cdclk_state;
7643
7644                 cdclk_state = intel_atomic_get_cdclk_state(state);
7645                 if (IS_ERR(cdclk_state))
7646                         return PTR_ERR(cdclk_state);
7647
7648                 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
7649                 if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
7650                         return 0;
7651         }
7652
7653         crtc_state->ips_enabled = true;
7654
7655         return 0;
7656 }
7657
7658 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
7659 {
7660         const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7661
7662         /* GDG double wide on either pipe, otherwise pipe A only */
7663         return INTEL_GEN(dev_priv) < 4 &&
7664                 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
7665 }
7666
7667 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
7668 {
7669         u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
7670         unsigned int pipe_w, pipe_h, pfit_w, pfit_h;
7671
7672         /*
7673          * We only use IF-ID interlacing. If we ever use
7674          * PF-ID we'll need to adjust the pixel_rate here.
7675          */
7676
7677         if (!crtc_state->pch_pfit.enabled)
7678                 return pixel_rate;
7679
7680         pipe_w = crtc_state->pipe_src_w;
7681         pipe_h = crtc_state->pipe_src_h;
7682
7683         pfit_w = drm_rect_width(&crtc_state->pch_pfit.dst);
7684         pfit_h = drm_rect_height(&crtc_state->pch_pfit.dst);
7685
7686         if (pipe_w < pfit_w)
7687                 pipe_w = pfit_w;
7688         if (pipe_h < pfit_h)
7689                 pipe_h = pfit_h;
7690
7691         if (drm_WARN_ON(crtc_state->uapi.crtc->dev,
7692                         !pfit_w || !pfit_h))
7693                 return pixel_rate;
7694
7695         return div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
7696                        pfit_w * pfit_h);
7697 }
7698
7699 static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
7700                                          const struct drm_display_mode *timings)
7701 {
7702         mode->hdisplay = timings->crtc_hdisplay;
7703         mode->htotal = timings->crtc_htotal;
7704         mode->hsync_start = timings->crtc_hsync_start;
7705         mode->hsync_end = timings->crtc_hsync_end;
7706
7707         mode->vdisplay = timings->crtc_vdisplay;
7708         mode->vtotal = timings->crtc_vtotal;
7709         mode->vsync_start = timings->crtc_vsync_start;
7710         mode->vsync_end = timings->crtc_vsync_end;
7711
7712         mode->flags = timings->flags;
7713         mode->type = DRM_MODE_TYPE_DRIVER;
7714
7715         mode->clock = timings->crtc_clock;
7716
7717         drm_mode_set_name(mode);
7718 }
7719
7720 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
7721 {
7722         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
7723
7724         if (HAS_GMCH(dev_priv))
7725                 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
7726                 crtc_state->pixel_rate =
7727                         crtc_state->hw.pipe_mode.crtc_clock;
7728         else
7729                 crtc_state->pixel_rate =
7730                         ilk_pipe_pixel_rate(crtc_state);
7731 }
7732
/*
 * Fill in the crtc state fields that are derived from hw.adjusted_mode:
 * hw.pipe_mode (per-pipe timings), the user-visible hw.mode, and the
 * pixel rate.  Used on the state-readout path.
 */
static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
{
        struct drm_display_mode *mode = &crtc_state->hw.mode;
        struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
        struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

        drm_mode_copy(pipe_mode, adjusted_mode);

        if (crtc_state->bigjoiner) {
                /*
                 * transcoder is programmed to the full mode,
                 * but pipe timings are half of the transcoder mode
                 */
                pipe_mode->crtc_hdisplay /= 2;
                pipe_mode->crtc_hblank_start /= 2;
                pipe_mode->crtc_hblank_end /= 2;
                pipe_mode->crtc_hsync_start /= 2;
                pipe_mode->crtc_hsync_end /= 2;
                pipe_mode->crtc_htotal /= 2;
                pipe_mode->crtc_clock /= 2;
        }

        /* Refresh user-visible fields from crtc_* (must follow the halving) */
        intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
        intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode);

        intel_crtc_compute_pixel_rate(crtc_state);

        drm_mode_copy(mode, adjusted_mode);
        /* With bigjoiner the user mode spans both pipes: double the width */
        mode->hdisplay = crtc_state->pipe_src_w << crtc_state->bigjoiner;
        mode->vdisplay = crtc_state->pipe_src_h;
}
7764
/*
 * Read out the encoder's view of the crtc state, then recompute the
 * derived fields (pipe_mode, user mode, pixel rate) from it.
 */
static void intel_encoder_get_config(struct intel_encoder *encoder,
                                     struct intel_crtc_state *crtc_state)
{
        encoder->get_config(encoder, crtc_state);

        intel_crtc_readout_derived_state(crtc_state);
}
7772
/*
 * Validate and adjust @pipe_config against pipe-level constraints:
 * derives hw.pipe_mode (halved for bigjoiner), checks the dotclock
 * against cdclk/double-wide limits, rejects impossible YCbCr+CTM and
 * odd-width combinations, applies the hsync workaround, computes the
 * pixel rate and, for PCH encoders, the FDI configuration.
 * Returns 0, RETRY (from FDI bpp reduction), or a negative error code.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
                                     struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct drm_display_mode *pipe_mode = &pipe_config->hw.pipe_mode;
        int clock_limit = dev_priv->max_dotclk_freq;

        drm_mode_copy(pipe_mode, &pipe_config->hw.adjusted_mode);

        /* Adjust pipe_mode for bigjoiner, with half the horizontal mode */
        if (pipe_config->bigjoiner) {
                pipe_mode->crtc_clock /= 2;
                pipe_mode->crtc_hdisplay /= 2;
                pipe_mode->crtc_hblank_start /= 2;
                pipe_mode->crtc_hblank_end /= 2;
                pipe_mode->crtc_hsync_start /= 2;
                pipe_mode->crtc_hsync_end /= 2;
                pipe_mode->crtc_htotal /= 2;
                pipe_config->pipe_src_w /= 2;
        }

        intel_mode_from_crtc_timings(pipe_mode, pipe_mode);

        if (INTEL_GEN(dev_priv) < 4) {
                /* Without double wide the dotclock is capped at 90% of cdclk */
                clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

                /*
                 * Enable double wide mode when the dot clock
                 * is > 90% of the (display) core speed.
                 */
                if (intel_crtc_supports_double_wide(crtc) &&
                    pipe_mode->crtc_clock > clock_limit) {
                        clock_limit = dev_priv->max_dotclk_freq;
                        pipe_config->double_wide = true;
                }
        }

        if (pipe_mode->crtc_clock > clock_limit) {
                drm_dbg_kms(&dev_priv->drm,
                            "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
                            pipe_mode->crtc_clock, clock_limit,
                            yesno(pipe_config->double_wide));
                return -EINVAL;
        }

        if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
             pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
             pipe_config->hw.ctm) {
                /*
                 * There is only one pipe CSC unit per pipe, and we need that
                 * for output conversion from RGB->YCBCR. So if CTM is already
                 * applied we can't support YCBCR420 output.
                 */
                drm_dbg_kms(&dev_priv->drm,
                            "YCBCR420 and CTM together are not possible\n");
                return -EINVAL;
        }

        /*
         * Pipe horizontal size must be even in:
         * - DVO ganged mode
         * - LVDS dual channel mode
         * - Double wide pipe
         */
        if (pipe_config->pipe_src_w & 1) {
                if (pipe_config->double_wide) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "Odd pipe source width not supported with double wide pipe\n");
                        return -EINVAL;
                }

                if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
                    intel_is_dual_link_lvds(dev_priv)) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "Odd pipe source width not supported with dual link LVDS\n");
                        return -EINVAL;
                }
        }

        /* Cantiga+ cannot handle modes with a hsync front porch of 0.
         * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
         */
        if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
            pipe_mode->crtc_hsync_start == pipe_mode->crtc_hdisplay)
                return -EINVAL;

        intel_crtc_compute_pixel_rate(pipe_config);

        if (pipe_config->has_pch_encoder)
                return ilk_fdi_compute_config(crtc, pipe_config);

        return 0;
}
7866
7867 static void
7868 intel_reduce_m_n_ratio(u32 *num, u32 *den)
7869 {
7870         while (*num > DATA_LINK_M_N_MASK ||
7871                *den > DATA_LINK_M_N_MASK) {
7872                 *num >>= 1;
7873                 *den >>= 1;
7874         }
7875 }
7876
7877 static void compute_m_n(unsigned int m, unsigned int n,
7878                         u32 *ret_m, u32 *ret_n,
7879                         bool constant_n)
7880 {
7881         /*
7882          * Several DP dongles in particular seem to be fussy about
7883          * too large link M/N values. Give N value as 0x8000 that
7884          * should be acceptable by specific devices. 0x8000 is the
7885          * specified fixed N value for asynchronous clock mode,
7886          * which the devices expect also in synchronous clock mode.
7887          */
7888         if (constant_n)
7889                 *ret_n = DP_LINK_CONSTANT_N_VALUE;
7890         else
7891                 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
7892
7893         *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
7894         intel_reduce_m_n_ratio(ret_m, ret_n);
7895 }
7896
7897 void
7898 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
7899                        int pixel_clock, int link_clock,
7900                        struct intel_link_m_n *m_n,
7901                        bool constant_n, bool fec_enable)
7902 {
7903         u32 data_clock = bits_per_pixel * pixel_clock;
7904
7905         if (fec_enable)
7906                 data_clock = intel_dp_mode_to_fec_clock(data_clock);
7907
7908         m_n->tu = 64;
7909         compute_m_n(data_clock,
7910                     link_clock * nlanes * 8,
7911                     &m_n->gmch_m, &m_n->gmch_n,
7912                     constant_n);
7913
7914         compute_m_n(pixel_clock, link_clock,
7915                     &m_n->link_m, &m_n->link_n,
7916                     constant_n);
7917 }
7918
7919 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
7920 {
7921         /*
7922          * There may be no VBT; and if the BIOS enabled SSC we can
7923          * just keep using it to avoid unnecessary flicker.  Whereas if the
7924          * BIOS isn't using it, don't assume it will work even if the VBT
7925          * indicates as much.
7926          */
7927         if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
7928                 bool bios_lvds_use_ssc = intel_de_read(dev_priv,
7929                                                        PCH_DREF_CONTROL) &
7930                         DREF_SSC1_ENABLE;
7931
7932                 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
7933                         drm_dbg_kms(&dev_priv->drm,
7934                                     "SSC %s by BIOS, overriding VBT which says %s\n",
7935                                     enableddisabled(bios_lvds_use_ssc),
7936                                     enableddisabled(dev_priv->vbt.lvds_use_ssc));
7937                         dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
7938                 }
7939         }
7940 }
7941
7942 static bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7943 {
7944         if (dev_priv->params.panel_use_ssc >= 0)
7945                 return dev_priv->params.panel_use_ssc != 0;
7946         return dev_priv->vbt.lvds_use_ssc
7947                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7948 }
7949
7950 static u32 pnv_dpll_compute_fp(struct dpll *dpll)
7951 {
7952         return (1 << dpll->n) << 16 | dpll->m2;
7953 }
7954
7955 static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
7956 {
7957         return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
7958 }
7959
7960 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7961                                      struct intel_crtc_state *crtc_state,
7962                                      struct dpll *reduced_clock)
7963 {
7964         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7965         u32 fp, fp2 = 0;
7966
7967         if (IS_PINEVIEW(dev_priv)) {
7968                 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
7969                 if (reduced_clock)
7970                         fp2 = pnv_dpll_compute_fp(reduced_clock);
7971         } else {
7972                 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
7973                 if (reduced_clock)
7974                         fp2 = i9xx_dpll_compute_fp(reduced_clock);
7975         }
7976
7977         crtc_state->dpll_hw_state.fp0 = fp;
7978
7979         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7980             reduced_clock) {
7981                 crtc_state->dpll_hw_state.fp1 = fp2;
7982         } else {
7983                 crtc_state->dpll_hw_state.fp1 = fp;
7984         }
7985 }
7986
/*
 * VLV PLL B opamp recalibration workaround.  The DPIO register values
 * below are magic numbers from hardware programming notes; do not
 * "clean them up".
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
                pipe)
{
        u32 reg_val;

        /*
         * PLLB opamp always calibrates to max value of 0x3f, force enable it
         * and set it to a reasonable value instead.
         */
        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
        reg_val &= 0xffffff00;
        reg_val |= 0x00000030;
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
        reg_val &= 0x00ffffff;
        reg_val |= 0x8c000000;
        vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

        /* Clear the forced value again once calibration has been kicked. */
        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
        reg_val &= 0xffffff00;
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
        reg_val &= 0x00ffffff;
        reg_val |= 0xb0000000;
        vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
8015
/*
 * Program the PCH transcoder's M1/N1 data and link ratio registers for
 * this crtc's pipe.  The TU size is packed into the DATA_M1 register.
 */
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
                                         const struct intel_link_m_n *m_n)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
                       TU_SIZE(m_n->tu) | m_n->gmch_m);
        intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
        intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
        intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
8029
8030 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
8031                                  enum transcoder transcoder)
8032 {
8033         if (IS_HASWELL(dev_priv))
8034                 return transcoder == TRANSCODER_EDP;
8035
8036         /*
8037          * Strictly speaking some registers are available before
8038          * gen7, but we only support DRRS on gen7+
8039          */
8040         return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
8041 }
8042
/*
 * Program the CPU transcoder's M/N link ratio registers.  Gen5+ uses
 * per-transcoder registers (with optional M2/N2 for DRRS); older
 * platforms use the per-pipe G4X registers.  @m2_n2 may be NULL.
 */
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
                                         const struct intel_link_m_n *m_n,
                                         const struct intel_link_m_n *m2_n2)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        enum transcoder transcoder = crtc_state->cpu_transcoder;

        if (INTEL_GEN(dev_priv) >= 5) {
                intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
                               TU_SIZE(m_n->tu) | m_n->gmch_m);
                intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
                               m_n->gmch_n);
                intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
                               m_n->link_m);
                intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
                               m_n->link_n);
                /*
                 *  M2_N2 registers are set only if DRRS is supported
                 * (to make sure the registers are not unnecessarily accessed).
                 */
                if (m2_n2 && crtc_state->has_drrs &&
                    transcoder_has_m2_n2(dev_priv, transcoder)) {
                        intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
                                       TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
                        intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
                                       m2_n2->gmch_n);
                        intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
                                       m2_n2->link_m);
                        intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
                                       m2_n2->link_n);
                }
        } else {
                intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
                               TU_SIZE(m_n->tu) | m_n->gmch_m);
                intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
                intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
                intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
        }
}
8084
8085 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
8086 {
8087         const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
8088         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
8089
8090         if (m_n == M1_N1) {
8091                 dp_m_n = &crtc_state->dp_m_n;
8092                 dp_m2_n2 = &crtc_state->dp_m2_n2;
8093         } else if (m_n == M2_N2) {
8094
8095                 /*
8096                  * M2_N2 registers are not supported. Hence m2_n2 divider value
8097                  * needs to be programmed into M1_N1.
8098                  */
8099                 dp_m_n = &crtc_state->dp_m2_n2;
8100         } else {
8101                 drm_err(&i915->drm, "Unsupported divider value\n");
8102                 return;
8103         }
8104
8105         if (crtc_state->has_pch_encoder)
8106                 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
8107         else
8108                 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
8109 }
8110
8111 static void vlv_compute_dpll(struct intel_crtc *crtc,
8112                              struct intel_crtc_state *pipe_config)
8113 {
8114         pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
8115                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
8116         if (crtc->pipe != PIPE_A)
8117                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
8118
8119         /* DPLL not used with DSI, but still need the rest set up */
8120         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
8121                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
8122                         DPLL_EXT_BUFFER_ENABLE_VLV;
8123
8124         pipe_config->dpll_hw_state.dpll_md =
8125                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
8126 }
8127
8128 static void chv_compute_dpll(struct intel_crtc *crtc,
8129                              struct intel_crtc_state *pipe_config)
8130 {
8131         pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
8132                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
8133         if (crtc->pipe != PIPE_A)
8134                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
8135
8136         /* DPLL not used with DSI, but still need the rest set up */
8137         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
8138                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
8139
8140         pipe_config->dpll_hw_state.dpll_md =
8141                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
8142 }
8143
/*
 * Program the VLV DPLL dividers and analog tuning values over DPIO
 * before the PLL is enabled.  The write sequence and the magic DPIO
 * values come from hardware programming notes; the order is
 * significant, so keep it as-is.
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        u32 mdiv;
        u32 bestn, bestm1, bestm2, bestp1, bestp2;
        u32 coreclk, reg_val;

        /* Enable Refclk */
        intel_de_write(dev_priv, DPLL(pipe),
                       pipe_config->dpll_hw_state.dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));

        /* No need to actually set up the DPLL with DSI */
        if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
                return;

        vlv_dpio_get(dev_priv);

        bestn = pipe_config->dpll.n;
        bestm1 = pipe_config->dpll.m1;
        bestm2 = pipe_config->dpll.m2;
        bestp1 = pipe_config->dpll.p1;
        bestp2 = pipe_config->dpll.p2;

        /* See eDP HDMI DPIO driver vbios notes doc */

        /* PLL B needs special handling */
        if (pipe == PIPE_B)
                vlv_pllb_recal_opamp(dev_priv, pipe);

        /* Set up Tx target for periodic Rcomp update */
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

        /* Disable target IRef on PLL */
        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
        reg_val &= 0x00ffffff;
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

        /* Disable fast lock */
        vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

        /* Set idtafcrecal before PLL is enabled */
        mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
        mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
        mdiv |= ((bestn << DPIO_N_SHIFT));
        mdiv |= (1 << DPIO_K_SHIFT);

        /*
         * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
         * but we don't support that).
         * Note: don't use the DAC post divider as it seems unstable.
         */
        mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

        /* Write the dividers once more with calibration enabled. */
        mdiv |= DPIO_ENABLE_CALIBRATION;
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

        /* Set HBR and RBR LPF coefficients */
        if (pipe_config->port_clock == 162000 ||
            intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
            intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
                vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
                                 0x009f0003);
        else
                vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
                                 0x00d0000f);

        if (intel_crtc_has_dp_encoder(pipe_config)) {
                /* Use SSC source */
                if (pipe == PIPE_A)
                        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
                                         0x0df40000);
                else
                        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
                                         0x0df70000);
        } else { /* HDMI or VGA */
                /* Use bend source */
                if (pipe == PIPE_A)
                        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
                                         0x0df70000);
                else
                        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
                                         0x0df40000);
        }

        coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
        coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
        if (intel_crtc_has_dp_encoder(pipe_config))
                coreclk |= 0x01000000;
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);

        vlv_dpio_put(dev_priv);
}
8242
8243 static void chv_prepare_pll(struct intel_crtc *crtc,
8244                             const struct intel_crtc_state *pipe_config)
8245 {
8246         struct drm_device *dev = crtc->base.dev;
8247         struct drm_i915_private *dev_priv = to_i915(dev);
8248         enum pipe pipe = crtc->pipe;
8249         enum dpio_channel port = vlv_pipe_to_channel(pipe);
8250         u32 loopfilter, tribuf_calcntr;
8251         u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
8252         u32 dpio_val;
8253         int vco;
8254
8255         /* Enable Refclk and SSC */
8256         intel_de_write(dev_priv, DPLL(pipe),
8257                        pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
8258
8259         /* No need to actually set up the DPLL with DSI */
8260         if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8261                 return;
8262
8263         bestn = pipe_config->dpll.n;
8264         bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
8265         bestm1 = pipe_config->dpll.m1;
8266         bestm2 = pipe_config->dpll.m2 >> 22;
8267         bestp1 = pipe_config->dpll.p1;
8268         bestp2 = pipe_config->dpll.p2;
8269         vco = pipe_config->dpll.vco;
8270         dpio_val = 0;
8271         loopfilter = 0;
8272
8273         vlv_dpio_get(dev_priv);
8274
8275         /* p1 and p2 divider */
8276         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
8277                         5 << DPIO_CHV_S1_DIV_SHIFT |
8278                         bestp1 << DPIO_CHV_P1_DIV_SHIFT |
8279                         bestp2 << DPIO_CHV_P2_DIV_SHIFT |
8280                         1 << DPIO_CHV_K_DIV_SHIFT);
8281
8282         /* Feedback post-divider - m2 */
8283         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
8284
8285         /* Feedback refclk divider - n and m1 */
8286         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
8287                         DPIO_CHV_M1_DIV_BY_2 |
8288                         1 << DPIO_CHV_N_DIV_SHIFT);
8289
8290         /* M2 fraction division */
8291         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
8292
8293         /* M2 fraction division enable */
8294         dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
8295         dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
8296         dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
8297         if (bestm2_frac)
8298                 dpio_val |= DPIO_CHV_FRAC_DIV_EN;
8299         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
8300
8301         /* Program digital lock detect threshold */
8302         dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
8303         dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
8304                                         DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
8305         dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
8306         if (!bestm2_frac)
8307                 dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
8308         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
8309
8310         /* Loop filter */
8311         if (vco == 5400000) {
8312                 loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
8313                 loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
8314                 loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
8315                 tribuf_calcntr = 0x9;
8316         } else if (vco <= 6200000) {
8317                 loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
8318                 loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
8319                 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
8320                 tribuf_calcntr = 0x9;
8321         } else if (vco <= 6480000) {
8322                 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
8323                 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
8324                 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
8325                 tribuf_calcntr = 0x8;
8326         } else {
8327                 /* Not supported. Apply the same limits as in the max case */
8328                 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
8329                 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
8330                 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
8331                 tribuf_calcntr = 0;
8332         }
8333         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
8334
8335         dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
8336         dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
8337         dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
8338         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
8339
8340         /* AFC Recal */
8341         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
8342                         vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
8343                         DPIO_AFC_RECAL);
8344
8345         vlv_dpio_put(dev_priv);
8346 }
8347
8348 /**
8349  * vlv_force_pll_on - forcibly enable just the PLL
8350  * @dev_priv: i915 private structure
8351  * @pipe: pipe PLL to enable
8352  * @dpll: PLL configuration
8353  *
8354  * Enable the PLL for @pipe using the supplied @dpll config. To be used
8355  * in cases where we need the PLL enabled even when @pipe is not going to
8356  * be enabled.
8357  */
8358 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
8359                      const struct dpll *dpll)
8360 {
8361         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
8362         struct intel_crtc_state *pipe_config;
8363
8364         pipe_config = intel_crtc_state_alloc(crtc);
8365         if (!pipe_config)
8366                 return -ENOMEM;
8367
8368         pipe_config->cpu_transcoder = (enum transcoder)pipe;
8369         pipe_config->pixel_multiplier = 1;
8370         pipe_config->dpll = *dpll;
8371
8372         if (IS_CHERRYVIEW(dev_priv)) {
8373                 chv_compute_dpll(crtc, pipe_config);
8374                 chv_prepare_pll(crtc, pipe_config);
8375                 chv_enable_pll(crtc, pipe_config);
8376         } else {
8377                 vlv_compute_dpll(crtc, pipe_config);
8378                 vlv_prepare_pll(crtc, pipe_config);
8379                 vlv_enable_pll(crtc, pipe_config);
8380         }
8381
8382         kfree(pipe_config);
8383
8384         return 0;
8385 }
8386
8387 /**
8388  * vlv_force_pll_off - forcibly disable just the PLL
8389  * @dev_priv: i915 private structure
8390  * @pipe: pipe PLL to disable
8391  *
8392  * Disable the PLL for @pipe. To be used in cases where we need
8393  * the PLL enabled even when @pipe is not going to be enabled.
8394  */
8395 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
8396 {
8397         if (IS_CHERRYVIEW(dev_priv))
8398                 chv_disable_pll(dev_priv, pipe);
8399         else
8400                 vlv_disable_pll(dev_priv, pipe);
8401 }
8402
/*
 * Encode the DPLL control value (and DPLL_MD on gen4+) for gen3/gen4
 * style PLLs into crtc_state->dpll_hw_state.  @reduced_clock, when
 * non-NULL on g4x, additionally contributes its p1 divider to the
 * FPA1 field.
 */
static void i9xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	/* fills in the divider-related dpll_hw_state fields (helper elsewhere) */
	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* these platforms encode pixel_multiplier directly in DPLL */
	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	/* high speed bit is needed for SDVO/HDMI as well as DP outputs */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev_priv))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (IS_G4X(dev_priv) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	/* p2 has only these four legal values here (see the limit tables) */
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_GEN(dev_priv) >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* reference clock selection: TV clock, LVDS SSC, or default refclk */
	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	/* gen4+ carries the pixel multiplier in DPLL_MD instead */
	if (INTEL_GEN(dev_priv) >= 4) {
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}
8475
/*
 * Encode the DPLL control value for gen2 PLLs into
 * crtc_state->dpll_hw_state.dpll.
 */
static void i8xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	/* fills in the divider-related dpll_hw_state fields (helper elsewhere) */
	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	/* p1/p2 are encoded differently for LVDS vs. the other outputs */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}

	/*
	 * Bspec:
	 * "[Almador Errata}: For the correct operation of the muxed DVO pins
	 *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
	 *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
	 *  Enable) must be set to “1” in both the DPLL A Control Register
	 *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
	 *
	 * For simplicity We simply keep both bits always enabled in
	 * both DPLLS. The spec says we should disable the DVO 2X clock
	 * when not needed, but this seems to work fine in practice.
	 */
	if (IS_I830(dev_priv) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	/* reference clock selection: LVDS SSC or the default refclk */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;
}
8525
/*
 * Program the transcoder horizontal/vertical timing registers from
 * crtc_state->hw.adjusted_mode.  All registers store "value - 1".
 */
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		/* SDVO uses a fixed half-line shift; otherwise derive it from hsync */
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/* VSYNCSHIFT exists on gen4+ only */
	if (INTEL_GEN(dev_priv) > 3)
		intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
			       vsyncshift);

	intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
	intel_de_write(dev_priv, HBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
	intel_de_write(dev_priv, HSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));

	intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
	intel_de_write(dev_priv, VBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
	intel_de_write(dev_priv, VSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		intel_de_write(dev_priv, VTOTAL(pipe),
			       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));

}
8583
8584 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
8585 {
8586         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8587         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8588         enum pipe pipe = crtc->pipe;
8589
8590         /* pipesrc controls the size that is scaled from, which should
8591          * always be the user's requested size.
8592          */
8593         intel_de_write(dev_priv, PIPESRC(pipe),
8594                        ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
8595 }
8596
8597 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
8598 {
8599         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
8600         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
8601
8602         if (IS_GEN(dev_priv, 2))
8603                 return false;
8604
8605         if (INTEL_GEN(dev_priv) >= 9 ||
8606             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
8607                 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
8608         else
8609                 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
8610 }
8611
/*
 * Read the transcoder timing registers back into
 * pipe_config->hw.adjusted_mode.  Registers store "value - 1", hence
 * the "+ 1" on every field.
 */
static void intel_get_transcoder_timings(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	u32 tmp;

	tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;

	/* blanking registers are not read back for DSI transcoders */
	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_hblank_start =
							(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_hblank_end =
						((tmp >> 16) & 0xffff) + 1;
	}
	tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;

	/* blanking registers are not read back for DSI transcoders */
	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_vblank_start =
							(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end =
						((tmp >> 16) & 0xffff) + 1;
	}
	tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	/* undo the halfline adjustment applied when programming interlaced modes */
	if (intel_pipe_is_interlaced(pipe_config)) {
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
	}
}
8656
8657 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
8658                                     struct intel_crtc_state *pipe_config)
8659 {
8660         struct drm_device *dev = crtc->base.dev;
8661         struct drm_i915_private *dev_priv = to_i915(dev);
8662         u32 tmp;
8663
8664         tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
8665         pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
8666         pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
8667 }
8668
/*
 * Program the PIPECONF register for this crtc: pipe enable carry-over
 * on i830, double wide mode, bpc/dither (g4x+), interlace mode, color
 * range, gamma mode and frame start delay.
 */
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 pipeconf;

	pipeconf = 0;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;

	if (crtc_state->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (crtc_state->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/* pre-gen4 and SDVO need the field indication variant */
		if (INTEL_GEN(dev_priv) < 4 ||
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else {
		pipeconf |= PIPECONF_PROGRESSIVE;
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	     crtc_state->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	pipeconf |= PIPECONF_FRAME_START_DELAY(0);

	intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
	intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
}
8729
8730 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
8731                                    struct intel_crtc_state *crtc_state)
8732 {
8733         struct drm_device *dev = crtc->base.dev;
8734         struct drm_i915_private *dev_priv = to_i915(dev);
8735         const struct intel_limit *limit;
8736         int refclk = 48000;
8737
8738         memset(&crtc_state->dpll_hw_state, 0,
8739                sizeof(crtc_state->dpll_hw_state));
8740
8741         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8742                 if (intel_panel_use_ssc(dev_priv)) {
8743                         refclk = dev_priv->vbt.lvds_ssc_freq;
8744                         drm_dbg_kms(&dev_priv->drm,
8745                                     "using SSC reference clock of %d kHz\n",
8746                                     refclk);
8747                 }
8748
8749                 limit = &intel_limits_i8xx_lvds;
8750         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
8751                 limit = &intel_limits_i8xx_dvo;
8752         } else {
8753                 limit = &intel_limits_i8xx_dac;
8754         }
8755
8756         if (!crtc_state->clock_set &&
8757             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8758                                  refclk, NULL, &crtc_state->dpll)) {
8759                 drm_err(&dev_priv->drm,
8760                         "Couldn't find PLL settings for mode!\n");
8761                 return -EINVAL;
8762         }
8763
8764         i8xx_compute_dpll(crtc, crtc_state, NULL);
8765
8766         return 0;
8767 }
8768
8769 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
8770                                   struct intel_crtc_state *crtc_state)
8771 {
8772         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8773         const struct intel_limit *limit;
8774         int refclk = 96000;
8775
8776         memset(&crtc_state->dpll_hw_state, 0,
8777                sizeof(crtc_state->dpll_hw_state));
8778
8779         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8780                 if (intel_panel_use_ssc(dev_priv)) {
8781                         refclk = dev_priv->vbt.lvds_ssc_freq;
8782                         drm_dbg_kms(&dev_priv->drm,
8783                                     "using SSC reference clock of %d kHz\n",
8784                                     refclk);
8785                 }
8786
8787                 if (intel_is_dual_link_lvds(dev_priv))
8788                         limit = &intel_limits_g4x_dual_channel_lvds;
8789                 else
8790                         limit = &intel_limits_g4x_single_channel_lvds;
8791         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
8792                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
8793                 limit = &intel_limits_g4x_hdmi;
8794         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
8795                 limit = &intel_limits_g4x_sdvo;
8796         } else {
8797                 /* The option is for other outputs */
8798                 limit = &intel_limits_i9xx_sdvo;
8799         }
8800
8801         if (!crtc_state->clock_set &&
8802             !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8803                                 refclk, NULL, &crtc_state->dpll)) {
8804                 drm_err(&dev_priv->drm,
8805                         "Couldn't find PLL settings for mode!\n");
8806                 return -EINVAL;
8807         }
8808
8809         i9xx_compute_dpll(crtc, crtc_state, NULL);
8810
8811         return 0;
8812 }
8813
8814 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
8815                                   struct intel_crtc_state *crtc_state)
8816 {
8817         struct drm_device *dev = crtc->base.dev;
8818         struct drm_i915_private *dev_priv = to_i915(dev);
8819         const struct intel_limit *limit;
8820         int refclk = 96000;
8821
8822         memset(&crtc_state->dpll_hw_state, 0,
8823                sizeof(crtc_state->dpll_hw_state));
8824
8825         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8826                 if (intel_panel_use_ssc(dev_priv)) {
8827                         refclk = dev_priv->vbt.lvds_ssc_freq;
8828                         drm_dbg_kms(&dev_priv->drm,
8829                                     "using SSC reference clock of %d kHz\n",
8830                                     refclk);
8831                 }
8832
8833                 limit = &pnv_limits_lvds;
8834         } else {
8835                 limit = &pnv_limits_sdvo;
8836         }
8837
8838         if (!crtc_state->clock_set &&
8839             !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8840                                 refclk, NULL, &crtc_state->dpll)) {
8841                 drm_err(&dev_priv->drm,
8842                         "Couldn't find PLL settings for mode!\n");
8843                 return -EINVAL;
8844         }
8845
8846         i9xx_compute_dpll(crtc, crtc_state, NULL);
8847
8848         return 0;
8849 }
8850
8851 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
8852                                    struct intel_crtc_state *crtc_state)
8853 {
8854         struct drm_device *dev = crtc->base.dev;
8855         struct drm_i915_private *dev_priv = to_i915(dev);
8856         const struct intel_limit *limit;
8857         int refclk = 96000;
8858
8859         memset(&crtc_state->dpll_hw_state, 0,
8860                sizeof(crtc_state->dpll_hw_state));
8861
8862         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8863                 if (intel_panel_use_ssc(dev_priv)) {
8864                         refclk = dev_priv->vbt.lvds_ssc_freq;
8865                         drm_dbg_kms(&dev_priv->drm,
8866                                     "using SSC reference clock of %d kHz\n",
8867                                     refclk);
8868                 }
8869
8870                 limit = &intel_limits_i9xx_lvds;
8871         } else {
8872                 limit = &intel_limits_i9xx_sdvo;
8873         }
8874
8875         if (!crtc_state->clock_set &&
8876             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8877                                  refclk, NULL, &crtc_state->dpll)) {
8878                 drm_err(&dev_priv->drm,
8879                         "Couldn't find PLL settings for mode!\n");
8880                 return -EINVAL;
8881         }
8882
8883         i9xx_compute_dpll(crtc, crtc_state, NULL);
8884
8885         return 0;
8886 }
8887
8888 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
8889                                   struct intel_crtc_state *crtc_state)
8890 {
8891         int refclk = 100000;
8892         const struct intel_limit *limit = &intel_limits_chv;
8893         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
8894
8895         memset(&crtc_state->dpll_hw_state, 0,
8896                sizeof(crtc_state->dpll_hw_state));
8897
8898         if (!crtc_state->clock_set &&
8899             !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8900                                 refclk, NULL, &crtc_state->dpll)) {
8901                 drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
8902                 return -EINVAL;
8903         }
8904
8905         chv_compute_dpll(crtc, crtc_state);
8906
8907         return 0;
8908 }
8909
8910 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
8911                                   struct intel_crtc_state *crtc_state)
8912 {
8913         int refclk = 100000;
8914         const struct intel_limit *limit = &intel_limits_vlv;
8915         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
8916
8917         memset(&crtc_state->dpll_hw_state, 0,
8918                sizeof(crtc_state->dpll_hw_state));
8919
8920         if (!crtc_state->clock_set &&
8921             !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8922                                 refclk, NULL, &crtc_state->dpll)) {
8923                 drm_err(&i915->drm,  "Couldn't find PLL settings for mode!\n");
8924                 return -EINVAL;
8925         }
8926
8927         vlv_compute_dpll(crtc, crtc_state);
8928
8929         return 0;
8930 }
8931
8932 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
8933 {
8934         if (IS_I830(dev_priv))
8935                 return false;
8936
8937         return INTEL_GEN(dev_priv) >= 4 ||
8938                 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
8939 }
8940
8941 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
8942 {
8943         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8944         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8945         u32 tmp;
8946
8947         if (!i9xx_has_pfit(dev_priv))
8948                 return;
8949
8950         tmp = intel_de_read(dev_priv, PFIT_CONTROL);
8951         if (!(tmp & PFIT_ENABLE))
8952                 return;
8953
8954         /* Check whether the pfit is attached to our pipe. */
8955         if (INTEL_GEN(dev_priv) < 4) {
8956                 if (crtc->pipe != PIPE_B)
8957                         return;
8958         } else {
8959                 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
8960                         return;
8961         }
8962
8963         crtc_state->gmch_pfit.control = tmp;
8964         crtc_state->gmch_pfit.pgm_ratios =
8965                 intel_de_read(dev_priv, PFIT_PGM_RATIOS);
8966 }
8967
8968 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
8969                                struct intel_crtc_state *pipe_config)
8970 {
8971         struct drm_device *dev = crtc->base.dev;
8972         struct drm_i915_private *dev_priv = to_i915(dev);
8973         enum pipe pipe = crtc->pipe;
8974         struct dpll clock;
8975         u32 mdiv;
8976         int refclk = 100000;
8977
8978         /* In case of DSI, DPLL will not be used */
8979         if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8980                 return;
8981
8982         vlv_dpio_get(dev_priv);
8983         mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
8984         vlv_dpio_put(dev_priv);
8985
8986         clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
8987         clock.m2 = mdiv & DPIO_M2DIV_MASK;
8988         clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
8989         clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
8990         clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
8991
8992         pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
8993 }
8994
/*
 * Read out the current hardware state of the primary plane and
 * reconstruct an initial framebuffer description from it (used to
 * inherit the BIOS/GOP framebuffer on driver load).
 *
 * On success plane_config->fb points to a freshly allocated
 * intel_framebuffer (ownership transfers to the caller via
 * plane_config); if the plane is disabled or allocation fails,
 * plane_config is left untouched.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
                              struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *plane = to_intel_plane(crtc->base.primary);
        enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
        enum pipe pipe;
        u32 val, base, offset;
        int fourcc, pixel_format;
        unsigned int aligned_height;
        struct drm_framebuffer *fb;
        struct intel_framebuffer *intel_fb;

        /* Nothing to read out if the plane is disabled. */
        if (!plane->get_hw_state(plane, &pipe))
                return;

        drm_WARN_ON(dev, pipe != crtc->pipe);

        intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
        if (!intel_fb) {
                drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
                return;
        }

        fb = &intel_fb->base;

        fb->dev = dev;

        val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));

        /* Tiling/rotation control bits only exist on gen4+. */
        if (INTEL_GEN(dev_priv) >= 4) {
                if (val & DISPPLANE_TILED) {
                        plane_config->tiling = I915_TILING_X;
                        fb->modifier = I915_FORMAT_MOD_X_TILED;
                }

                if (val & DISPPLANE_ROTATE_180)
                        plane_config->rotation = DRM_MODE_ROTATE_180;
        }

        /* CHV pipe B's primary plane additionally supports mirroring. */
        if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
            val & DISPPLANE_MIRROR)
                plane_config->rotation |= DRM_MODE_REFLECT_X;

        pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
        fourcc = i9xx_format_to_fourcc(pixel_format);
        fb->format = drm_format_info(fourcc);

        /* The surface base/offset register layout differs per platform. */
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                offset = intel_de_read(dev_priv, DSPOFFSET(i9xx_plane));
                base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
        } else if (INTEL_GEN(dev_priv) >= 4) {
                if (plane_config->tiling)
                        offset = intel_de_read(dev_priv,
                                               DSPTILEOFF(i9xx_plane));
                else
                        offset = intel_de_read(dev_priv,
                                               DSPLINOFF(i9xx_plane));
                base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
        } else {
                base = intel_de_read(dev_priv, DSPADDR(i9xx_plane));
        }
        plane_config->base = base;

        /* PIPESRC holds (width - 1) << 16 | (height - 1). */
        val = intel_de_read(dev_priv, PIPESRC(pipe));
        fb->width = ((val >> 16) & 0xfff) + 1;
        fb->height = ((val >> 0) & 0xfff) + 1;

        val = intel_de_read(dev_priv, DSPSTRIDE(i9xx_plane));
        fb->pitches[0] = val & 0xffffffc0;

        aligned_height = intel_fb_align_height(fb, 0, fb->height);

        /* Size estimate used to reserve the pre-allocated memory region. */
        plane_config->size = fb->pitches[0] * aligned_height;

        drm_dbg_kms(&dev_priv->drm,
                    "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
                    crtc->base.name, plane->base.name, fb->width, fb->height,
                    fb->format->cpp[0] * 8, base, fb->pitches[0],
                    plane_config->size);

        plane_config->fb = intel_fb;
}
9080
9081 static void chv_crtc_clock_get(struct intel_crtc *crtc,
9082                                struct intel_crtc_state *pipe_config)
9083 {
9084         struct drm_device *dev = crtc->base.dev;
9085         struct drm_i915_private *dev_priv = to_i915(dev);
9086         enum pipe pipe = crtc->pipe;
9087         enum dpio_channel port = vlv_pipe_to_channel(pipe);
9088         struct dpll clock;
9089         u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
9090         int refclk = 100000;
9091
9092         /* In case of DSI, DPLL will not be used */
9093         if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
9094                 return;
9095
9096         vlv_dpio_get(dev_priv);
9097         cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
9098         pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
9099         pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
9100         pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
9101         pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
9102         vlv_dpio_put(dev_priv);
9103
9104         clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
9105         clock.m2 = (pll_dw0 & 0xff) << 22;
9106         if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
9107                 clock.m2 |= pll_dw2 & 0x3fffff;
9108         clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
9109         clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
9110         clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
9111
9112         pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
9113 }
9114
9115 static enum intel_output_format
9116 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
9117 {
9118         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9119         u32 tmp;
9120
9121         tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
9122
9123         if (tmp & PIPEMISC_YUV420_ENABLE) {
9124                 /* We support 4:2:0 in full blend mode only */
9125                 drm_WARN_ON(&dev_priv->drm,
9126                             (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
9127
9128                 return INTEL_OUTPUT_FORMAT_YCBCR420;
9129         } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
9130                 return INTEL_OUTPUT_FORMAT_YCBCR444;
9131         } else {
9132                 return INTEL_OUTPUT_FORMAT_RGB;
9133         }
9134 }
9135
9136 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
9137 {
9138         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9139         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
9140         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9141         enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
9142         u32 tmp;
9143
9144         tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
9145
9146         if (tmp & DISPPLANE_GAMMA_ENABLE)
9147                 crtc_state->gamma_enable = true;
9148
9149         if (!HAS_GMCH(dev_priv) &&
9150             tmp & DISPPLANE_PIPE_CSC_ENABLE)
9151                 crtc_state->csc_enable = true;
9152 }
9153
/*
 * Read out the full hardware state of a GMCH-style (gen2-4/G4x/VLV/CHV)
 * pipe into @pipe_config.
 *
 * Returns true if the pipe is powered and enabled (in which case
 * @pipe_config is fully populated), false otherwise.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
                                 struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum intel_display_power_domain power_domain;
        intel_wakeref_t wakeref;
        u32 tmp;
        bool ret;

        /* Bail early if the pipe's power domain is off; don't wake it up. */
        power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
        wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (!wakeref)
                return false;

        /* Pipe and transcoder have a fixed 1:1 mapping on these platforms. */
        pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = NULL;

        ret = false;

        tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
        if (!(tmp & PIPECONF_ENABLE))
                goto out;

        /* Only these platforms encode the pipe bpc in PIPECONF. */
        if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
            IS_CHERRYVIEW(dev_priv)) {
                switch (tmp & PIPECONF_BPC_MASK) {
                case PIPECONF_6BPC:
                        pipe_config->pipe_bpp = 18;
                        break;
                case PIPECONF_8BPC:
                        pipe_config->pipe_bpp = 24;
                        break;
                case PIPECONF_10BPC:
                        pipe_config->pipe_bpp = 30;
                        break;
                default:
                        break;
                }
        }

        if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
            (tmp & PIPECONF_COLOR_RANGE_SELECT))
                pipe_config->limited_color_range = true;

        pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
                PIPECONF_GAMMA_MODE_SHIFT;

        if (IS_CHERRYVIEW(dev_priv))
                pipe_config->cgm_mode = intel_de_read(dev_priv,
                                                      CGM_PIPE_MODE(crtc->pipe));

        i9xx_get_pipe_color_config(pipe_config);
        intel_color_get_config(pipe_config);

        if (INTEL_GEN(dev_priv) < 4)
                pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

        intel_get_transcoder_timings(crtc, pipe_config);
        intel_get_pipe_src_size(crtc, pipe_config);

        i9xx_get_pfit_config(pipe_config);

        /* Pixel multiplier readout differs per generation. */
        if (INTEL_GEN(dev_priv) >= 4) {
                /* No way to read it out on pipes B and C */
                if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
                        tmp = dev_priv->chv_dpll_md[crtc->pipe];
                else
                        tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
                pipe_config->pixel_multiplier =
                        ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
                         >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
                pipe_config->dpll_hw_state.dpll_md = tmp;
        } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
                   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
                tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
                pipe_config->pixel_multiplier =
                        ((tmp & SDVO_MULTIPLIER_MASK)
                         >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
        } else {
                /* Note that on i915G/GM the pixel multiplier is in the sdvo
                 * port and will be fixed up in the encoder->get_config
                 * function. */
                pipe_config->pixel_multiplier = 1;
        }
        pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
                                                        DPLL(crtc->pipe));
        if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
                pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
                                                               FP0(crtc->pipe));
                pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
                                                               FP1(crtc->pipe));
        } else {
                /* Mask out read-only status bits. */
                pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
                                                     DPLL_PORTC_READY_MASK |
                                                     DPLL_PORTB_READY_MASK);
        }

        if (IS_CHERRYVIEW(dev_priv))
                chv_crtc_clock_get(crtc, pipe_config);
        else if (IS_VALLEYVIEW(dev_priv))
                vlv_crtc_clock_get(crtc, pipe_config);
        else
                i9xx_crtc_clock_get(crtc, pipe_config);

        /*
         * Normally the dotclock is filled in by the encoder .get_config()
         * but in case the pipe is enabled w/o any ports we need a sane
         * default.
         */
        pipe_config->hw.adjusted_mode.crtc_clock =
                pipe_config->port_clock / pipe_config->pixel_multiplier;

        ret = true;

out:
        intel_display_power_put(dev_priv, power_domain, wakeref);

        return ret;
}
9275
9276 static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
9277 {
9278         struct intel_encoder *encoder;
9279         int i;
9280         u32 val, final;
9281         bool has_lvds = false;
9282         bool has_cpu_edp = false;
9283         bool has_panel = false;
9284         bool has_ck505 = false;
9285         bool can_ssc = false;
9286         bool using_ssc_source = false;
9287
9288         /* We need to take the global config into account */
9289         for_each_intel_encoder(&dev_priv->drm, encoder) {
9290                 switch (encoder->type) {
9291                 case INTEL_OUTPUT_LVDS:
9292                         has_panel = true;
9293                         has_lvds = true;
9294                         break;
9295                 case INTEL_OUTPUT_EDP:
9296                         has_panel = true;
9297                         if (encoder->port == PORT_A)
9298                                 has_cpu_edp = true;
9299                         break;
9300                 default:
9301                         break;
9302                 }
9303         }
9304
9305         if (HAS_PCH_IBX(dev_priv)) {
9306                 has_ck505 = dev_priv->vbt.display_clock_mode;
9307                 can_ssc = has_ck505;
9308         } else {
9309                 has_ck505 = false;
9310                 can_ssc = true;
9311         }
9312
9313         /* Check if any DPLLs are using the SSC source */
9314         for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
9315                 u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));
9316
9317                 if (!(temp & DPLL_VCO_ENABLE))
9318                         continue;
9319
9320                 if ((temp & PLL_REF_INPUT_MASK) ==
9321                     PLLB_REF_INPUT_SPREADSPECTRUMIN) {
9322                         using_ssc_source = true;
9323                         break;
9324                 }
9325         }
9326
9327         drm_dbg_kms(&dev_priv->drm,
9328                     "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
9329                     has_panel, has_lvds, has_ck505, using_ssc_source);
9330
9331         /* Ironlake: try to setup display ref clock before DPLL
9332          * enabling. This is only under driver's control after
9333          * PCH B stepping, previous chipset stepping should be
9334          * ignoring this setting.
9335          */
9336         val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
9337
9338         /* As we must carefully and slowly disable/enable each source in turn,
9339          * compute the final state we want first and check if we need to
9340          * make any changes at all.
9341          */
9342         final = val;
9343         final &= ~DREF_NONSPREAD_SOURCE_MASK;
9344         if (has_ck505)
9345                 final |= DREF_NONSPREAD_CK505_ENABLE;
9346         else
9347                 final |= DREF_NONSPREAD_SOURCE_ENABLE;
9348
9349         final &= ~DREF_SSC_SOURCE_MASK;
9350         final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
9351         final &= ~DREF_SSC1_ENABLE;
9352
9353         if (has_panel) {
9354                 final |= DREF_SSC_SOURCE_ENABLE;
9355
9356                 if (intel_panel_use_ssc(dev_priv) && can_ssc)
9357                         final |= DREF_SSC1_ENABLE;
9358
9359                 if (has_cpu_edp) {
9360                         if (intel_panel_use_ssc(dev_priv) && can_ssc)
9361                                 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
9362                         else
9363                                 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
9364                 } else
9365                         final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
9366         } else if (using_ssc_source) {
9367                 final |= DREF_SSC_SOURCE_ENABLE;
9368                 final |= DREF_SSC1_ENABLE;
9369         }
9370
9371         if (final == val)
9372                 return;
9373
9374         /* Always enable nonspread source */
9375         val &= ~DREF_NONSPREAD_SOURCE_MASK;
9376
9377         if (has_ck505)
9378                 val |= DREF_NONSPREAD_CK505_ENABLE;
9379         else
9380                 val |= DREF_NONSPREAD_SOURCE_ENABLE;
9381
9382         if (has_panel) {
9383                 val &= ~DREF_SSC_SOURCE_MASK;
9384                 val |= DREF_SSC_SOURCE_ENABLE;
9385
9386                 /* SSC must be turned on before enabling the CPU output  */
9387                 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
9388                         drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
9389                         val |= DREF_SSC1_ENABLE;
9390                 } else
9391                         val &= ~DREF_SSC1_ENABLE;
9392
9393                 /* Get SSC going before enabling the outputs */
9394                 intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
9395                 intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
9396                 udelay(200);
9397
9398                 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
9399
9400                 /* Enable CPU source on CPU attached eDP */
9401                 if (has_cpu_edp) {
9402                         if (intel_panel_use_ssc(dev_priv) && can_ssc) {
9403                                 drm_dbg_kms(&dev_priv->drm,
9404                                             "Using SSC on eDP\n");
9405                                 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
9406                         } else
9407                                 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
9408                 } else
9409                         val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
9410
9411                 intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
9412                 intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
9413                 udelay(200);
9414         } else {
9415                 drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");
9416
9417                 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
9418
9419                 /* Turn off CPU output */
9420                 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
9421
9422                 intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
9423                 intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
9424                 udelay(200);
9425
9426                 if (!using_ssc_source) {
9427                         drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");
9428
9429                         /* Turn off the SSC source */
9430                         val &= ~DREF_SSC_SOURCE_MASK;
9431                         val |= DREF_SSC_SOURCE_DISABLE;
9432
9433                         /* Turn off SSC1 */
9434                         val &= ~DREF_SSC1_ENABLE;
9435
9436                         intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
9437                         intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
9438                         udelay(200);
9439                 }
9440         }
9441
9442         BUG_ON(val != final);
9443 }
9444
/*
 * Pulse the FDI mPHY reset via SOUTH_CHICKEN2: assert the IOSFSB reset
 * control, poll for the status bit, then de-assert and poll again.
 * Timeouts are only logged; the caller proceeds regardless.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
        u32 tmp;

        tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
        tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
        intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);

        if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
                        FDI_MPHY_IOSFSB_RESET_STATUS, 100))
                drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");

        tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
        tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
        intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);

        if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
                         FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
                drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
}
9465
/* WaMPhyProgramming:hsw */
/*
 * Program the FDI mPHY tuning values over the sideband (SBI_MPHY)
 * interface. The register offsets and values are opaque magic taken
 * from the workaround; note most registers are written twice, once
 * per channel, with the second channel's copy at offset +0x100.
 * Do not "clean up" these constants.
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
        u32 tmp;

        tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
        tmp &= ~(0xFF << 24);
        tmp |= (0x12 << 24);
        intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
        tmp |= (1 << 11);
        intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
        tmp |= (1 << 11);
        intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
        tmp |= (1 << 24) | (1 << 21) | (1 << 18);
        intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
        tmp |= (1 << 24) | (1 << 21) | (1 << 18);
        intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
        tmp &= ~(7 << 13);
        tmp |= (5 << 13);
        intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
        tmp &= ~(7 << 13);
        tmp |= (5 << 13);
        intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
        tmp &= ~0xFF;
        tmp |= 0x1C;
        intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
        tmp &= ~0xFF;
        tmp |= 0x1C;
        intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
        tmp &= ~(0xFF << 16);
        tmp |= (0x1C << 16);
        intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
        tmp &= ~(0xFF << 16);
        tmp |= (0x1C << 16);
        intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
        tmp |= (1 << 27);
        intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
        tmp |= (1 << 27);
        intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
        tmp &= ~(0xF << 28);
        tmp |= (4 << 28);
        intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
        tmp &= ~(0xF << 28);
        tmp |= (4 << 28);
        intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
9540
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
                                 bool with_spread, bool with_fdi)
{
        u32 reg, tmp;

        /* Sanitize impossible parameter combinations rather than failing. */
        if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
                     "FDI requires downspread\n"))
                with_spread = true;
        if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
                     with_fdi, "LP PCH doesn't have FDI\n"))
                with_fdi = false;

        mutex_lock(&dev_priv->sb_lock);

        /* Un-disable the SSC block but keep the path alternate (bypass) set. */
        tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
        tmp &= ~SBI_SSCCTL_DISABLE;
        tmp |= SBI_SSCCTL_PATHALT;
        intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

        udelay(24);

        if (with_spread) {
                /* Clear the path alternate bit to get the spread clock. */
                tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
                tmp &= ~SBI_SSCCTL_PATHALT;
                intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

                if (with_fdi) {
                        lpt_reset_fdi_mphy(dev_priv);
                        lpt_program_fdi_mphy(dev_priv);
                }
        }

        /* The buffer-enable control lives in a different register on LP PCH. */
        reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
        tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
        tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
        intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

        mutex_unlock(&dev_priv->sb_lock);
}
9586
/* Sequence to disable CLKOUT_DP */
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
        u32 reg, tmp;

        mutex_lock(&dev_priv->sb_lock);

        /* Drop the buffer-enable bit first (register differs on LP PCH). */
        reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
        tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
        tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
        intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

        tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
        if (!(tmp & SBI_SSCCTL_DISABLE)) {
                /* Set the path alternate (bypass) before disabling SSC. */
                if (!(tmp & SBI_SSCCTL_PATHALT)) {
                        tmp |= SBI_SSCCTL_PATHALT;
                        intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
                        udelay(32);
                }
                tmp |= SBI_SSCCTL_DISABLE;
                intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
        }

        mutex_unlock(&dev_priv->sb_lock);
}
9612
/* Map a bend amount in steps (-50..50, multiples of 5) to a table index. */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * SBI_SSCDIVINTPHASE values per bend amount, indexed via BEND_IDX().
 * Adjacent pairs of steps share the same divider value; the odd
 * multiples of 5 are distinguished by the dither phase written in
 * lpt_bend_clkout_dp().
 */
static const u16 sscdivintphase[] = {
        [BEND_IDX( 50)] = 0x3B23,
        [BEND_IDX( 45)] = 0x3B23,
        [BEND_IDX( 40)] = 0x3C23,
        [BEND_IDX( 35)] = 0x3C23,
        [BEND_IDX( 30)] = 0x3D23,
        [BEND_IDX( 25)] = 0x3D23,
        [BEND_IDX( 20)] = 0x3E23,
        [BEND_IDX( 15)] = 0x3E23,
        [BEND_IDX( 10)] = 0x3F23,
        [BEND_IDX(  5)] = 0x3F23,
        [BEND_IDX(  0)] = 0x0025,
        [BEND_IDX( -5)] = 0x0025,
        [BEND_IDX(-10)] = 0x0125,
        [BEND_IDX(-15)] = 0x0125,
        [BEND_IDX(-20)] = 0x0225,
        [BEND_IDX(-25)] = 0x0225,
        [BEND_IDX(-30)] = 0x0325,
        [BEND_IDX(-35)] = 0x0325,
        [BEND_IDX(-40)] = 0x0425,
        [BEND_IDX(-45)] = 0x0425,
        [BEND_IDX(-50)] = 0x0525,
};

/*
 * Bend CLKOUT_DP
 * steps -50 to 50 inclusive, in steps of 5
 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
 * change in clock period = -(steps / 10) * 5.787 ps
 */
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
{
        u32 tmp;
        int idx = BEND_IDX(steps);

        /* Reject out-of-range or non-multiple-of-5 step counts. */
        if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
                return;

        if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
                return;

        mutex_lock(&dev_priv->sb_lock);

        /* Odd multiples of 5 need the dither phase; even ones don't. */
        if (steps % 10 != 0)
                tmp = 0xAAAAAAAB;
        else
                tmp = 0x00000000;
        intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);

        /* Only the low 16 bits hold the divider/phase value. */
        tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
        tmp &= 0xffff0000;
        tmp |= sscdivintphase[idx];
        intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);

        mutex_unlock(&dev_priv->sb_lock);
}

#undef BEND_IDX
9673
9674 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
9675 {
9676         u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
9677         u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
9678
9679         if ((ctl & SPLL_PLL_ENABLE) == 0)
9680                 return false;
9681
9682         if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
9683             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9684                 return true;
9685
9686         if (IS_BROADWELL(dev_priv) &&
9687             (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
9688                 return true;
9689
9690         return false;
9691 }
9692
9693 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
9694                                enum intel_dpll_id id)
9695 {
9696         u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
9697         u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
9698
9699         if ((ctl & WRPLL_PLL_ENABLE) == 0)
9700                 return false;
9701
9702         if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
9703                 return true;
9704
9705         if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
9706             (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
9707             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9708                 return true;
9709
9710         return false;
9711 }
9712
/*
 * LPT variant of the PCH reference clock setup: record which enabled
 * SPLL/WRPLLs still use the PCH SSC reference (as left by the BIOS)
 * and, only if none do, (re)program CLKOUT_DP according to whether an
 * FDI output is present.
 */
static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;
        bool has_fdi = false;

        /* An analog (CRT) encoder is what requires FDI here. */
        for_each_intel_encoder(&dev_priv->drm, encoder) {
                switch (encoder->type) {
                case INTEL_OUTPUT_ANALOG:
                        has_fdi = true;
                        break;
                default:
                        break;
                }
        }

        /*
         * The BIOS may have decided to use the PCH SSC
         * reference so we must not disable it until the
         * relevant PLLs have stopped relying on it. We'll
         * just leave the PCH SSC reference enabled in case
         * any active PLL is using it. It will get disabled
         * after runtime suspend if we don't have FDI.
         *
         * TODO: Move the whole reference clock handling
         * to the modeset sequence proper so that we can
         * actually enable/disable/reconfigure these things
         * safely. To do that we need to introduce a real
         * clock hierarchy. That would also allow us to do
         * clock bending finally.
         */
        dev_priv->pch_ssc_use = 0;

        if (spll_uses_pch_ssc(dev_priv)) {
                drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
                dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
        }

        if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
                drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
                dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
        }

        if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
                drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
                dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
        }

        /* Leave the SSC reference alone while any PLL still uses it. */
        if (dev_priv->pch_ssc_use)
                return;

        if (has_fdi) {
                lpt_bend_clkout_dp(dev_priv, 0);
                lpt_enable_clkout_dp(dev_priv, true, true);
        } else {
                lpt_disable_clkout_dp(dev_priv);
        }
}
9770
9771 /*
9772  * Initialize reference clocks when the driver loads
9773  */
9774 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
9775 {
9776         if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
9777                 ilk_init_pch_refclk(dev_priv);
9778         else if (HAS_PCH_LPT(dev_priv))
9779                 lpt_init_pch_refclk(dev_priv);
9780 }
9781
/*
 * Program PIPECONF for an ILK-style (PCH platform) pipe from the crtc
 * state: bpc, dithering, interlacing, color range, legacy gamma mode
 * and frame start delay.
 */
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        u32 val;

        val = 0;

        switch (crtc_state->pipe_bpp) {
        case 18:
                val |= PIPECONF_6BPC;
                break;
        case 24:
                val |= PIPECONF_8BPC;
                break;
        case 30:
                val |= PIPECONF_10BPC;
                break;
        case 36:
                val |= PIPECONF_12BPC;
                break;
        default:
                /* Case prevented by intel_choose_pipe_bpp_dither. */
                BUG();
        }

        if (crtc_state->dither)
                val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

        if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
                val |= PIPECONF_INTERLACED_ILK;
        else
                val |= PIPECONF_PROGRESSIVE;

        /*
         * This would end up with an odd purple hue over
         * the entire display. Make sure we don't do it.
         */
        drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
                    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);

        if (crtc_state->limited_color_range &&
            !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
                val |= PIPECONF_COLOR_RANGE_SELECT;

        if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
                val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;

        val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

        /* Always use the minimum (zero) frame start delay. */
        val |= PIPECONF_FRAME_START_DELAY(0);

        intel_de_write(dev_priv, PIPECONF(pipe), val);
        intel_de_posting_read(dev_priv, PIPECONF(pipe));
}
9838
9839 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
9840 {
9841         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9842         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9843         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
9844         u32 val = 0;
9845
9846         if (IS_HASWELL(dev_priv) && crtc_state->dither)
9847                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9848
9849         if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9850                 val |= PIPECONF_INTERLACED_ILK;
9851         else
9852                 val |= PIPECONF_PROGRESSIVE;
9853
9854         if (IS_HASWELL(dev_priv) &&
9855             crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
9856                 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
9857
9858         intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
9859         intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
9860 }
9861
/*
 * Program PIPEMISC for BDW+: dither bpc/enable, YUV output colorspace,
 * YCbCr 4:2:0 blending, and the gen11+/gen12+ precision/rounding bits.
 */
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 val = 0;

	/* pipe_bpp is the total bpp, i.e. 3 * bits per color component */
	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPEMISC_DITHER_6_BPC;
		break;
	case 24:
		val |= PIPEMISC_DITHER_8_BPC;
		break;
	case 30:
		val |= PIPEMISC_DITHER_10_BPC;
		break;
	case 36:
		val |= PIPEMISC_DITHER_12_BPC;
		break;
	default:
		/* unexpected bpp: warn, leave the bpc field at 0 */
		MISSING_CASE(crtc_state->pipe_bpp);
		break;
	}

	if (crtc_state->dither)
		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;

	/* 4:2:0 additionally needs the subsampling/blend mode enabled */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		val |= PIPEMISC_YUV420_ENABLE |
			PIPEMISC_YUV420_MODE_FULL_BLEND;

	/*
	 * Gen11+: use reduced HDR precision when only HDR-capable planes
	 * (plus the cursor) are active on the pipe.
	 */
	if (INTEL_GEN(dev_priv) >= 11 &&
	    (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
					   BIT(PLANE_CURSOR))) == 0)
		val |= PIPEMISC_HDR_MODE_PRECISION;

	if (INTEL_GEN(dev_priv) >= 12)
		val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;

	intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
}
9907
9908 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
9909 {
9910         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9911         u32 tmp;
9912
9913         tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
9914
9915         switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
9916         case PIPEMISC_DITHER_6_BPC:
9917                 return 18;
9918         case PIPEMISC_DITHER_8_BPC:
9919                 return 24;
9920         case PIPEMISC_DITHER_10_BPC:
9921                 return 30;
9922         case PIPEMISC_DITHER_12_BPC:
9923                 return 36;
9924         default:
9925                 MISSING_CASE(tmp);
9926                 return 0;
9927         }
9928 }
9929
9930 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
9931 {
9932         /*
9933          * Account for spread spectrum to avoid
9934          * oversubscribing the link. Max center spread
9935          * is 2.5%; use 5% for safety's sake.
9936          */
9937         u32 bps = target_clock * bpp * 21 / 20;
9938         return DIV_ROUND_UP(bps, link_bw * 8);
9939 }
9940
/* Whether the feedback coarse/fine tune bit should be set: true when the
 * effective M divider is small relative to factor * N. */
static bool ilk_needs_fb_cb_tune(struct dpll *dpll, int factor)
{
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}
9945
/*
 * Compute the ILK-family PCH DPLL register values (DPLL, FP0, FP1) for
 * @crtc_state and store them in crtc_state->dpll_hw_state. @reduced_clock,
 * if non-NULL, supplies the divider values used for the FP1 (downclocked)
 * programming; otherwise FP1 mirrors FP0.
 */
static void ilk_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *crtc_state,
			     struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) &&
		     intel_is_dual_link_lvds(dev_priv)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock) {
		factor = 20;
	}

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ilk_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	/* FP1 carries the reduced clock dividers when one is provided */
	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_NUM_PIPES(dev_priv) == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* Use the SSC reference only for LVDS panels that want it */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}
10047
/*
 * Compute PLL state for an ILK-family crtc: pick the refclk and divider
 * limits, find divider values, fill dpll_hw_state, and reserve a shared
 * DPLL. Returns 0 on success or -EINVAL if no PLL settings/PLL could be
 * found.
 */
static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);
	const struct intel_limit *limit;
	int refclk = 120000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (!crtc_state->has_pch_encoder)
		return 0;

	/* Limits differ for LVDS (dual vs. single link, SSC 100MHz) vs. DAC */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "using SSC reference clock of %d kHz\n",
				    dev_priv->vbt.lvds_ssc_freq);
			refclk = dev_priv->vbt.lvds_ssc_freq;
		}

		if (intel_is_dual_link_lvds(dev_priv)) {
			if (refclk == 100000)
				limit = &ilk_limits_dual_lvds_100m;
			else
				limit = &ilk_limits_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &ilk_limits_single_lvds_100m;
			else
				limit = &ilk_limits_single_lvds;
		}
	} else {
		limit = &ilk_limits_dac;
	}

	/* Respect dividers forced by userspace (clock_set) */
	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		drm_err(&dev_priv->drm,
			"Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	ilk_compute_dpll(crtc, crtc_state, NULL);

	if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to find PLL for pipe %c\n",
			    pipe_name(crtc->pipe));
		return -EINVAL;
	}

	return 0;
}
10106
10107 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
10108                                          struct intel_link_m_n *m_n)
10109 {
10110         struct drm_device *dev = crtc->base.dev;
10111         struct drm_i915_private *dev_priv = to_i915(dev);
10112         enum pipe pipe = crtc->pipe;
10113
10114         m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
10115         m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
10116         m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
10117                 & ~TU_SIZE_MASK;
10118         m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
10119         m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
10120                     & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
10121 }
10122
/*
 * Read back the CPU transcoder link M1/N1 (and, when present, M2/N2)
 * values for state readout. On gen5+ the per-transcoder registers are
 * used; older platforms use the per-pipe G4X registers. @m2_n2 may be
 * NULL when the caller doesn't care about the second set.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 5) {
		m_n->link_m = intel_de_read(dev_priv,
					    PIPE_LINK_M1(transcoder));
		m_n->link_n = intel_de_read(dev_priv,
					    PIPE_LINK_N1(transcoder));
		/* DATA_M1 packs the data M value and the TU size */
		m_n->gmch_m = intel_de_read(dev_priv,
					    PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = intel_de_read(dev_priv,
					    PIPE_DATA_N1(transcoder));
		/* TU size is stored minus one in the register */
		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;

		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
			m2_n2->link_m = intel_de_read(dev_priv,
						      PIPE_LINK_M2(transcoder));
			m2_n2->link_n = intel_de_read(dev_priv,
							     PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = intel_de_read(dev_priv,
							     PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n = intel_de_read(dev_priv,
							     PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
		m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
10167
10168 void intel_dp_get_m_n(struct intel_crtc *crtc,
10169                       struct intel_crtc_state *pipe_config)
10170 {
10171         if (pipe_config->has_pch_encoder)
10172                 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
10173         else
10174                 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
10175                                              &pipe_config->dp_m_n,
10176                                              &pipe_config->dp_m2_n2);
10177 }
10178
/* Read back the FDI link M/N values from the CPU transcoder registers. */
static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
10185
10186 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
10187                                   u32 pos, u32 size)
10188 {
10189         drm_rect_init(&crtc_state->pch_pfit.dst,
10190                       pos >> 16, pos & 0xffff,
10191                       size >> 16, size & 0xffff);
10192 }
10193
/*
 * Read back the SKL+ pipe scaler state: find the scaler (if any) bound
 * to the pipe itself (not to a plane), record its window as the pfit
 * destination, and update the crtc's scaler bookkeeping.
 */
static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
	int id = -1;
	int i;

	/* find scaler attached to this pipe */
	for (i = 0; i < crtc->num_scalers; i++) {
		u32 ctl, pos, size;

		ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
		/* enabled scalers with a plane selected belong to a plane */
		if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
			continue;

		id = i;
		crtc_state->pch_pfit.enabled = true;

		pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
		size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));

		ilk_get_pfit_pos_size(crtc_state, pos, size);

		scaler_state->scalers[i].in_use = true;
		break;
	}

	/* id < 0 means no scaler is attached to the crtc */
	scaler_state->scaler_id = id;
	if (id >= 0)
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	else
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
}
10228
/*
 * Reconstruct the firmware/BIOS-programmed primary plane configuration
 * on SKL+ by reading back the plane registers (format, tiling, rotation,
 * surface base, size and stride) and building a framebuffer description
 * from them. On any unsupported configuration the function bails out and
 * simply leaves plane_config->fb NULL.
 */
static void
skl_get_initial_plane_config(struct intel_crtc *crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum plane_id plane_id = plane->id;
	enum pipe pipe;
	u32 val, base, offset, stride_mult, tiling, alpha;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	if (!plane->get_hw_state(plane, &pipe))
		return;

	drm_WARN_ON(dev, pipe != crtc->pipe);

	if (crtc_state->bigjoiner) {
		drm_dbg_kms(&dev_priv->drm,
			    "Unsupported bigjoiner configuration for initial FB\n");
		return;
	}

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = intel_de_read(dev_priv, PLANE_CTL(pipe, plane_id));

	/* the pixel format field layout changed on gen11 */
	if (INTEL_GEN(dev_priv) >= 11)
		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
	else
		pixel_format = val & PLANE_CTL_FORMAT_MASK;

	/* alpha mode moved to PLANE_COLOR_CTL on GLK/gen10+ */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		alpha = intel_de_read(dev_priv,
				      PLANE_COLOR_CTL(pipe, plane_id));
		alpha &= PLANE_COLOR_ALPHA_MASK;
	} else {
		alpha = val & PLANE_CTL_ALPHA_MASK;
	}

	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX, alpha);
	fb->format = drm_format_info(fourcc);

	/* map the hw tiling + compression bits to a framebuffer modifier */
	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier = DRM_FORMAT_MOD_LINEAR;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		plane_config->tiling = I915_TILING_Y;
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = INTEL_GEN(dev_priv) >= 12 ?
				I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS :
				I915_FORMAT_MOD_Y_TILED_CCS;
		else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, thats why this swapping.
	 */
	switch (val & PLANE_CTL_ROTATE_MASK) {
	case PLANE_CTL_ROTATE_0:
		plane_config->rotation = DRM_MODE_ROTATE_0;
		break;
	case PLANE_CTL_ROTATE_90:
		plane_config->rotation = DRM_MODE_ROTATE_270;
		break;
	case PLANE_CTL_ROTATE_180:
		plane_config->rotation = DRM_MODE_ROTATE_180;
		break;
	case PLANE_CTL_ROTATE_270:
		plane_config->rotation = DRM_MODE_ROTATE_90;
		break;
	}

	if (INTEL_GEN(dev_priv) >= 10 &&
	    val & PLANE_CTL_FLIP_HORIZONTAL)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	/* 90/270 degree rotation would require extra work */
	if (drm_rotation_90_or_270(plane_config->rotation))
		goto error;

	base = intel_de_read(dev_priv, PLANE_SURF(pipe, plane_id)) & 0xfffff000;
	plane_config->base = base;

	offset = intel_de_read(dev_priv, PLANE_OFFSET(pipe, plane_id));

	/* PLANE_SIZE stores dimensions minus one */
	val = intel_de_read(dev_priv, PLANE_SIZE(pipe, plane_id));
	fb->height = ((val >> 16) & 0xffff) + 1;
	fb->width = ((val >> 0) & 0xffff) + 1;

	/* the stride register is in units that depend on format/modifier */
	val = intel_de_read(dev_priv, PLANE_STRIDE(pipe, plane_id));
	stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	drm_dbg_kms(&dev_priv->drm,
		    "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		    crtc->base.name, plane->base.name, fb->width, fb->height,
		    fb->format->cpp[0] * 8, base, fb->pitches[0],
		    plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}
10372
/*
 * Read back the ILK-style panel fitter state: enable bit and window
 * position/size.
 */
static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 ctl, pos, size;

	ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
	if ((ctl & PF_ENABLE) == 0)
		return;

	crtc_state->pch_pfit.enabled = true;

	pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
	size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));

	ilk_get_pfit_pos_size(crtc_state, pos, size);

	/*
	 * We currently do not free assignments of panel fitters on
	 * ivb/hsw (since we don't use the higher upscaling modes which
	 * differentiates them) so just WARN about this case for now.
	 */
	drm_WARN_ON(&dev_priv->drm, IS_GEN(dev_priv, 7) &&
		    (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
}
10398
/*
 * Read back the full hardware state of an ILK-family pipe into
 * @pipe_config: pipe enable/bpc/colorspace, color management, FDI and
 * PCH PLL state, timings and panel fitter. Returns false when the pipe's
 * power domain is off or the pipe is disabled; true when the state was
 * read successfully. Holds a power domain wakeref for the duration.
 */
static bool ilk_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	/* on ILK-family hardware the cpu transcoder:pipe mapping is 1:1 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;
	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_6BPC:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_8BPC:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_10BPC:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_12BPC:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
	case PIPECONF_OUTPUT_COLORSPACE_YUV601:
	case PIPECONF_OUTPUT_COLORSPACE_YUV709:
		pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		break;
	default:
		pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
		break;
	}

	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
		PIPECONF_GAMMA_MODE_SHIFT;

	pipe_config->csc_mode = intel_de_read(dev_priv,
					      PIPE_CSC_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	/* FDI/PCH state only when the PCH transcoder is running */
	if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
		struct intel_shared_dpll *pll;
		enum intel_dpll_id pll_id;
		bool pll_active;

		pipe_config->has_pch_encoder = true;

		tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ilk_get_fdi_m_n_config(crtc, pipe_config);

		if (HAS_PCH_IBX(dev_priv)) {
			/*
			 * The pipe->pch transcoder and pch transcoder->pll
			 * mapping is fixed.
			 */
			pll_id = (enum intel_dpll_id) crtc->pipe;
		} else {
			/* CPT: read the pipe's PLL selection from PCH_DPLL_SEL */
			tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
				pll_id = DPLL_ID_PCH_PLL_B;
			else
				pll_id= DPLL_ID_PCH_PLL_A;
		}

		pipe_config->shared_dpll =
			intel_get_shared_dpll_by_id(dev_priv, pll_id);
		pll = pipe_config->shared_dpll;

		pll_active = intel_dpll_get_hw_state(dev_priv, pll,
						     &pipe_config->dpll_hw_state);
		drm_WARN_ON(dev, !pll_active);

		/* multiplier is stored minus one in the DPLL register */
		tmp = pipe_config->dpll_hw_state.dpll;
		pipe_config->pixel_multiplier =
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

		ilk_pch_clock_get(crtc, pipe_config);
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	intel_get_transcoder_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	ilk_get_pfit_config(pipe_config);

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
10518
10519 static int hsw_crtc_compute_clock(struct intel_crtc *crtc,
10520                                   struct intel_crtc_state *crtc_state)
10521 {
10522         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10523         struct intel_atomic_state *state =
10524                 to_intel_atomic_state(crtc_state->uapi.state);
10525
10526         if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
10527             INTEL_GEN(dev_priv) >= 11) {
10528                 struct intel_encoder *encoder =
10529                         intel_get_crtc_new_encoder(state, crtc_state);
10530
10531                 if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
10532                         drm_dbg_kms(&dev_priv->drm,
10533                                     "failed to find PLL for pipe %c\n",
10534                                     pipe_name(crtc->pipe));
10535                         return -EINVAL;
10536                 }
10537         }
10538
10539         return 0;
10540 }
10541
/*
 * Read out which shared DPLL is driving @port on DG1 and record that
 * PLL plus its hardware state in @pipe_config.
 */
static void dg1_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
                            struct intel_crtc_state *pipe_config)
{
	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
	enum phy phy = intel_port_to_phy(dev_priv, port);
	struct icl_port_dpll *port_dpll;
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;
	bool pll_active;
	u32 clk_sel;

	/* Decode the DPLL id from the per-PHY clock-select field. */
	clk_sel = intel_de_read(dev_priv, DG1_DPCLKA_CFGCR0(phy)) & DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
	id = DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_DPLL_MAP(clk_sel, phy);

	/* DG1 has DPLL0..DPLL3 only; anything else is a decode error. */
	if (WARN_ON(id > DPLL_ID_DG1_DPLL3))
		return;

	pll = intel_get_shared_dpll_by_id(dev_priv, id);
	port_dpll = &pipe_config->icl_port_dplls[port_dpll_id];

	port_dpll->pll = pll;
	pll_active = intel_dpll_get_hw_state(dev_priv, pll,
					     &port_dpll->hw_state);
	/* A PLL feeding an active port is expected to be enabled. */
	drm_WARN_ON(&dev_priv->drm, !pll_active);

	icl_set_active_port_dpll(pipe_config, port_dpll_id);
}
10569
/*
 * Read out which shared DPLL is driving @port on gen11+ (ICL and
 * derivatives) and record it in @pipe_config. Combo PHYs decode the
 * PLL id from DPCLKA_CFGCR0; Type-C PHYs use DDI_CLK_SEL to pick
 * between the MG PHY PLL and the TBT PLL.
 */
static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
                            struct intel_crtc_state *pipe_config)
{
	enum phy phy = intel_port_to_phy(dev_priv, port);
	enum icl_port_dpll_id port_dpll_id;
	struct icl_port_dpll *port_dpll;
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;
	bool pll_active;
	u32 temp;

	if (intel_phy_is_combo(dev_priv, phy)) {
		u32 mask, shift;

		/* RKL packs the clock-select field differently than ICL. */
		if (IS_ROCKETLAKE(dev_priv)) {
			mask = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
			shift = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
		} else {
			mask = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
			shift = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
		}

		temp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0) & mask;
		id = temp >> shift;
		port_dpll_id = ICL_PORT_DPLL_DEFAULT;
	} else if (intel_phy_is_tc(dev_priv, phy)) {
		u32 clk_sel = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;

		if (clk_sel == DDI_CLK_SEL_MG) {
			/* Dedicated MG PHY PLL for this Type-C port. */
			id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
								    port));
			port_dpll_id = ICL_PORT_DPLL_MG_PHY;
		} else {
			/* Otherwise only the Thunderbolt PLL is valid. */
			drm_WARN_ON(&dev_priv->drm,
				    clk_sel < DDI_CLK_SEL_TBT_162);
			id = DPLL_ID_ICL_TBTPLL;
			port_dpll_id = ICL_PORT_DPLL_DEFAULT;
		}
	} else {
		drm_WARN(&dev_priv->drm, 1, "Invalid port %x\n", port);
		return;
	}

	pll = intel_get_shared_dpll_by_id(dev_priv, id);
	port_dpll = &pipe_config->icl_port_dplls[port_dpll_id];

	port_dpll->pll = pll;
	pll_active = intel_dpll_get_hw_state(dev_priv, pll,
					     &port_dpll->hw_state);
	/* A PLL feeding an active port is expected to be enabled. */
	drm_WARN_ON(&dev_priv->drm, !pll_active);

	icl_set_active_port_dpll(pipe_config, port_dpll_id);
}
10623
10624 static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
10625                             struct intel_crtc_state *pipe_config)
10626 {
10627         struct intel_shared_dpll *pll;
10628         enum intel_dpll_id id;
10629         bool pll_active;
10630         u32 temp;
10631
10632         temp = intel_de_read(dev_priv, DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
10633         id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
10634
10635         if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL2))
10636                 return;
10637
10638         pll = intel_get_shared_dpll_by_id(dev_priv, id);
10639
10640         pipe_config->shared_dpll = pll;
10641         pll_active = intel_dpll_get_hw_state(dev_priv, pll,
10642                                              &pipe_config->dpll_hw_state);
10643         drm_WARN_ON(&dev_priv->drm, !pll_active);
10644 }
10645
10646 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
10647                                 enum port port,
10648                                 struct intel_crtc_state *pipe_config)
10649 {
10650         struct intel_shared_dpll *pll;
10651         enum intel_dpll_id id;
10652         bool pll_active;
10653
10654         switch (port) {
10655         case PORT_A:
10656                 id = DPLL_ID_SKL_DPLL0;
10657                 break;
10658         case PORT_B:
10659                 id = DPLL_ID_SKL_DPLL1;
10660                 break;
10661         case PORT_C:
10662                 id = DPLL_ID_SKL_DPLL2;
10663                 break;
10664         default:
10665                 drm_err(&dev_priv->drm, "Incorrect port type\n");
10666                 return;
10667         }
10668
10669         pll = intel_get_shared_dpll_by_id(dev_priv, id);
10670
10671         pipe_config->shared_dpll = pll;
10672         pll_active = intel_dpll_get_hw_state(dev_priv, pll,
10673                                              &pipe_config->dpll_hw_state);
10674         drm_WARN_ON(&dev_priv->drm, !pll_active);
10675 }
10676
/*
 * Read out which shared DPLL is driving @port on SKL/KBL and record it
 * together with its hardware state in @pipe_config.
 */
static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
                            struct intel_crtc_state *pipe_config)
{
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;
	bool pll_active;
	u32 temp;

	temp = intel_de_read(dev_priv, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
	/*
	 * NOTE(review): the +1 presumably skips a per-port override bit so
	 * that only the 2-bit clock-select value remains — looks equivalent
	 * to DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port); confirm against i915_reg.h.
	 */
	id = temp >> (port * 3 + 1);

	if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL3))
		return;

	pll = intel_get_shared_dpll_by_id(dev_priv, id);

	pipe_config->shared_dpll = pll;
	pll_active = intel_dpll_get_hw_state(dev_priv, pll,
					     &pipe_config->dpll_hw_state);
	/* A PLL feeding an active port is expected to be enabled. */
	drm_WARN_ON(&dev_priv->drm, !pll_active);
}
10698
/*
 * Read out which PLL is clocking @port on HSW/BDW (from PORT_CLK_SEL)
 * and record it together with its hardware state in @pipe_config.
 */
static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
                            struct intel_crtc_state *pipe_config)
{
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;
	u32 ddi_pll_sel = intel_de_read(dev_priv, PORT_CLK_SEL(port));
	bool pll_active;

	/* Map the hardware clock-select encoding to a shared DPLL id. */
	switch (ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		id = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		id = DPLL_ID_WRPLL2;
		break;
	case PORT_CLK_SEL_SPLL:
		id = DPLL_ID_SPLL;
		break;
	case PORT_CLK_SEL_LCPLL_810:
		id = DPLL_ID_LCPLL_810;
		break;
	case PORT_CLK_SEL_LCPLL_1350:
		id = DPLL_ID_LCPLL_1350;
		break;
	case PORT_CLK_SEL_LCPLL_2700:
		id = DPLL_ID_LCPLL_2700;
		break;
	default:
		/* Unknown encodings are logged, then treated like "none". */
		MISSING_CASE(ddi_pll_sel);
		fallthrough;
	case PORT_CLK_SEL_NONE:
		return;
	}

	pll = intel_get_shared_dpll_by_id(dev_priv, id);

	pipe_config->shared_dpll = pll;
	pll_active = intel_dpll_get_hw_state(dev_priv, pll,
					     &pipe_config->dpll_hw_state);
	/* A PLL feeding an active port is expected to be enabled. */
	drm_WARN_ON(&dev_priv->drm, !pll_active);
}
10740
/*
 * Determine which CPU transcoder (pipe, eDP or DSI) is driving @crtc,
 * store it in @pipe_config->cpu_transcoder, and return whether that
 * transcoder's pipe is enabled. Acquires the transcoder power domain
 * into @power_domain_set; the caller releases the whole set.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
                                     struct intel_crtc_state *pipe_config,
                                     struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
	unsigned long enabled_panel_transcoders = 0;
	enum transcoder panel_transcoder;
	u32 tmp;

	/* Gen11+ also has dedicated DSI transcoders to consider. */
	if (INTEL_GEN(dev_priv) >= 11)
		panel_transcoder_mask |=
			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * and DSI transcoders handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	for_each_cpu_transcoder_masked(dev_priv, panel_transcoder,
				       panel_transcoder_mask) {
		bool force_thru = false;
		enum pipe trans_pipe;

		tmp = intel_de_read(dev_priv,
				    TRANS_DDI_FUNC_CTL(panel_transcoder));
		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			continue;

		/*
		 * Log all enabled ones, only use the first one.
		 *
		 * FIXME: This won't work for two separate DSI displays.
		 */
		enabled_panel_transcoders |= BIT(panel_transcoder);
		if (enabled_panel_transcoders != BIT(panel_transcoder))
			continue;

		/* Decode which pipe this panel transcoder is attached to. */
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			drm_WARN(dev, 1,
				 "unknown pipe linked to transcoder %s\n",
				 transcoder_name(panel_transcoder));
			fallthrough;
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
			force_thru = true;
			fallthrough;
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_pipe = PIPE_C;
			break;
		case TRANS_DDI_EDP_INPUT_D_ONOFF:
			trans_pipe = PIPE_D;
			break;
		}

		/* Override the default pipe->transcoder mapping. */
		if (trans_pipe == crtc->pipe) {
			pipe_config->cpu_transcoder = panel_transcoder;
			pipe_config->pch_pfit.force_thru = force_thru;
		}
	}

	/*
	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
	 */
	drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
		    enabled_panel_transcoders != BIT(TRANSCODER_EDP));

	if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
						       POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
		return false;

	tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
10828
/*
 * Check whether a BXT DSI transcoder is driving @crtc; if so store it
 * in @pipe_config->cpu_transcoder and return true. Acquired transcoder
 * power domains are tracked in @power_domain_set for the caller to
 * release.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
                                         struct intel_crtc_state *pipe_config,
                                         struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder;
	enum port port;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		/* DSI port A/C map to DSI transcoder A/C respectively. */
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
							       POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
			continue;

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* Skip ports routed to a different pipe. */
		tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
10874
/*
 * Figure out which DDI port feeds @crtc's transcoder, read out the PLL
 * driving that port via the per-platform helper, and detect a PCH
 * (FDI) encoder on pre-gen9 hardware.
 */
static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
                                   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	enum port port;
	u32 tmp;

	if (transcoder_is_dsi(cpu_transcoder)) {
		/* DSI transcoder A/C are tied to fixed ports. */
		port = (cpu_transcoder == TRANSCODER_DSI_A) ?
						PORT_A : PORT_B;
	} else {
		tmp = intel_de_read(dev_priv,
				    TRANS_DDI_FUNC_CTL(cpu_transcoder));
		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			return;
		/* The port field moved in the gen12 register layout. */
		if (INTEL_GEN(dev_priv) >= 12)
			port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
		else
			port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
	}

	/* Dispatch to the platform-specific DPLL readout. */
	if (IS_DG1(dev_priv))
		dg1_get_ddi_pll(dev_priv, port, pipe_config);
	else if (INTEL_GEN(dev_priv) >= 11)
		icl_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_CANNONLAKE(dev_priv))
		cnl_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_LP(dev_priv))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_BC(dev_priv))
		skl_get_ddi_pll(dev_priv, port, pipe_config);
	else
		hsw_get_ddi_pll(dev_priv, port, pipe_config);

	/*
	 * Haswell has only one FDI/PCH transcoder A, which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if (INTEL_GEN(dev_priv) < 9 &&
	    (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ilk_get_fdi_m_n_config(crtc, pipe_config);
	}
}
10926
/*
 * Read out the full hardware state of @crtc on HSW+ platforms into
 * @pipe_config. Returns true if the pipe is active. All power domains
 * acquired during readout are collected in a local set and released
 * before returning.
 */
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
                                struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_display_power_domain_set power_domain_set = { };
	bool active;
	u32 tmp;

	pipe_config->master_transcoder = INVALID_TRANSCODER;

	/* Bail out early if the pipe's power well is down. */
	if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
						       POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);

	/* A BXT DSI transcoder may drive the pipe instead. */
	if (IS_GEN9_LP(dev_priv) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
		drm_WARN_ON(&dev_priv->drm, active);
		active = true;
	}

	intel_dsc_get_config(pipe_config);

	if (!active) {
		/* bigjoiner slave doesn't enable transcoder */
		if (!pipe_config->bigjoiner_slave)
			goto out;

		active = true;
		pipe_config->pixel_multiplier = 1;

		/* we cannot read out most state, so don't bother.. */
		pipe_config->quirks |= PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE;
	} else if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
	    INTEL_GEN(dev_priv) >= 11) {
		hsw_get_ddi_port_state(crtc, pipe_config);
		intel_get_transcoder_timings(crtc, pipe_config);
	}

	intel_get_pipe_src_size(crtc, pipe_config);

	/* Output colorspace lives in PIPECONF on HSW, PIPEMISC on BDW+. */
	if (IS_HASWELL(dev_priv)) {
		u32 tmp = intel_de_read(dev_priv,
					PIPECONF(pipe_config->cpu_transcoder));

		if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		else
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	} else {
		pipe_config->output_format =
			bdw_get_pipemisc_output_format(crtc);
	}

	pipe_config->gamma_mode = intel_de_read(dev_priv,
						GAMMA_MODE(crtc->pipe));

	pipe_config->csc_mode = intel_de_read(dev_priv,
					      PIPE_CSC_MODE(crtc->pipe));

	/* Gamma/CSC enable bits moved to SKL_BOTTOM_COLOR on gen9+. */
	if (INTEL_GEN(dev_priv) >= 9) {
		tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));

		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
			pipe_config->gamma_enable = true;

		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
			pipe_config->csc_enable = true;
	} else {
		i9xx_get_pipe_color_config(pipe_config);
	}

	intel_color_get_config(pipe_config);

	tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
	pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		pipe_config->ips_linetime =
			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);

	/* Panel fitter state needs its own power domain. */
	if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
						      POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
		if (INTEL_GEN(dev_priv) >= 9)
			skl_get_pfit_config(pipe_config);
		else
			ilk_get_pfit_config(pipe_config);
	}

	if (hsw_crtc_supports_ips(crtc)) {
		if (IS_HASWELL(dev_priv))
			pipe_config->ips_enabled = intel_de_read(dev_priv,
								 IPS_CTL) & IPS_ENABLE;
		else {
			/*
			 * We cannot readout IPS state on broadwell, set to
			 * true so we can set it to a defined state on first
			 * commit.
			 */
			pipe_config->ips_enabled = true;
		}
	}

	if (pipe_config->bigjoiner_slave) {
		/* Cannot be read out as a slave, set to 0. */
		pipe_config->pixel_multiplier = 0;
	} else if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			intel_de_read(dev_priv,
				      PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

out:
	intel_display_power_put_all_in_set(dev_priv, &power_domain_set);

	return active;
}
11049
11050 static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
11051 {
11052         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
11053         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
11054
11055         if (!i915->display.get_pipe_config(crtc, crtc_state))
11056                 return false;
11057
11058         crtc_state->hw.active = true;
11059
11060         intel_crtc_readout_derived_state(crtc_state);
11061
11062         return true;
11063 }
11064
/*
 * VESA 640x480x72Hz mode to set on the pipe during load detection
 * (a minimal, universally supported mode for probing analog outputs).
 */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
11070
11071 struct drm_framebuffer *
11072 intel_framebuffer_create(struct drm_i915_gem_object *obj,
11073                          struct drm_mode_fb_cmd2 *mode_cmd)
11074 {
11075         struct intel_framebuffer *intel_fb;
11076         int ret;
11077
11078         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
11079         if (!intel_fb)
11080                 return ERR_PTR(-ENOMEM);
11081
11082         ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
11083         if (ret)
11084                 goto err;
11085
11086         return &intel_fb->base;
11087
11088 err:
11089         kfree(intel_fb);
11090         return ERR_PTR(ret);
11091 }
11092
11093 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
11094                                         struct drm_crtc *crtc)
11095 {
11096         struct drm_plane *plane;
11097         struct drm_plane_state *plane_state;
11098         int ret, i;
11099
11100         ret = drm_atomic_add_affected_planes(state, crtc);
11101         if (ret)
11102                 return ret;
11103
11104         for_each_new_plane_in_state(state, plane, plane_state, i) {
11105                 if (plane_state->crtc != crtc)
11106                         continue;
11107
11108                 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
11109                 if (ret)
11110                         return ret;
11111
11112                 drm_atomic_set_fb_for_plane(plane_state, NULL);
11113         }
11114
11115         return 0;
11116 }
11117
/*
 * Take over a pipe (the connector's current one, or the first unused
 * one the encoder can drive) and light it up with a fixed 640x480 mode
 * so that load-detection can run on @connector. The pre-takeover state
 * is duplicated into @old->restore_state for
 * intel_release_load_detect_pipe() to commit later.
 *
 * NOTE(review): declared to return int but mixes true/false with
 * -EDEADLK (only the deadlock case propagates an errno); callers
 * apparently treat any other nonzero as success — confirm before
 * relying on the return value as an error code.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
                               struct intel_load_detect_pipe *old,
                               struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(to_intel_connector(connector));
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		    connector->base.id, connector->name,
		    encoder->base.id, encoder->name);

	old->restore_state = NULL;

	drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		/* Skip crtcs this encoder can't drive. */
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Already in use; drop the lock and keep looking. */
		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "no pipe available for load-detect\n");
		ret = -ENODEV;
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	/* One state for the probe commit, one snapshot to restore later. */
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->uapi.active = true;

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
					   &load_detect_mode);
	if (ret)
		goto fail;

	/* Probe with all planes off to avoid scanning out stale fbs. */
	ret = intel_modeset_disable_planes(state, crtc);
	if (ret)
		goto fail;

	/* Snapshot the current connector/crtc/plane state for restore. */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = drm_atomic_add_affected_planes(restore_state, crtc);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to create a copy of old state to restore: %i\n",
			    ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to set mode on load-detect pipe\n");
		goto fail;
	}

	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return true;

fail:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	/* Deadlock must propagate so the caller can back off and retry. */
	if (ret == -EDEADLK)
		return ret;

	return false;
}
11276
11277 void intel_release_load_detect_pipe(struct drm_connector *connector,
11278                                     struct intel_load_detect_pipe *old,
11279                                     struct drm_modeset_acquire_ctx *ctx)
11280 {
11281         struct intel_encoder *intel_encoder =
11282                 intel_attached_encoder(to_intel_connector(connector));
11283         struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
11284         struct drm_encoder *encoder = &intel_encoder->base;
11285         struct drm_atomic_state *state = old->restore_state;
11286         int ret;
11287
11288         drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
11289                     connector->base.id, connector->name,
11290                     encoder->base.id, encoder->name);
11291
11292         if (!state)
11293                 return;
11294
11295         ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
11296         if (ret)
11297                 drm_dbg_kms(&i915->drm,
11298                             "Couldn't release load detect pipe: %i\n", ret);
11299         drm_atomic_state_put(state);
11300 }
11301
11302 static int i9xx_pll_refclk(struct drm_device *dev,
11303                            const struct intel_crtc_state *pipe_config)
11304 {
11305         struct drm_i915_private *dev_priv = to_i915(dev);
11306         u32 dpll = pipe_config->dpll_hw_state.dpll;
11307
11308         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
11309                 return dev_priv->vbt.lvds_ssc_freq;
11310         else if (HAS_PCH_SPLIT(dev_priv))
11311                 return 120000;
11312         else if (!IS_GEN(dev_priv, 2))
11313                 return 96000;
11314         else
11315                 return 48000;
11316 }
11317
/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
                                struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        u32 dpll = pipe_config->dpll_hw_state.dpll;
        u32 fp;
        struct dpll clock;
        int port_clock;
        int refclk = i9xx_pll_refclk(dev, pipe_config);

        /* The rate-select bit picks which FP divisor register is active. */
        if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
                fp = pipe_config->dpll_hw_state.fp0;
        else
                fp = pipe_config->dpll_hw_state.fp1;

        /* Decode the M/N divisors from the FP register. */
        clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
        if (IS_PINEVIEW(dev_priv)) {
                /* Pineview encodes N one-hot; ffs()-1 recovers the value. */
                clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
                clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
        } else {
                clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
                clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
        }

        if (!IS_GEN(dev_priv, 2)) {
                /* gen3+: P1 is stored one-hot in the DPLL register itself. */
                if (IS_PINEVIEW(dev_priv))
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
                                DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
                else
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
                               DPLL_FPA01_P1_POST_DIV_SHIFT);

                /* P2 depends on the DPLL operating mode (DAC vs LVDS). */
                switch (dpll & DPLL_MODE_MASK) {
                case DPLLB_MODE_DAC_SERIAL:
                        clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
                                5 : 10;
                        break;
                case DPLLB_MODE_LVDS:
                        clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
                                7 : 14;
                        break;
                default:
                        /* Unknown mode: leave port_clock untouched and bail. */
                        drm_dbg_kms(&dev_priv->drm,
                                    "Unknown DPLL mode %08x in programmed "
                                    "mode\n", (int)(dpll & DPLL_MODE_MASK));
                        return;
                }

                if (IS_PINEVIEW(dev_priv))
                        port_clock = pnv_calc_dpll_params(refclk, &clock);
                else
                        port_clock = i9xx_calc_dpll_params(refclk, &clock);
        } else {
                /* gen2: i830 has no LVDS register at all, so read 0 there. */
                u32 lvds = IS_I830(dev_priv) ? 0 : intel_de_read(dev_priv,
                                                                 LVDS);
                /* NOTE(review): pipe == 1 is PIPE_B, the only LVDS pipe here. */
                bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

                if (is_lvds) {
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
                                       DPLL_FPA01_P1_POST_DIV_SHIFT);

                        if (lvds & LVDS_CLKB_POWER_UP)
                                clock.p2 = 7;
                        else
                                clock.p2 = 14;
                } else {
                        if (dpll & PLL_P1_DIVIDE_BY_TWO)
                                clock.p1 = 2;
                        else {
                                /* Field stores P1-2, so add the bias back. */
                                clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
                                            DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
                        }
                        if (dpll & PLL_P2_DIVIDE_BY_4)
                                clock.p2 = 4;
                        else
                                clock.p2 = 2;
                }

                port_clock = i9xx_calc_dpll_params(refclk, &clock);
        }

        /*
         * This value includes pixel_multiplier. We will use
         * port_clock to compute adjusted_mode.crtc_clock in the
         * encoder's get_config() function.
         */
        pipe_config->port_clock = port_clock;
}
11409
11410 int intel_dotclock_calculate(int link_freq,
11411                              const struct intel_link_m_n *m_n)
11412 {
11413         /*
11414          * The calculation for the data clock is:
11415          * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
11416          * But we want to avoid losing precison if possible, so:
11417          * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
11418          *
11419          * and the link clock is simpler:
11420          * link_clock = (m * link_clock) / n
11421          */
11422
11423         if (!m_n->link_n)
11424                 return 0;
11425
11426         return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
11427 }
11428
11429 static void ilk_pch_clock_get(struct intel_crtc *crtc,
11430                               struct intel_crtc_state *pipe_config)
11431 {
11432         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11433
11434         /* read out port_clock from the DPLL */
11435         i9xx_crtc_clock_get(crtc, pipe_config);
11436
11437         /*
11438          * In case there is an active pipe without active ports,
11439          * we may need some idea for the dotclock anyway.
11440          * Calculate one based on the FDI configuration.
11441          */
11442         pipe_config->hw.adjusted_mode.crtc_clock =
11443                 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
11444                                          &pipe_config->fdi_m_n);
11445 }
11446
11447 static void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
11448                                    struct intel_crtc *crtc)
11449 {
11450         memset(crtc_state, 0, sizeof(*crtc_state));
11451
11452         __drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base);
11453
11454         crtc_state->cpu_transcoder = INVALID_TRANSCODER;
11455         crtc_state->master_transcoder = INVALID_TRANSCODER;
11456         crtc_state->hsw_workaround_pipe = INVALID_PIPE;
11457         crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID;
11458         crtc_state->scaler_state.scaler_id = -1;
11459         crtc_state->mst_master_transcoder = INVALID_TRANSCODER;
11460 }
11461
11462 static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
11463 {
11464         struct intel_crtc_state *crtc_state;
11465
11466         crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL);
11467
11468         if (crtc_state)
11469                 intel_crtc_state_reset(crtc_state, crtc);
11470
11471         return crtc_state;
11472 }
11473
11474 /* Returns the currently programmed mode of the given encoder. */
11475 struct drm_display_mode *
11476 intel_encoder_current_mode(struct intel_encoder *encoder)
11477 {
11478         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
11479         struct intel_crtc_state *crtc_state;
11480         struct drm_display_mode *mode;
11481         struct intel_crtc *crtc;
11482         enum pipe pipe;
11483
11484         if (!encoder->get_hw_state(encoder, &pipe))
11485                 return NULL;
11486
11487         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
11488
11489         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
11490         if (!mode)
11491                 return NULL;
11492
11493         crtc_state = intel_crtc_state_alloc(crtc);
11494         if (!crtc_state) {
11495                 kfree(mode);
11496                 return NULL;
11497         }
11498
11499         if (!intel_crtc_get_pipe_config(crtc_state)) {
11500                 kfree(crtc_state);
11501                 kfree(mode);
11502                 return NULL;
11503         }
11504
11505         intel_encoder_get_config(encoder, crtc_state);
11506
11507         intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
11508
11509         kfree(crtc_state);
11510
11511         return mode;
11512 }
11513
11514 static void intel_crtc_destroy(struct drm_crtc *crtc)
11515 {
11516         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11517
11518         drm_crtc_cleanup(crtc);
11519         kfree(intel_crtc);
11520 }
11521
11522 /**
11523  * intel_wm_need_update - Check whether watermarks need updating
11524  * @cur: current plane state
11525  * @new: new plane state
11526  *
11527  * Check current plane state versus the new one to determine whether
11528  * watermarks need to be recalculated.
11529  *
11530  * Returns true or false.
11531  */
11532 static bool intel_wm_need_update(const struct intel_plane_state *cur,
11533                                  struct intel_plane_state *new)
11534 {
11535         /* Update watermarks on tiling or size changes. */
11536         if (new->uapi.visible != cur->uapi.visible)
11537                 return true;
11538
11539         if (!cur->hw.fb || !new->hw.fb)
11540                 return false;
11541
11542         if (cur->hw.fb->modifier != new->hw.fb->modifier ||
11543             cur->hw.rotation != new->hw.rotation ||
11544             drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
11545             drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
11546             drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
11547             drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
11548                 return true;
11549
11550         return false;
11551 }
11552
11553 static bool needs_scaling(const struct intel_plane_state *state)
11554 {
11555         int src_w = drm_rect_width(&state->uapi.src) >> 16;
11556         int src_h = drm_rect_height(&state->uapi.src) >> 16;
11557         int dst_w = drm_rect_width(&state->uapi.dst);
11558         int dst_h = drm_rect_height(&state->uapi.dst);
11559
11560         return (src_w != dst_w || src_h != dst_h);
11561 }
11562
/*
 * Derive CRTC-level flags (update_wm_pre/post, disable_cxsr,
 * disable_lp_wm, fb_bits) from the transition between
 * @old_plane_state and @plane_state, and set up the plane scaler on
 * gen9+.  Returns 0 on success or a negative error code from scaler
 * setup.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
                                    struct intel_crtc_state *crtc_state,
                                    const struct intel_plane_state *old_plane_state,
                                    struct intel_plane_state *plane_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        bool mode_changed = intel_crtc_needs_modeset(crtc_state);
        bool was_crtc_enabled = old_crtc_state->hw.active;
        bool is_crtc_enabled = crtc_state->hw.active;
        bool turn_off, turn_on, visible, was_visible;
        int ret;

        /* gen9+ non-cursor planes go through the pipe scaler bookkeeping. */
        if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
                ret = skl_update_scaler_plane(crtc_state, plane_state);
                if (ret)
                        return ret;
        }

        was_visible = old_plane_state->uapi.visible;
        visible = plane_state->uapi.visible;

        /* A plane cannot have been visible on an inactive crtc. */
        if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
                was_visible = false;

        /*
         * Visibility is calculated as if the crtc was on, but
         * after scaler setup everything depends on it being off
         * when the crtc isn't active.
         *
         * FIXME this is wrong for watermarks. Watermarks should also
         * be computed as if the pipe would be active. Perhaps move
         * per-plane wm computation to the .check_plane() hook, and
         * only combine the results from all planes in the current place?
         */
        if (!is_crtc_enabled) {
                intel_plane_set_invisible(crtc_state, plane_state);
                visible = false;
        }

        /* Invisible before and after: nothing to do for this plane. */
        if (!was_visible && !visible)
                return 0;

        /* A full modeset implies both a turn-off and a turn-on. */
        turn_off = was_visible && (!visible || mode_changed);
        turn_on = visible && (!was_visible || mode_changed);

        drm_dbg_atomic(&dev_priv->drm,
                       "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
                       crtc->base.base.id, crtc->base.name,
                       plane->base.base.id, plane->base.name,
                       was_visible, visible,
                       turn_off, turn_on, mode_changed);

        if (turn_on) {
                /* pre-ilk (except g4x) recompute wm before the plane change */
                if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
                        crtc_state->update_wm_pre = true;

                /* must disable cxsr around plane enable/disable */
                if (plane->id != PLANE_CURSOR)
                        crtc_state->disable_cxsr = true;
        } else if (turn_off) {
                /* ...and after the plane change when turning off */
                if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
                        crtc_state->update_wm_post = true;

                /* must disable cxsr around plane enable/disable */
                if (plane->id != PLANE_CURSOR)
                        crtc_state->disable_cxsr = true;
        } else if (intel_wm_need_update(old_plane_state, plane_state)) {
                if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
                        /* FIXME bollocks */
                        crtc_state->update_wm_pre = true;
                        crtc_state->update_wm_post = true;
                }
        }

        /* Track which frontbuffers this commit touches. */
        if (visible || was_visible)
                crtc_state->fb_bits |= plane->frontbuffer_bit;

        /*
         * ILK/SNB DVSACNTR/Sprite Enable
         * IVB SPR_CTL/Sprite Enable
         * "When in Self Refresh Big FIFO mode, a write to enable the
         *  plane will be internally buffered and delayed while Big FIFO
         *  mode is exiting."
         *
         * Which means that enabling the sprite can take an extra frame
         * when we start in big FIFO mode (LP1+). Thus we need to drop
         * down to LP0 and wait for vblank in order to make sure the
         * sprite gets enabled on the next vblank after the register write.
         * Doing otherwise would risk enabling the sprite one frame after
         * we've already signalled flip completion. We can resume LP1+
         * once the sprite has been enabled.
         *
         *
         * WaCxSRDisabledForSpriteScaling:ivb
         * IVB SPR_SCALE/Scaling Enable
         * "Low Power watermarks must be disabled for at least one
         *  frame before enabling sprite scaling, and kept disabled
         *  until sprite scaling is disabled."
         *
         * ILK/SNB DVSASCALE/Scaling Enable
         * "When in Self Refresh Big FIFO mode, scaling enable will be
         *  masked off while Big FIFO mode is exiting."
         *
         * Despite the w/a only being listed for IVB we assume that
         * the ILK/SNB note has similar ramifications, hence we apply
         * the w/a on all three platforms.
         *
         * With experimental results seems this is needed also for primary
         * plane, not only sprite plane.
         */
        if (plane->id != PLANE_CURSOR &&
            (IS_GEN_RANGE(dev_priv, 5, 6) ||
             IS_IVYBRIDGE(dev_priv)) &&
            (turn_on || (!needs_scaling(old_plane_state) &&
                         needs_scaling(plane_state))))
                crtc_state->disable_lp_wm = true;

        return 0;
}
11684
11685 static bool encoders_cloneable(const struct intel_encoder *a,
11686                                const struct intel_encoder *b)
11687 {
11688         /* masks could be asymmetric, so check both ways */
11689         return a == b || (a->cloneable & (1 << b->type) &&
11690                           b->cloneable & (1 << a->type));
11691 }
11692
11693 static bool check_single_encoder_cloning(struct intel_atomic_state *state,
11694                                          struct intel_crtc *crtc,
11695                                          struct intel_encoder *encoder)
11696 {
11697         struct intel_encoder *source_encoder;
11698         struct drm_connector *connector;
11699         struct drm_connector_state *connector_state;
11700         int i;
11701
11702         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
11703                 if (connector_state->crtc != &crtc->base)
11704                         continue;
11705
11706                 source_encoder =
11707                         to_intel_encoder(connector_state->best_encoder);
11708                 if (!encoders_cloneable(encoder, source_encoder))
11709                         return false;
11710         }
11711
11712         return true;
11713 }
11714
11715 static int icl_add_linked_planes(struct intel_atomic_state *state)
11716 {
11717         struct intel_plane *plane, *linked;
11718         struct intel_plane_state *plane_state, *linked_plane_state;
11719         int i;
11720
11721         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11722                 linked = plane_state->planar_linked_plane;
11723
11724                 if (!linked)
11725                         continue;
11726
11727                 linked_plane_state = intel_atomic_get_plane_state(state, linked);
11728                 if (IS_ERR(linked_plane_state))
11729                         return PTR_ERR(linked_plane_state);
11730
11731                 drm_WARN_ON(state->base.dev,
11732                             linked_plane_state->planar_linked_plane != plane);
11733                 drm_WARN_ON(state->base.dev,
11734                             linked_plane_state->planar_slave == plane_state->planar_slave);
11735         }
11736
11737         return 0;
11738 }
11739
/*
 * (Re)build master/slave links between UV (master) and Y (slave) planes
 * for planar YUV formats on gen11+: first tear down all stale links,
 * then assign a free Y-capable plane to every plane in
 * crtc_state->nv12_planes and copy the master's parameters to it.
 * Returns 0, -EINVAL if no free Y plane is available, or a negative
 * error from intel_atomic_get_plane_state().
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
        struct intel_plane *plane, *linked;
        struct intel_plane_state *plane_state;
        int i;

        /* Plane linking only exists on gen11+. */
        if (INTEL_GEN(dev_priv) < 11)
                return 0;

        /*
         * Destroy all old plane links and make the slave plane invisible
         * in the crtc_state->active_planes mask.
         */
        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
                if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
                        continue;

                plane_state->planar_linked_plane = NULL;
                if (plane_state->planar_slave && !plane_state->uapi.visible) {
                        /* An invisible ex-slave must be deactivated and reprogrammed. */
                        crtc_state->enabled_planes &= ~BIT(plane->id);
                        crtc_state->active_planes &= ~BIT(plane->id);
                        crtc_state->update_planes |= BIT(plane->id);
                }

                plane_state->planar_slave = false;
        }

        /* No planar YUV planes on this crtc: nothing left to link. */
        if (!crtc_state->nv12_planes)
                return 0;

        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
                struct intel_plane_state *linked_state = NULL;

                if (plane->pipe != crtc->pipe ||
                    !(crtc_state->nv12_planes & BIT(plane->id)))
                        continue;

                /* Find the first Y-capable plane that is not already in use. */
                for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
                        if (!icl_is_nv12_y_plane(dev_priv, linked->id))
                                continue;

                        if (crtc_state->active_planes & BIT(linked->id))
                                continue;

                        linked_state = intel_atomic_get_plane_state(state, linked);
                        if (IS_ERR(linked_state))
                                return PTR_ERR(linked_state);

                        break;
                }

                if (!linked_state) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "Need %d free Y planes for planar YUV\n",
                                    hweight8(crtc_state->nv12_planes));

                        return -EINVAL;
                }

                plane_state->planar_linked_plane = linked;

                linked_state->planar_slave = true;
                linked_state->planar_linked_plane = plane;
                crtc_state->enabled_planes |= BIT(linked->id);
                crtc_state->active_planes |= BIT(linked->id);
                crtc_state->update_planes |= BIT(linked->id);
                drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
                            linked->base.name, plane->base.name);

                /* Copy parameters to slave plane */
                linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
                linked_state->color_ctl = plane_state->color_ctl;
                linked_state->view = plane_state->view;
                memcpy(linked_state->color_plane, plane_state->color_plane,
                       sizeof(linked_state->color_plane));

                intel_plane_copy_hw_state(linked_state, plane_state);
                linked_state->uapi.src = plane_state->uapi.src;
                linked_state->uapi.dst = plane_state->uapi.dst;

                if (icl_is_hdr_plane(dev_priv, plane->id)) {
                        /* Tell the HDR master plane which chroma upsampler slave to use. */
                        if (linked->id == PLANE_SPRITE5)
                                plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
                        else if (linked->id == PLANE_SPRITE4)
                                plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
                        else if (linked->id == PLANE_SPRITE3)
                                plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL;
                        else if (linked->id == PLANE_SPRITE2)
                                plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL;
                        else
                                MISSING_CASE(linked->id);
                }
        }

        return 0;
}
11839
11840 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
11841 {
11842         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
11843         struct intel_atomic_state *state =
11844                 to_intel_atomic_state(new_crtc_state->uapi.state);
11845         const struct intel_crtc_state *old_crtc_state =
11846                 intel_atomic_get_old_crtc_state(state, crtc);
11847
11848         return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
11849 }
11850
11851 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
11852 {
11853         const struct drm_display_mode *pipe_mode =
11854                 &crtc_state->hw.pipe_mode;
11855         int linetime_wm;
11856
11857         if (!crtc_state->hw.enable)
11858                 return 0;
11859
11860         linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
11861                                         pipe_mode->crtc_clock);
11862
11863         return min(linetime_wm, 0x1ff);
11864 }
11865
11866 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
11867                                const struct intel_cdclk_state *cdclk_state)
11868 {
11869         const struct drm_display_mode *pipe_mode =
11870                 &crtc_state->hw.pipe_mode;
11871         int linetime_wm;
11872
11873         if (!crtc_state->hw.enable)
11874                 return 0;
11875
11876         linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
11877                                         cdclk_state->logical.cdclk);
11878
11879         return min(linetime_wm, 0x1ff);
11880 }
11881
11882 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
11883 {
11884         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
11885         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11886         const struct drm_display_mode *pipe_mode =
11887                 &crtc_state->hw.pipe_mode;
11888         int linetime_wm;
11889
11890         if (!crtc_state->hw.enable)
11891                 return 0;
11892
11893         linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
11894                                    crtc_state->pixel_rate);
11895
11896         /* Display WA #1135: BXT:ALL GLK:ALL */
11897         if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
11898                 linetime_wm /= 2;
11899
11900         return min(linetime_wm, 0x1ff);
11901 }
11902
11903 static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
11904                                    struct intel_crtc *crtc)
11905 {
11906         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11907         struct intel_crtc_state *crtc_state =
11908                 intel_atomic_get_new_crtc_state(state, crtc);
11909         const struct intel_cdclk_state *cdclk_state;
11910
11911         if (INTEL_GEN(dev_priv) >= 9)
11912                 crtc_state->linetime = skl_linetime_wm(crtc_state);
11913         else
11914                 crtc_state->linetime = hsw_linetime_wm(crtc_state);
11915
11916         if (!hsw_crtc_supports_ips(crtc))
11917                 return 0;
11918
11919         cdclk_state = intel_atomic_get_cdclk_state(state);
11920         if (IS_ERR(cdclk_state))
11921                 return PTR_ERR(cdclk_state);
11922
11923         crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
11924                                                        cdclk_state);
11925
11926         return 0;
11927 }
11928
/*
 * Per-crtc atomic check: computes clocks, color management, watermarks,
 * scalers, IPS and PSR2 selective fetch state for the new crtc state.
 * Returns 0 on success or the first negative error encountered.
 */
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
                                   struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_crtc_state *crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        bool mode_changed = intel_crtc_needs_modeset(crtc_state);
        int ret;

        /* pre-ilk (except g4x): disabling the pipe needs a post-commit wm update */
        if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
            mode_changed && !crtc_state->hw.active)
                crtc_state->update_wm_post = true;

        /*
         * Compute new DPLL state on a modeset; bigjoiner slaves reuse the
         * master's clock and must not already have a shared DPLL assigned.
         */
        if (mode_changed && crtc_state->hw.enable &&
            dev_priv->display.crtc_compute_clock &&
            !crtc_state->bigjoiner_slave &&
            !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
                ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
                if (ret)
                        return ret;
        }

        /*
         * May need to update pipe gamma enable bits
         * when C8 planes are getting enabled/disabled.
         */
        if (c8_planes_changed(crtc_state))
                crtc_state->uapi.color_mgmt_changed = true;

        if (mode_changed || crtc_state->update_pipe ||
            crtc_state->uapi.color_mgmt_changed) {
                ret = intel_color_check(crtc_state);
                if (ret)
                        return ret;
        }

        if (dev_priv->display.compute_pipe_wm) {
                ret = dev_priv->display.compute_pipe_wm(crtc_state);
                if (ret) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "Target pipe watermarks are invalid\n");
                        return ret;
                }
        }

        if (dev_priv->display.compute_intermediate_wm) {
                /* Intermediate watermarks make no sense without target ones. */
                if (drm_WARN_ON(&dev_priv->drm,
                                !dev_priv->display.compute_pipe_wm))
                        return 0;

                /*
                 * Calculate 'intermediate' watermarks that satisfy both the
                 * old state and the new state.  We can program these
                 * immediately.
                 */
                ret = dev_priv->display.compute_intermediate_wm(crtc_state);
                if (ret) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "No valid intermediate pipe watermarks are possible\n");
                        return ret;
                }
        }

        /* gen9+: pipe scaler and plane scaler assignment */
        if (INTEL_GEN(dev_priv) >= 9) {
                if (mode_changed || crtc_state->update_pipe) {
                        ret = skl_update_scaler_crtc(crtc_state);
                        if (ret)
                                return ret;
                }

                ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
                if (ret)
                        return ret;
        }

        if (HAS_IPS(dev_priv)) {
                ret = hsw_compute_ips_config(crtc_state);
                if (ret)
                        return ret;
        }

        if (INTEL_GEN(dev_priv) >= 9 ||
            IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
                ret = hsw_compute_linetime_wm(state, crtc);
                if (ret)
                        return ret;

        }

        /* PSR2 selective fetch is only (re)computed for fast-path updates. */
        if (!mode_changed) {
                ret = intel_psr2_sel_fetch_update(state, crtc);
                if (ret)
                        return ret;
        }

        return 0;
}
12026
12027 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
12028 {
12029         struct intel_connector *connector;
12030         struct drm_connector_list_iter conn_iter;
12031
12032         drm_connector_list_iter_begin(dev, &conn_iter);
12033         for_each_intel_connector_iter(connector, &conn_iter) {
12034                 if (connector->base.state->crtc)
12035                         drm_connector_put(&connector->base);
12036
12037                 if (connector->base.encoder) {
12038                         connector->base.state->best_encoder =
12039                                 connector->base.encoder;
12040                         connector->base.state->crtc =
12041                                 connector->base.encoder->crtc;
12042
12043                         drm_connector_get(&connector->base);
12044                 } else {
12045                         connector->base.state->best_encoder = NULL;
12046                         connector->base.state->crtc = NULL;
12047                 }
12048         }
12049         drm_connector_list_iter_end(&conn_iter);
12050 }
12051
12052 static int
12053 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
12054                       struct intel_crtc_state *pipe_config)
12055 {
12056         struct drm_connector *connector = conn_state->connector;
12057         struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
12058         const struct drm_display_info *info = &connector->display_info;
12059         int bpp;
12060
12061         switch (conn_state->max_bpc) {
12062         case 6 ... 7:
12063                 bpp = 6 * 3;
12064                 break;
12065         case 8 ... 9:
12066                 bpp = 8 * 3;
12067                 break;
12068         case 10 ... 11:
12069                 bpp = 10 * 3;
12070                 break;
12071         case 12 ... 16:
12072                 bpp = 12 * 3;
12073                 break;
12074         default:
12075                 MISSING_CASE(conn_state->max_bpc);
12076                 return -EINVAL;
12077         }
12078
12079         if (bpp < pipe_config->pipe_bpp) {
12080                 drm_dbg_kms(&i915->drm,
12081                             "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
12082                             "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
12083                             connector->base.id, connector->name,
12084                             bpp, 3 * info->bpc,
12085                             3 * conn_state->max_requested_bpc,
12086                             pipe_config->pipe_bpp);
12087
12088                 pipe_config->pipe_bpp = bpp;
12089         }
12090
12091         return 0;
12092 }
12093
12094 static int
12095 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
12096                           struct intel_crtc_state *pipe_config)
12097 {
12098         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12099         struct drm_atomic_state *state = pipe_config->uapi.state;
12100         struct drm_connector *connector;
12101         struct drm_connector_state *connector_state;
12102         int bpp, i;
12103
12104         if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
12105             IS_CHERRYVIEW(dev_priv)))
12106                 bpp = 10*3;
12107         else if (INTEL_GEN(dev_priv) >= 5)
12108                 bpp = 12*3;
12109         else
12110                 bpp = 8*3;
12111
12112         pipe_config->pipe_bpp = bpp;
12113
12114         /* Clamp display bpp to connector max bpp */
12115         for_each_new_connector_in_state(state, connector, connector_state, i) {
12116                 int ret;
12117
12118                 if (connector_state->crtc != &crtc->base)
12119                         continue;
12120
12121                 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
12122                 if (ret)
12123                         return ret;
12124         }
12125
12126         return 0;
12127 }
12128
/*
 * Log the hardware (crtc_*) timing fields of @mode to the KMS debug
 * log: clock, then the h/v display/sync-start/sync-end/total values,
 * followed by the mode type and flags. Diagnostic only.
 */
static void intel_dump_crtc_timings(struct drm_i915_private *i915,
				    const struct drm_display_mode *mode)
{
	drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
		    "type: 0x%x flags: 0x%x\n",
		    mode->crtc_clock,
		    mode->crtc_hdisplay, mode->crtc_hsync_start,
		    mode->crtc_hsync_end, mode->crtc_htotal,
		    mode->crtc_vdisplay, mode->crtc_vsync_start,
		    mode->crtc_vsync_end, mode->crtc_vtotal,
		    mode->type, mode->flags);
}
12141
/*
 * Log one link M/N configuration (gmch and link M/N ratios plus TU
 * size) to the KMS debug log. @id tags the link being dumped
 * (e.g. "fdi", "dp m_n", "dp m2_n2").
 */
static void
intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
		      const char *id, unsigned int lane_count,
		      const struct intel_link_m_n *m_n)
{
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);

	drm_dbg_kms(&i915->drm,
		    "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		    id, lane_count,
		    m_n->gmch_m, m_n->gmch_n,
		    m_n->link_m, m_n->link_n, m_n->tu);
}
12155
12156 static void
12157 intel_dump_infoframe(struct drm_i915_private *dev_priv,
12158                      const union hdmi_infoframe *frame)
12159 {
12160         if (!drm_debug_enabled(DRM_UT_KMS))
12161                 return;
12162
12163         hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
12164 }
12165
12166 static void
12167 intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
12168                       const struct drm_dp_vsc_sdp *vsc)
12169 {
12170         if (!drm_debug_enabled(DRM_UT_KMS))
12171                 return;
12172
12173         drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
12174 }
12175
/*
 * Human-readable names for the INTEL_OUTPUT_* enum values, indexed by
 * the enum value itself; used by snprintf_output_types() to decode the
 * output_types bitmask for debug output.
 */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
12194
12195 static void snprintf_output_types(char *buf, size_t len,
12196                                   unsigned int output_types)
12197 {
12198         char *str = buf;
12199         int i;
12200
12201         str[0] = '\0';
12202
12203         for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
12204                 int r;
12205
12206                 if ((output_types & BIT(i)) == 0)
12207                         continue;
12208
12209                 r = snprintf(str, len, "%s%s",
12210                              str != buf ? "," : "", output_type_str[i]);
12211                 if (r >= len)
12212                         break;
12213                 str += r;
12214                 len -= r;
12215
12216                 output_types &= ~BIT(i);
12217         }
12218
12219         WARN_ON_ONCE(output_types != 0);
12220 }
12221
12222 static const char * const output_format_str[] = {
12223         [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
12224         [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
12225         [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
12226         [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
12227 };
12228
12229 static const char *output_formats(enum intel_output_format format)
12230 {
12231         if (format >= ARRAY_SIZE(output_format_str))
12232                 format = INTEL_OUTPUT_FORMAT_INVALID;
12233         return output_format_str[format];
12234 }
12235
/*
 * Log one plane's state (framebuffer, format, rotation, scaler and,
 * when visible, the src/dst rectangles) to the KMS debug log.
 * Diagnostic only; no side effects on the state.
 */
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct drm_format_name_buf format_name;

	/* A plane without a fb has nothing further worth dumping. */
	if (!fb) {
		drm_dbg_kms(&i915->drm,
			    "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
			    plane->base.base.id, plane->base.name,
			    yesno(plane_state->uapi.visible));
		return;
	}

	drm_dbg_kms(&i915->drm,
		    "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s modifier = 0x%llx, visible: %s\n",
		    plane->base.base.id, plane->base.name,
		    fb->base.id, fb->width, fb->height,
		    drm_get_format_name(fb->format->format, &format_name),
		    fb->modifier, yesno(plane_state->uapi.visible));
	drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
		    plane_state->hw.rotation, plane_state->scaler_id);
	/* src is 16.16 fixed point, dst is integer pixels. */
	if (plane_state->uapi.visible)
		drm_dbg_kms(&i915->drm,
			    "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
			    DRM_RECT_FP_ARG(&plane_state->uapi.src),
			    DRM_RECT_ARG(&plane_state->uapi.dst));
}
12265
/*
 * Dump a full crtc state to the KMS debug log. @context is a free-form
 * tag identifying the caller; when @state is non-NULL the states of all
 * planes on this pipe are dumped as well. Purely diagnostic.
 */
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
				   struct intel_atomic_state *state,
				   const char *context)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	char buf[64];
	int i;

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
		    crtc->base.base.id, crtc->base.name,
		    yesno(pipe_config->hw.enable), context);

	/* A disabled crtc has no interesting config; only dump planes. */
	if (!pipe_config->hw.enable)
		goto dump_planes;

	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	drm_dbg_kms(&dev_priv->drm,
		    "active: %s, output_types: %s (0x%x), output format: %s\n",
		    yesno(pipe_config->hw.active),
		    buf, pipe_config->output_types,
		    output_formats(pipe_config->output_format));

	drm_dbg_kms(&dev_priv->drm,
		    "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		    transcoder_name(pipe_config->cpu_transcoder),
		    pipe_config->pipe_bpp, pipe_config->dither);

	drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
		    transcoder_name(pipe_config->mst_master_transcoder));

	drm_dbg_kms(&dev_priv->drm,
		    "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
		    transcoder_name(pipe_config->master_transcoder),
		    pipe_config->sync_mode_slaves_mask);

	drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s\n",
		    pipe_config->bigjoiner_slave ? "slave" :
		    pipe_config->bigjoiner ? "master" : "no");

	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		/* m2_n2 is the alternate link config used by DRRS. */
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
		    pipe_config->has_audio, pipe_config->has_infoframe,
		    pipe_config->infoframes.enable);

	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
		drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
			    pipe_config->infoframes.gcp);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
	/*
	 * NOTE(review): the gamut metadata packet appears to share the
	 * infoframes.drm payload (both arms dump .drm) — presumably the
	 * DRM infoframe is carried in the gamut metadata packet; confirm.
	 */
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(DP_SDP_VSC))
		intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);

	drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
	drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
	drm_dbg_kms(&dev_priv->drm, "pipe mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.pipe_mode);
	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.pipe_mode);
	drm_dbg_kms(&dev_priv->drm,
		    "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		    pipe_config->port_clock,
		    pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		    pipe_config->pixel_rate);

	drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
		    pipe_config->linetime, pipe_config->ips_linetime);

	if (INTEL_GEN(dev_priv) >= 9)
		drm_dbg_kms(&dev_priv->drm,
			    "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			    crtc->num_scalers,
			    pipe_config->scaler_state.scaler_users,
			    pipe_config->scaler_state.scaler_id);

	if (HAS_GMCH(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			    pipe_config->gmch_pfit.control,
			    pipe_config->gmch_pfit.pgm_ratios,
			    pipe_config->gmch_pfit.lvds_border_bits);
	else
		drm_dbg_kms(&dev_priv->drm,
			    "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
			    DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
			    enableddisabled(pipe_config->pch_pfit.enabled),
			    yesno(pipe_config->pch_pfit.force_thru));

	drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
		    pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	/* CHV has a CGM block instead of the pipe CSC. */
	if (IS_CHERRYVIEW(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			    pipe_config->cgm_mode, pipe_config->gamma_mode,
			    pipe_config->gamma_enable, pipe_config->csc_enable);
	else
		drm_dbg_kms(&dev_priv->drm,
			    "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			    pipe_config->csc_mode, pipe_config->gamma_mode,
			    pipe_config->gamma_enable, pipe_config->csc_enable);

	drm_dbg_kms(&dev_priv->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
		    pipe_config->hw.degamma_lut ?
		    drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
		    pipe_config->hw.gamma_lut ?
		    drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);

dump_planes:
	/* Plane states are only available when called with an atomic state. */
	if (!state)
		return;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe)
			intel_dump_plane_state(plane_state);
	}
}
12418
12419 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
12420 {
12421         struct drm_device *dev = state->base.dev;
12422         struct drm_connector *connector;
12423         struct drm_connector_list_iter conn_iter;
12424         unsigned int used_ports = 0;
12425         unsigned int used_mst_ports = 0;
12426         bool ret = true;
12427
12428         /*
12429          * We're going to peek into connector->state,
12430          * hence connection_mutex must be held.
12431          */
12432         drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
12433
12434         /*
12435          * Walk the connector list instead of the encoder
12436          * list to detect the problem on ddi platforms
12437          * where there's just one encoder per digital port.
12438          */
12439         drm_connector_list_iter_begin(dev, &conn_iter);
12440         drm_for_each_connector_iter(connector, &conn_iter) {
12441                 struct drm_connector_state *connector_state;
12442                 struct intel_encoder *encoder;
12443
12444                 connector_state =
12445                         drm_atomic_get_new_connector_state(&state->base,
12446                                                            connector);
12447                 if (!connector_state)
12448                         connector_state = connector->state;
12449
12450                 if (!connector_state->best_encoder)
12451                         continue;
12452
12453                 encoder = to_intel_encoder(connector_state->best_encoder);
12454
12455                 drm_WARN_ON(dev, !connector_state->crtc);
12456
12457                 switch (encoder->type) {
12458                 case INTEL_OUTPUT_DDI:
12459                         if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
12460                                 break;
12461                         fallthrough;
12462                 case INTEL_OUTPUT_DP:
12463                 case INTEL_OUTPUT_HDMI:
12464                 case INTEL_OUTPUT_EDP:
12465                         /* the same port mustn't appear more than once */
12466                         if (used_ports & BIT(encoder->port))
12467                                 ret = false;
12468
12469                         used_ports |= BIT(encoder->port);
12470                         break;
12471                 case INTEL_OUTPUT_DP_MST:
12472                         used_mst_ports |=
12473                                 1 << encoder->port;
12474                         break;
12475                 default:
12476                         break;
12477                 }
12478         }
12479         drm_connector_list_iter_end(&conn_iter);
12480
12481         /* can't mix MST and SST/HDMI on the same port */
12482         if (used_ports & used_mst_ports)
12483                 return false;
12484
12485         return ret;
12486 }
12487
12488 static void
12489 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
12490                                            struct intel_crtc_state *crtc_state)
12491 {
12492         const struct intel_crtc_state *from_crtc_state = crtc_state;
12493
12494         if (crtc_state->bigjoiner_slave) {
12495                 from_crtc_state = intel_atomic_get_new_crtc_state(state,
12496                                                                   crtc_state->bigjoiner_linked_crtc);
12497
12498                 /* No need to copy state if the master state is unchanged */
12499                 if (!from_crtc_state)
12500                         return;
12501         }
12502
12503         intel_crtc_copy_color_blobs(crtc_state, from_crtc_state);
12504 }
12505
/*
 * Copy the uapi-visible crtc state (enable/active, modes, scaling
 * filter and, via the nomodeset helper, the color blobs) into the hw
 * state that the rest of the driver operates on.
 */
static void
intel_crtc_copy_uapi_to_hw_state(struct intel_atomic_state *state,
				 struct intel_crtc_state *crtc_state)
{
	crtc_state->hw.enable = crtc_state->uapi.enable;
	crtc_state->hw.active = crtc_state->uapi.active;
	crtc_state->hw.mode = crtc_state->uapi.mode;
	crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
	crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;

	intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc_state);
}
12518
/*
 * Copy the hw crtc state back into the uapi state so that what
 * userspace reads matches what the hardware is programmed with.
 * Bigjoiner slaves are skipped: their uapi state is owned by the
 * master's configuration, not by this hw state.
 */
static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
{
	if (crtc_state->bigjoiner_slave)
		return;

	crtc_state->uapi.enable = crtc_state->hw.enable;
	crtc_state->uapi.active = crtc_state->hw.active;
	/* Setting the mode from a valid hw state should never fail. */
	drm_WARN_ON(crtc_state->uapi.crtc->dev,
		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);

	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
	crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;

	/* copy color blobs to uapi */
	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
				  crtc_state->hw.degamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
				  crtc_state->hw.gamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.ctm,
				  crtc_state->hw.ctm);
}
12540
12541 static int
12542 copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state,
12543                           const struct intel_crtc_state *from_crtc_state)
12544 {
12545         struct intel_crtc_state *saved_state;
12546         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
12547
12548         saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL);
12549         if (!saved_state)
12550                 return -ENOMEM;
12551
12552         saved_state->uapi = crtc_state->uapi;
12553         saved_state->scaler_state = crtc_state->scaler_state;
12554         saved_state->shared_dpll = crtc_state->shared_dpll;
12555         saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
12556         saved_state->crc_enabled = crtc_state->crc_enabled;
12557
12558         intel_crtc_free_hw_state(crtc_state);
12559         memcpy(crtc_state, saved_state, sizeof(*crtc_state));
12560         kfree(saved_state);
12561
12562         /* Re-init hw state */
12563         memset(&crtc_state->hw, 0, sizeof(saved_state->hw));
12564         crtc_state->hw.enable = from_crtc_state->hw.enable;
12565         crtc_state->hw.active = from_crtc_state->hw.active;
12566         crtc_state->hw.pipe_mode = from_crtc_state->hw.pipe_mode;
12567         crtc_state->hw.adjusted_mode = from_crtc_state->hw.adjusted_mode;
12568
12569         /* Some fixups */
12570         crtc_state->uapi.mode_changed = from_crtc_state->uapi.mode_changed;
12571         crtc_state->uapi.connectors_changed = from_crtc_state->uapi.connectors_changed;
12572         crtc_state->uapi.active_changed = from_crtc_state->uapi.active_changed;
12573         crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0;
12574         crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc);
12575         crtc_state->bigjoiner_slave = true;
12576         crtc_state->cpu_transcoder = (enum transcoder)crtc->pipe;
12577         crtc_state->has_audio = false;
12578
12579         return 0;
12580 }
12581
/*
 * Reset @crtc_state to a freshly-allocated state while preserving the
 * fields that must survive a recompute (uapi state, scalers, DPLL
 * selections, CRC enable, and the watermarks on platforms that need
 * them), then re-derive the hw state from the uapi state.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int
intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
				 struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *saved_state;

	saved_state = intel_crtc_state_alloc(crtc);
	if (!saved_state)
		return -ENOMEM;

	/* free the old crtc_state->hw members */
	intel_crtc_free_hw_state(crtc_state);

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	/* These platforms track watermarks in the crtc state itself. */
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;

	/* Overwrite in place; saved_state is only a staging copy. */
	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	intel_crtc_copy_uapi_to_hw_state(state, crtc_state);

	return 0;
}
12620
12621 static int
12622 intel_modeset_pipe_config(struct intel_atomic_state *state,
12623                           struct intel_crtc_state *pipe_config)
12624 {
12625         struct drm_crtc *crtc = pipe_config->uapi.crtc;
12626         struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
12627         struct drm_connector *connector;
12628         struct drm_connector_state *connector_state;
12629         int base_bpp, ret, i;
12630         bool retry = true;
12631
12632         pipe_config->cpu_transcoder =
12633                 (enum transcoder) to_intel_crtc(crtc)->pipe;
12634
12635         /*
12636          * Sanitize sync polarity flags based on requested ones. If neither
12637          * positive or negative polarity is requested, treat this as meaning
12638          * negative polarity.
12639          */
12640         if (!(pipe_config->hw.adjusted_mode.flags &
12641               (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
12642                 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
12643
12644         if (!(pipe_config->hw.adjusted_mode.flags &
12645               (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
12646                 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
12647
12648         ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
12649                                         pipe_config);
12650         if (ret)
12651                 return ret;
12652
12653         base_bpp = pipe_config->pipe_bpp;
12654
12655         /*
12656          * Determine the real pipe dimensions. Note that stereo modes can
12657          * increase the actual pipe size due to the frame doubling and
12658          * insertion of additional space for blanks between the frame. This
12659          * is stored in the crtc timings. We use the requested mode to do this
12660          * computation to clearly distinguish it from the adjusted mode, which
12661          * can be changed by the connectors in the below retry loop.
12662          */
12663         drm_mode_get_hv_timing(&pipe_config->hw.mode,
12664                                &pipe_config->pipe_src_w,
12665                                &pipe_config->pipe_src_h);
12666
12667         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
12668                 struct intel_encoder *encoder =
12669                         to_intel_encoder(connector_state->best_encoder);
12670
12671                 if (connector_state->crtc != crtc)
12672                         continue;
12673
12674                 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
12675                         drm_dbg_kms(&i915->drm,
12676                                     "rejecting invalid cloning configuration\n");
12677                         return -EINVAL;
12678                 }
12679
12680                 /*
12681                  * Determine output_types before calling the .compute_config()
12682                  * hooks so that the hooks can use this information safely.
12683                  */
12684                 if (encoder->compute_output_type)
12685                         pipe_config->output_types |=
12686                                 BIT(encoder->compute_output_type(encoder, pipe_config,
12687                                                                  connector_state));
12688                 else
12689                         pipe_config->output_types |= BIT(encoder->type);
12690         }
12691
12692 encoder_retry:
12693         /* Ensure the port clock defaults are reset when retrying. */
12694         pipe_config->port_clock = 0;
12695         pipe_config->pixel_multiplier = 1;
12696
12697         /* Fill in default crtc timings, allow encoders to overwrite them. */
12698         drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
12699                               CRTC_STEREO_DOUBLE);
12700
12701         /* Pass our mode to the connectors and the CRTC to give them a chance to
12702          * adjust it according to limitations or connector properties, and also
12703          * a chance to reject the mode entirely.
12704          */
12705         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
12706                 struct intel_encoder *encoder =
12707                         to_intel_encoder(connector_state->best_encoder);
12708
12709                 if (connector_state->crtc != crtc)
12710                         continue;
12711
12712                 ret = encoder->compute_config(encoder, pipe_config,
12713                                               connector_state);
12714                 if (ret < 0) {
12715                         if (ret != -EDEADLK)
12716                                 drm_dbg_kms(&i915->drm,
12717                                             "Encoder config failure: %d\n",
12718                                             ret);
12719                         return ret;
12720                 }
12721         }
12722
12723         /* Set default port clock if not overwritten by the encoder. Needs to be
12724          * done afterwards in case the encoder adjusts the mode. */
12725         if (!pipe_config->port_clock)
12726                 pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
12727                         * pipe_config->pixel_multiplier;
12728
12729         ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
12730         if (ret == -EDEADLK)
12731                 return ret;
12732         if (ret < 0) {
12733                 drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
12734                 return ret;
12735         }
12736
12737         if (ret == RETRY) {
12738                 if (drm_WARN(&i915->drm, !retry,
12739                              "loop in pipe configuration computation\n"))
12740                         return -EINVAL;
12741
12742                 drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
12743                 retry = false;
12744                 goto encoder_retry;
12745         }
12746
12747         /* Dithering seems to not pass-through bits correctly when it should, so
12748          * only enable it on 6bpc panels and when its not a compliance
12749          * test requesting 6bpc video pattern.
12750          */
12751         pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
12752                 !pipe_config->dither_force_disable;
12753         drm_dbg_kms(&i915->drm,
12754                     "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
12755                     base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
12756
12757         return 0;
12758 }
12759
12760 static int
12761 intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
12762 {
12763         struct intel_atomic_state *state =
12764                 to_intel_atomic_state(crtc_state->uapi.state);
12765         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
12766         struct drm_connector_state *conn_state;
12767         struct drm_connector *connector;
12768         int i;
12769
12770         for_each_new_connector_in_state(&state->base, connector,
12771                                         conn_state, i) {
12772                 struct intel_encoder *encoder =
12773                         to_intel_encoder(conn_state->best_encoder);
12774                 int ret;
12775
12776                 if (conn_state->crtc != &crtc->base ||
12777                     !encoder->compute_config_late)
12778                         continue;
12779
12780                 ret = encoder->compute_config_late(encoder, crtc_state,
12781                                                    conn_state);
12782                 if (ret)
12783                         return ret;
12784         }
12785
12786         return 0;
12787 }
12788
/*
 * Compare two clock values with a small tolerance.
 *
 * Exactly equal clocks always match.  A zero clock only matches another
 * zero clock.  Otherwise the clocks match when their absolute difference
 * is (in integer arithmetic) less than 5% of their sum, i.e. roughly
 * within 10% of either clock.
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int delta;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	delta = abs(clock1 - clock2);

	/* Integer form of: 100 + (100 * delta) / (clock1 + clock2) < 105 */
	return ((delta + clock1 + clock2) * 100) / (clock1 + clock2) < 105;
}
12806
12807 static bool
12808 intel_compare_m_n(unsigned int m, unsigned int n,
12809                   unsigned int m2, unsigned int n2,
12810                   bool exact)
12811 {
12812         if (m == m2 && n == n2)
12813                 return true;
12814
12815         if (exact || !m || !n || !m2 || !n2)
12816                 return false;
12817
12818         BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
12819
12820         if (n > n2) {
12821                 while (n > n2) {
12822                         m2 <<= 1;
12823                         n2 <<= 1;
12824                 }
12825         } else if (n < n2) {
12826                 while (n < n2) {
12827                         m <<= 1;
12828                         n <<= 1;
12829                 }
12830         }
12831
12832         if (n != n2)
12833                 return false;
12834
12835         return intel_fuzzy_clock_check(m, m2);
12836 }
12837
12838 static bool
12839 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
12840                        const struct intel_link_m_n *m2_n2,
12841                        bool exact)
12842 {
12843         return m_n->tu == m2_n2->tu &&
12844                 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12845                                   m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
12846                 intel_compare_m_n(m_n->link_m, m_n->link_n,
12847                                   m2_n2->link_m, m2_n2->link_n, exact);
12848 }
12849
12850 static bool
12851 intel_compare_infoframe(const union hdmi_infoframe *a,
12852                         const union hdmi_infoframe *b)
12853 {
12854         return memcmp(a, b, sizeof(*a)) == 0;
12855 }
12856
12857 static bool
12858 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
12859                          const struct drm_dp_vsc_sdp *b)
12860 {
12861         return memcmp(a, b, sizeof(*a)) == 0;
12862 }
12863
12864 static void
12865 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
12866                                bool fastset, const char *name,
12867                                const union hdmi_infoframe *a,
12868                                const union hdmi_infoframe *b)
12869 {
12870         if (fastset) {
12871                 if (!drm_debug_enabled(DRM_UT_KMS))
12872                         return;
12873
12874                 drm_dbg_kms(&dev_priv->drm,
12875                             "fastset mismatch in %s infoframe\n", name);
12876                 drm_dbg_kms(&dev_priv->drm, "expected:\n");
12877                 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
12878                 drm_dbg_kms(&dev_priv->drm, "found:\n");
12879                 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
12880         } else {
12881                 drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
12882                 drm_err(&dev_priv->drm, "expected:\n");
12883                 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
12884                 drm_err(&dev_priv->drm, "found:\n");
12885                 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
12886         }
12887 }
12888
12889 static void
12890 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
12891                                 bool fastset, const char *name,
12892                                 const struct drm_dp_vsc_sdp *a,
12893                                 const struct drm_dp_vsc_sdp *b)
12894 {
12895         if (fastset) {
12896                 if (!drm_debug_enabled(DRM_UT_KMS))
12897                         return;
12898
12899                 drm_dbg_kms(&dev_priv->drm,
12900                             "fastset mismatch in %s dp sdp\n", name);
12901                 drm_dbg_kms(&dev_priv->drm, "expected:\n");
12902                 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
12903                 drm_dbg_kms(&dev_priv->drm, "found:\n");
12904                 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
12905         } else {
12906                 drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
12907                 drm_err(&dev_priv->drm, "expected:\n");
12908                 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
12909                 drm_err(&dev_priv->drm, "found:\n");
12910                 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
12911         }
12912 }
12913
/*
 * Report a mismatch in crtc state field @name, with a printf-style
 * description of the expected vs. found values.  Fastset mismatches are
 * debug-only (they just force a full modeset); otherwise a mismatch is a
 * driver error.  The varargs are forwarded via the kernel's %pV /
 * struct va_format mechanism so both log paths share one format string.
 */
static void __printf(4, 5)
pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
		     const char *name, const char *format, ...)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	if (fastset)
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
			    crtc->base.base.id, crtc->base.name, name, &vaf);
	else
		drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
			crtc->base.base.id, crtc->base.name, name, &vaf);

	va_end(args);
}
12936
12937 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
12938 {
12939         if (dev_priv->params.fastboot != -1)
12940                 return dev_priv->params.fastboot;
12941
12942         /* Enable fastboot by default on Skylake and newer */
12943         if (INTEL_GEN(dev_priv) >= 9)
12944                 return true;
12945
12946         /* Enable fastboot by default on VLV and CHV */
12947         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12948                 return true;
12949
12950         /* Disabled by default on all others */
12951         return false;
12952 }
12953
/*
 * intel_pipe_config_compare - compare two crtc states field by field
 * @current_config: the expected state (sw-computed)
 * @pipe_config: the found state (read back from hw, or the new state
 *	being checked for fastset compatibility)
 * @fastset: if true, mismatches are debug-logged and merely force a full
 *	modeset; if false, a mismatch indicates a driver bug and is logged
 *	as an error
 *
 * Every mismatch is logged individually; all checks run to completion so
 * a single call reports all differing fields.  Returns true iff the two
 * states match (within the fuzz allowed by the individual check macros).
 */
static bool
intel_pipe_config_compare(const struct intel_crtc_state *current_config,
			  const struct intel_crtc_state *pipe_config,
			  bool fastset)
{
	struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	bool ret = true;
	u32 bp_gamma = 0;
	/* True for the fastset check of a BIOS-inherited initial state. */
	bool fixup_inherited = fastset &&
		current_config->inherited && !pipe_config->inherited;

	if (fixup_inherited && !fastboot_enabled(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm,
			    "initial modeset and fastboot not set\n");
		ret = false;
	}

/* Compare a field, logging the values in hex on mismatch. */
#define PIPE_CONF_CHECK_X(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected 0x%08x, found 0x%08x)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* Compare a field, logging the values in decimal on mismatch. */
#define PIPE_CONF_CHECK_I(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected %i, found %i)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* Compare a boolean field, logging yes/no on mismatch. */
#define PIPE_CONF_CHECK_BOOL(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc,  __stringify(name), \
				     "(expected %s, found %s)", \
				     yesno(current_config->name), \
				     yesno(pipe_config->name)); \
		ret = false; \
	} \
} while (0)

/*
 * Checks state where we only read out the enabling, but not the entire
 * state itself (like full infoframes or ELD for audio). These states
 * require a full modeset on bootup to fix up.
 */
#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
	if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
		PIPE_CONF_CHECK_BOOL(name); \
	} else { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
				     yesno(current_config->name), \
				     yesno(pipe_config->name)); \
		ret = false; \
	} \
} while (0)

/* Compare a pointer field (e.g. a shared DPLL reference). */
#define PIPE_CONF_CHECK_P(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected %p, found %p)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* Compare a link m/n config; exact comparison only when !fastset. */
#define PIPE_CONF_CHECK_M_N(name) do { \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name,\
				    !fastset)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected tu %i gmch %i/%i link %i/%i, " \
				     "found tu %i, gmch %i/%i link %i/%i)", \
				     current_config->name.tu, \
				     current_config->name.gmch_m, \
				     current_config->name.gmch_n, \
				     current_config->name.link_m, \
				     current_config->name.link_n, \
				     pipe_config->name.tu, \
				     pipe_config->name.gmch_m, \
				     pipe_config->name.gmch_n, \
				     pipe_config->name.link_m, \
				     pipe_config->name.link_n); \
		ret = false; \
	} \
} while (0)

/* This is required for BDW+ where there is only one set of registers for
 * switching between high and low RR.
 * This macro can be used whenever a comparison has to be made between one
 * hw state and multiple sw state variables.
 */
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name, !fastset) && \
	    !intel_compare_link_m_n(&current_config->alt_name, \
				    &pipe_config->name, !fastset)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected tu %i gmch %i/%i link %i/%i, " \
				     "or tu %i gmch %i/%i link %i/%i, " \
				     "found tu %i, gmch %i/%i link %i/%i)", \
				     current_config->name.tu, \
				     current_config->name.gmch_m, \
				     current_config->name.gmch_n, \
				     current_config->name.link_m, \
				     current_config->name.link_n, \
				     current_config->alt_name.tu, \
				     current_config->alt_name.gmch_m, \
				     current_config->alt_name.gmch_n, \
				     current_config->alt_name.link_m, \
				     current_config->alt_name.link_n, \
				     pipe_config->name.tu, \
				     pipe_config->name.gmch_m, \
				     pipe_config->name.gmch_n, \
				     pipe_config->name.link_m, \
				     pipe_config->name.link_n); \
		ret = false; \
	} \
} while (0)

/* Compare only the bits of a field selected by @mask. */
#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(%x) (expected %i, found %i)", \
				     (mask), \
				     current_config->name & (mask), \
				     pipe_config->name & (mask)); \
		ret = false; \
	} \
} while (0)

/* Compare a clock field with intel_fuzzy_clock_check() tolerance. */
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected %i, found %i)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* Bytewise-compare an infoframe, dumping both on mismatch. */
#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
	if (!intel_compare_infoframe(&current_config->infoframes.name, \
				     &pipe_config->infoframes.name)) { \
		pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
					       &current_config->infoframes.name, \
					       &pipe_config->infoframes.name); \
		ret = false; \
	} \
} while (0)

/* Bytewise-compare a DP VSC SDP; skipped when either state has PSR. */
#define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
	if (!current_config->has_psr && !pipe_config->has_psr && \
	    !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
				      &pipe_config->infoframes.name)) { \
		pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
						&current_config->infoframes.name, \
						&pipe_config->infoframes.name); \
		ret = false; \
	} \
} while (0)

/*
 * Compare a LUT: first the mode field (@name1) must match exactly, then
 * the LUT contents (@name2) are compared at @bit_precision bits.
 */
#define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
	if (current_config->name1 != pipe_config->name1) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name1), \
				"(expected %i, found %i, won't compare lut values)", \
				current_config->name1, \
				pipe_config->name1); \
		ret = false;\
	} else { \
		if (!intel_color_lut_equal(current_config->name2, \
					pipe_config->name2, pipe_config->name1, \
					bit_precision)) { \
			pipe_config_mismatch(fastset, crtc, __stringify(name2), \
					"hw_state doesn't match sw_state"); \
			ret = false; \
		} \
	} \
} while (0)

/* True if either state carries the given readout quirk. */
#define PIPE_CONF_QUIRK(quirk) \
	((current_config->quirks | pipe_config->quirks) & (quirk))

	PIPE_CONF_CHECK_I(cpu_transcoder);

	PIPE_CONF_CHECK_BOOL(has_pch_encoder);
	PIPE_CONF_CHECK_I(fdi_lanes);
	PIPE_CONF_CHECK_M_N(fdi_m_n);

	PIPE_CONF_CHECK_I(lane_count);
	PIPE_CONF_CHECK_X(lane_lat_optim_mask);

	if (INTEL_GEN(dev_priv) < 8) {
		PIPE_CONF_CHECK_M_N(dp_m_n);

		if (current_config->has_drrs)
			PIPE_CONF_CHECK_M_N(dp_m2_n2);
	} else
		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);

	PIPE_CONF_CHECK_X(output_types);

	/* FIXME do the readout properly and get rid of this quirk */
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);

		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);

		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);

		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);

		PIPE_CONF_CHECK_I(pixel_multiplier);

		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_INTERLACE);

		if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
					      DRM_MODE_FLAG_PHSYNC);
			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
					      DRM_MODE_FLAG_NHSYNC);
			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
					      DRM_MODE_FLAG_PVSYNC);
			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
					      DRM_MODE_FLAG_NVSYNC);
		}
	}

	PIPE_CONF_CHECK_I(output_format);
	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
	if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		PIPE_CONF_CHECK_BOOL(limited_color_range);

	PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
	PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
	PIPE_CONF_CHECK_BOOL(has_infoframe);
	/* FIXME do the readout properly and get rid of this quirk */
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
		PIPE_CONF_CHECK_BOOL(fec_enable);

	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);

	PIPE_CONF_CHECK_X(gmch_pfit.control);
	/* pfit ratios are autocomputed by the hw on gen4+ */
	if (INTEL_GEN(dev_priv) < 4)
		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);

	/*
	 * Changing the EDP transcoder input mux
	 * (A_ONOFF vs. A_ON) requires a full modeset.
	 */
	PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);

	if (!fastset) {
		PIPE_CONF_CHECK_I(pipe_src_w);
		PIPE_CONF_CHECK_I(pipe_src_h);

		PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
		if (current_config->pch_pfit.enabled) {
			PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
			PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
			PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
			PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
		}

		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
		/* FIXME do the readout properly and get rid of this quirk */
		if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
			PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);

		PIPE_CONF_CHECK_X(gamma_mode);
		if (IS_CHERRYVIEW(dev_priv))
			PIPE_CONF_CHECK_X(cgm_mode);
		else
			PIPE_CONF_CHECK_X(csc_mode);
		PIPE_CONF_CHECK_BOOL(gamma_enable);
		PIPE_CONF_CHECK_BOOL(csc_enable);

		PIPE_CONF_CHECK_I(linetime);
		PIPE_CONF_CHECK_I(ips_linetime);

		bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
		if (bp_gamma)
			PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
	}

	PIPE_CONF_CHECK_BOOL(double_wide);

	PIPE_CONF_CHECK_P(shared_dpll);

	/* FIXME do the readout properly and get rid of this quirk */
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
		PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
		PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
		PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
		PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
		PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
		PIPE_CONF_CHECK_X(dpll_hw_state.spll);
		PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
		PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
		PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
		PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);

		PIPE_CONF_CHECK_X(dsi_pll.ctrl);
		PIPE_CONF_CHECK_X(dsi_pll.div);

		if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
			PIPE_CONF_CHECK_I(pipe_bpp);

		PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
		PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
		PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);

		PIPE_CONF_CHECK_I(min_voltage_level);
	}

	PIPE_CONF_CHECK_X(infoframes.enable);
	PIPE_CONF_CHECK_X(infoframes.gcp);
	PIPE_CONF_CHECK_INFOFRAME(avi);
	PIPE_CONF_CHECK_INFOFRAME(spd);
	PIPE_CONF_CHECK_INFOFRAME(hdmi);
	PIPE_CONF_CHECK_INFOFRAME(drm);
	PIPE_CONF_CHECK_DP_VSC_SDP(vsc);

	PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
	PIPE_CONF_CHECK_I(master_transcoder);
	PIPE_CONF_CHECK_BOOL(bigjoiner);
	PIPE_CONF_CHECK_BOOL(bigjoiner_slave);
	PIPE_CONF_CHECK_P(bigjoiner_linked_crtc);

	PIPE_CONF_CHECK_I(dsc.compression_enable);
	PIPE_CONF_CHECK_I(dsc.dsc_split);
	PIPE_CONF_CHECK_I(dsc.compressed_bpp);

	PIPE_CONF_CHECK_I(mst_master_transcoder);

#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_CHECK_COLOR_LUT
#undef PIPE_CONF_QUIRK

	return ret;
}
13355
13356 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
13357                                            const struct intel_crtc_state *pipe_config)
13358 {
13359         if (pipe_config->has_pch_encoder) {
13360                 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
13361                                                             &pipe_config->fdi_m_n);
13362                 int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
13363
13364                 /*
13365                  * FDI already provided one idea for the dotclock.
13366                  * Yell if the encoder disagrees.
13367                  */
13368                 drm_WARN(&dev_priv->drm,
13369                          !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
13370                          "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
13371                          fdi_dotclock, dotclock);
13372         }
13373 }
13374
13375 static void verify_wm_state(struct intel_crtc *crtc,
13376                             struct intel_crtc_state *new_crtc_state)
13377 {
13378         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13379         struct skl_hw_state {
13380                 struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
13381                 struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
13382                 struct skl_pipe_wm wm;
13383         } *hw;
13384         struct skl_pipe_wm *sw_wm;
13385         struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
13386         u8 hw_enabled_slices;
13387         const enum pipe pipe = crtc->pipe;
13388         int plane, level, max_level = ilk_wm_max_level(dev_priv);
13389
13390         if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active)
13391                 return;
13392
13393         hw = kzalloc(sizeof(*hw), GFP_KERNEL);
13394         if (!hw)
13395                 return;
13396
13397         skl_pipe_wm_get_hw_state(crtc, &hw->wm);
13398         sw_wm = &new_crtc_state->wm.skl.optimal;
13399
13400         skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
13401
13402         hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
13403
13404         if (INTEL_GEN(dev_priv) >= 11 &&
13405             hw_enabled_slices != dev_priv->dbuf.enabled_slices)
13406                 drm_err(&dev_priv->drm,
13407                         "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
13408                         dev_priv->dbuf.enabled_slices,
13409                         hw_enabled_slices);
13410
13411         /* planes */
13412         for_each_universal_plane(dev_priv, pipe, plane) {
13413                 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
13414
13415                 hw_plane_wm = &hw->wm.planes[plane];
13416                 sw_plane_wm = &sw_wm->planes[plane];
13417
13418                 /* Watermarks */
13419                 for (level = 0; level <= max_level; level++) {
13420                         if (skl_wm_level_equals(&hw_plane_wm->wm[level],
13421                                                 &sw_plane_wm->wm[level]) ||
13422                             (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level],
13423                                                                &sw_plane_wm->sagv_wm0)))
13424                                 continue;
13425
13426                         drm_err(&dev_priv->drm,
13427                                 "mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
13428                                 pipe_name(pipe), plane + 1, level,
13429                                 sw_plane_wm->wm[level].plane_en,
13430                                 sw_plane_wm->wm[level].plane_res_b,
13431                                 sw_plane_wm->wm[level].plane_res_l,
13432                                 hw_plane_wm->wm[level].plane_en,
13433                                 hw_plane_wm->wm[level].plane_res_b,
13434                                 hw_plane_wm->wm[level].plane_res_l);
13435                 }
13436
13437                 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
13438                                          &sw_plane_wm->trans_wm)) {
13439                         drm_err(&dev_priv->drm,
13440                                 "mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
13441                                 pipe_name(pipe), plane + 1,
13442                                 sw_plane_wm->trans_wm.plane_en,
13443                                 sw_plane_wm->trans_wm.plane_res_b,
13444                                 sw_plane_wm->trans_wm.plane_res_l,
13445                                 hw_plane_wm->trans_wm.plane_en,
13446                                 hw_plane_wm->trans_wm.plane_res_b,
13447                                 hw_plane_wm->trans_wm.plane_res_l);
13448                 }
13449
13450                 /* DDB */
13451                 hw_ddb_entry = &hw->ddb_y[plane];
13452                 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];
13453
13454                 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
13455                         drm_err(&dev_priv->drm,
13456                                 "mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
13457                                 pipe_name(pipe), plane + 1,
13458                                 sw_ddb_entry->start, sw_ddb_entry->end,
13459                                 hw_ddb_entry->start, hw_ddb_entry->end);
13460                 }
13461         }
13462
13463         /*
13464          * cursor
13465          * If the cursor plane isn't active, we may not have updated it's ddb
13466          * allocation. In that case since the ddb allocation will be updated
13467          * once the plane becomes visible, we can skip this check
13468          */
13469         if (1) {
13470                 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
13471
13472                 hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
13473                 sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
13474
13475                 /* Watermarks */
13476                 for (level = 0; level <= max_level; level++) {
13477                         if (skl_wm_level_equals(&hw_plane_wm->wm[level],
13478                                                 &sw_plane_wm->wm[level]) ||
13479                             (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level],
13480                                                                &sw_plane_wm->sagv_wm0)))
13481                                 continue;
13482
13483                         drm_err(&dev_priv->drm,
13484                                 "mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
13485                                 pipe_name(pipe), level,
13486                                 sw_plane_wm->wm[level].plane_en,
13487                                 sw_plane_wm->wm[level].plane_res_b,
13488                                 sw_plane_wm->wm[level].plane_res_l,
13489                                 hw_plane_wm->wm[level].plane_en,
13490                                 hw_plane_wm->wm[level].plane_res_b,
13491                                 hw_plane_wm->wm[level].plane_res_l);
13492                 }
13493
13494                 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
13495                                          &sw_plane_wm->trans_wm)) {
13496                         drm_err(&dev_priv->drm,
13497                                 "mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
13498                                 pipe_name(pipe),
13499                                 sw_plane_wm->trans_wm.plane_en,
13500                                 sw_plane_wm->trans_wm.plane_res_b,
13501                                 sw_plane_wm->trans_wm.plane_res_l,
13502                                 hw_plane_wm->trans_wm.plane_en,
13503                                 hw_plane_wm->trans_wm.plane_res_b,
13504                                 hw_plane_wm->trans_wm.plane_res_l);
13505                 }
13506
13507                 /* DDB */
13508                 hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
13509                 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
13510
13511                 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
13512                         drm_err(&dev_priv->drm,
13513                                 "mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
13514                                 pipe_name(pipe),
13515                                 sw_ddb_entry->start, sw_ddb_entry->end,
13516                                 hw_ddb_entry->start, hw_ddb_entry->end);
13517                 }
13518         }
13519
13520         kfree(hw);
13521 }
13522
13523 static void
13524 verify_connector_state(struct intel_atomic_state *state,
13525                        struct intel_crtc *crtc)
13526 {
13527         struct drm_connector *connector;
13528         struct drm_connector_state *new_conn_state;
13529         int i;
13530
13531         for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
13532                 struct drm_encoder *encoder = connector->encoder;
13533                 struct intel_crtc_state *crtc_state = NULL;
13534
13535                 if (new_conn_state->crtc != &crtc->base)
13536                         continue;
13537
13538                 if (crtc)
13539                         crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
13540
13541                 intel_connector_verify_state(crtc_state, new_conn_state);
13542
13543                 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
13544                      "connector's atomic encoder doesn't match legacy encoder\n");
13545         }
13546 }
13547
13548 static void
13549 verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
13550 {
13551         struct intel_encoder *encoder;
13552         struct drm_connector *connector;
13553         struct drm_connector_state *old_conn_state, *new_conn_state;
13554         int i;
13555
13556         for_each_intel_encoder(&dev_priv->drm, encoder) {
13557                 bool enabled = false, found = false;
13558                 enum pipe pipe;
13559
13560                 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
13561                             encoder->base.base.id,
13562                             encoder->base.name);
13563
13564                 for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
13565                                                    new_conn_state, i) {
13566                         if (old_conn_state->best_encoder == &encoder->base)
13567                                 found = true;
13568
13569                         if (new_conn_state->best_encoder != &encoder->base)
13570                                 continue;
13571                         found = enabled = true;
13572
13573                         I915_STATE_WARN(new_conn_state->crtc !=
13574                                         encoder->base.crtc,
13575                              "connector's crtc doesn't match encoder crtc\n");
13576                 }
13577
13578                 if (!found)
13579                         continue;
13580
13581                 I915_STATE_WARN(!!encoder->base.crtc != enabled,
13582                      "encoder's enabled state mismatch "
13583                      "(expected %i, found %i)\n",
13584                      !!encoder->base.crtc, enabled);
13585
13586                 if (!encoder->base.crtc) {
13587                         bool active;
13588
13589                         active = encoder->get_hw_state(encoder, &pipe);
13590                         I915_STATE_WARN(active,
13591                              "encoder detached but still enabled on pipe %c.\n",
13592                              pipe_name(pipe));
13593                 }
13594         }
13595 }
13596
/*
 * Cross-check the committed software crtc state against a fresh
 * hardware readout.
 *
 * NOTE: @old_crtc_state is recycled as scratch space for the hardware
 * readout — it is destroyed, reset and refilled below, so it no longer
 * holds the old state after this function runs.
 */
static void
verify_crtc_state(struct intel_crtc *crtc,
		  struct intel_crtc_state *old_crtc_state,
		  struct intel_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc_state *pipe_config = old_crtc_state;
	struct drm_atomic_state *state = old_crtc_state->uapi.state;
	struct intel_crtc *master = crtc;

	/*
	 * Reuse the old state's allocation for the readout; preserve the
	 * uapi.state pointer, which the reset would otherwise wipe out.
	 */
	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
	intel_crtc_free_hw_state(old_crtc_state);
	intel_crtc_state_reset(old_crtc_state, crtc);
	old_crtc_state->uapi.state = state;

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
		    crtc->base.name);

	pipe_config->hw.enable = new_crtc_state->hw.enable;

	/* Read back what the hardware is actually running. */
	intel_crtc_get_pipe_config(pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv) && pipe_config->hw.active)
		pipe_config->hw.active = new_crtc_state->hw.active;

	I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
			"crtc active state doesn't match with hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, pipe_config->hw.active);

	I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
			"transitional active state does not match atomic hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, crtc->active);

	/* A bigjoiner slave's encoders hang off the master crtc. */
	if (new_crtc_state->bigjoiner_slave)
		master = new_crtc_state->bigjoiner_linked_crtc;

	for_each_encoder_on_crtc(dev, &master->base, encoder) {
		enum pipe pipe;
		bool active;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->hw.active,
				"[ENCODER:%i] active %i with crtc active %i\n",
				encoder->base.base.id, active,
				new_crtc_state->hw.active);

		I915_STATE_WARN(active && master->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		/* Fold the encoder's view of the hw state into the readout. */
		if (active)
			intel_encoder_get_config(encoder, pipe_config);
	}

	/* Nothing more to compare for an inactive crtc. */
	if (!new_crtc_state->hw.active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	if (!intel_pipe_config_compare(new_crtc_state,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
		intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
	}
}
13668
13669 static void
13670 intel_verify_planes(struct intel_atomic_state *state)
13671 {
13672         struct intel_plane *plane;
13673         const struct intel_plane_state *plane_state;
13674         int i;
13675
13676         for_each_new_intel_plane_in_state(state, plane,
13677                                           plane_state, i)
13678                 assert_plane(plane, plane_state->planar_slave ||
13679                              plane_state->uapi.visible);
13680 }
13681
13682 static void
13683 verify_single_dpll_state(struct drm_i915_private *dev_priv,
13684                          struct intel_shared_dpll *pll,
13685                          struct intel_crtc *crtc,
13686                          struct intel_crtc_state *new_crtc_state)
13687 {
13688         struct intel_dpll_hw_state dpll_hw_state;
13689         unsigned int crtc_mask;
13690         bool active;
13691
13692         memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
13693
13694         drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);
13695
13696         active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);
13697
13698         if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
13699                 I915_STATE_WARN(!pll->on && pll->active_mask,
13700                      "pll in active use but not on in sw tracking\n");
13701                 I915_STATE_WARN(pll->on && !pll->active_mask,
13702                      "pll is on but not used by any active crtc\n");
13703                 I915_STATE_WARN(pll->on != active,
13704                      "pll on state mismatch (expected %i, found %i)\n",
13705                      pll->on, active);
13706         }
13707
13708         if (!crtc) {
13709                 I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
13710                                 "more active pll users than references: %x vs %x\n",
13711                                 pll->active_mask, pll->state.crtc_mask);
13712
13713                 return;
13714         }
13715
13716         crtc_mask = drm_crtc_mask(&crtc->base);
13717
13718         if (new_crtc_state->hw.active)
13719                 I915_STATE_WARN(!(pll->active_mask & crtc_mask),
13720                                 "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
13721                                 pipe_name(crtc->pipe), pll->active_mask);
13722         else
13723                 I915_STATE_WARN(pll->active_mask & crtc_mask,
13724                                 "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
13725                                 pipe_name(crtc->pipe), pll->active_mask);
13726
13727         I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
13728                         "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
13729                         crtc_mask, pll->state.crtc_mask);
13730
13731         I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
13732                                           &dpll_hw_state,
13733                                           sizeof(dpll_hw_state)),
13734                         "pll hw state mismatch\n");
13735 }
13736
13737 static void
13738 verify_shared_dpll_state(struct intel_crtc *crtc,
13739                          struct intel_crtc_state *old_crtc_state,
13740                          struct intel_crtc_state *new_crtc_state)
13741 {
13742         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13743
13744         if (new_crtc_state->shared_dpll)
13745                 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
13746
13747         if (old_crtc_state->shared_dpll &&
13748             old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
13749                 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
13750                 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
13751
13752                 I915_STATE_WARN(pll->active_mask & crtc_mask,
13753                                 "pll active mismatch (didn't expect pipe %c in active mask)\n",
13754                                 pipe_name(crtc->pipe));
13755                 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
13756                                 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
13757                                 pipe_name(crtc->pipe));
13758         }
13759 }
13760
13761 static void
13762 intel_modeset_verify_crtc(struct intel_crtc *crtc,
13763                           struct intel_atomic_state *state,
13764                           struct intel_crtc_state *old_crtc_state,
13765                           struct intel_crtc_state *new_crtc_state)
13766 {
13767         if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
13768                 return;
13769
13770         verify_wm_state(crtc, new_crtc_state);
13771         verify_connector_state(state, crtc);
13772         verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
13773         verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
13774 }
13775
13776 static void
13777 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
13778 {
13779         int i;
13780
13781         for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
13782                 verify_single_dpll_state(dev_priv,
13783                                          &dev_priv->dpll.shared_dplls[i],
13784                                          NULL, NULL);
13785 }
13786
/*
 * Verify the state that isn't tied to any particular crtc: encoder
 * linkage, connectors left without a crtc, and all shared DPLLs.
 */
static void
intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
			      struct intel_atomic_state *state)
{
	verify_encoder_state(dev_priv, state);
	verify_connector_state(state, NULL);
	verify_disabled_dpll_state(dev_priv);
}
13795
13796 static void
13797 intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
13798 {
13799         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
13800         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13801         const struct drm_display_mode *adjusted_mode =
13802                 &crtc_state->hw.adjusted_mode;
13803
13804         drm_calc_timestamping_constants(&crtc->base, adjusted_mode);
13805
13806         crtc->mode_flags = crtc_state->mode_flags;
13807
13808         /*
13809          * The scanline counter increments at the leading edge of hsync.
13810          *
13811          * On most platforms it starts counting from vtotal-1 on the
13812          * first active line. That means the scanline counter value is
13813          * always one less than what we would expect. Ie. just after
13814          * start of vblank, which also occurs at start of hsync (on the
13815          * last active line), the scanline counter will read vblank_start-1.
13816          *
13817          * On gen2 the scanline counter starts counting from 1 instead
13818          * of vtotal-1, so we have to subtract one (or rather add vtotal-1
13819          * to keep the value positive), instead of adding one.
13820          *
13821          * On HSW+ the behaviour of the scanline counter depends on the output
13822          * type. For DP ports it behaves like most other platforms, but on HDMI
13823          * there's an extra 1 line difference. So we need to add two instead of
13824          * one to the value.
13825          *
13826          * On VLV/CHV DSI the scanline counter would appear to increment
13827          * approx. 1/3 of a scanline before start of vblank. Unfortunately
13828          * that means we can't tell whether we're in vblank or not while
13829          * we're on that particular line. We must still set scanline_offset
13830          * to 1 so that the vblank timestamps come out correct when we query
13831          * the scanline counter from within the vblank interrupt handler.
13832          * However if queried just before the start of vblank we'll get an
13833          * answer that's slightly in the future.
13834          */
13835         if (IS_GEN(dev_priv, 2)) {
13836                 int vtotal;
13837
13838                 vtotal = adjusted_mode->crtc_vtotal;
13839                 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
13840                         vtotal /= 2;
13841
13842                 crtc->scanline_offset = vtotal - 1;
13843         } else if (HAS_DDI(dev_priv) &&
13844                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
13845                 crtc->scanline_offset = 2;
13846         } else {
13847                 crtc->scanline_offset = 1;
13848         }
13849 }
13850
13851 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
13852 {
13853         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
13854         struct intel_crtc_state *new_crtc_state;
13855         struct intel_crtc *crtc;
13856         int i;
13857
13858         if (!dev_priv->display.crtc_compute_clock)
13859                 return;
13860
13861         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
13862                 if (!intel_crtc_needs_modeset(new_crtc_state))
13863                         continue;
13864
13865                 intel_release_shared_dplls(state, crtc);
13866         }
13867 }
13868
13869 /*
13870  * This implements the workaround described in the "notes" section of the mode
13871  * set sequence documentation. When going from no pipes or single pipe to
13872  * multiple pipes, and planes are enabled after the pipe, we need to wait at
13873  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
13874  */
static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;	/* first crtc being enabled */
	struct intel_crtc_state *other_crtc_state = NULL;	/* second crtc being enabled */
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled in during modeset */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->hw.active ||
		    !intel_crtc_needs_modeset(crtc_state))
			continue;

		if (first_crtc_state) {
			/* Two crtcs being enabled is enough; stop scanning. */
			other_crtc_state = crtc_state;
			break;
		} else {
			first_crtc_state = crtc_state;
			first_pipe = crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->base.dev, crtc) {
		/* Note: this pulls every crtc into the atomic state. */
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->hsw_workaround_pipe = INVALID_PIPE;

		/* Only crtcs staying enabled across the modeset count. */
		if (!crtc_state->hw.active ||
		    intel_crtc_needs_modeset(crtc_state))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = crtc->pipe;
	}

	/*
	 * Exactly one pipe stays enabled: the first newly-enabled crtc
	 * must wait on it. Otherwise (no pipe was enabled) the second
	 * newly-enabled crtc, if any, waits on the first.
	 */
	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
13929
13930 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
13931                            u8 active_pipes)
13932 {
13933         const struct intel_crtc_state *crtc_state;
13934         struct intel_crtc *crtc;
13935         int i;
13936
13937         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
13938                 if (crtc_state->hw.active)
13939                         active_pipes |= BIT(crtc->pipe);
13940                 else
13941                         active_pipes &= ~BIT(crtc->pipe);
13942         }
13943
13944         return active_pipes;
13945 }
13946
13947 static int intel_modeset_checks(struct intel_atomic_state *state)
13948 {
13949         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
13950
13951         state->modeset = true;
13952
13953         if (IS_HASWELL(dev_priv))
13954                 return hsw_mode_set_planes_workaround(state);
13955
13956         return 0;
13957 }
13958
13959 /*
13960  * Handle calculation of various watermark data at the end of the atomic check
13961  * phase.  The code here should be run after the per-crtc and per-plane 'check'
13962  * handlers to ensure that all derived state has been updated.
13963  */
13964 static int calc_watermark_data(struct intel_atomic_state *state)
13965 {
13966         struct drm_device *dev = state->base.dev;
13967         struct drm_i915_private *dev_priv = to_i915(dev);
13968
13969         /* Is there platform-specific watermark information to calculate? */
13970         if (dev_priv->display.compute_global_watermarks)
13971                 return dev_priv->display.compute_global_watermarks(state);
13972
13973         return 0;
13974 }
13975
13976 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
13977                                      struct intel_crtc_state *new_crtc_state)
13978 {
13979         if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
13980                 return;
13981
13982         new_crtc_state->uapi.mode_changed = false;
13983         new_crtc_state->update_pipe = true;
13984 }
13985
13986 static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
13987                                     struct intel_crtc_state *new_crtc_state)
13988 {
13989         /*
13990          * If we're not doing the full modeset we want to
13991          * keep the current M/N values as they may be
13992          * sufficiently different to the computed values
13993          * to cause problems.
13994          *
13995          * FIXME: should really copy more fuzzy state here
13996          */
13997         new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
13998         new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
13999         new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
14000         new_crtc_state->has_drrs = old_crtc_state->has_drrs;
14001 }
14002
14003 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
14004                                           struct intel_crtc *crtc,
14005                                           u8 plane_ids_mask)
14006 {
14007         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14008         struct intel_plane *plane;
14009
14010         for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
14011                 struct intel_plane_state *plane_state;
14012
14013                 if ((plane_ids_mask & BIT(plane->id)) == 0)
14014                         continue;
14015
14016                 plane_state = intel_atomic_get_plane_state(state, plane);
14017                 if (IS_ERR(plane_state))
14018                         return PTR_ERR(plane_state);
14019         }
14020
14021         return 0;
14022 }
14023
14024 int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
14025                                      struct intel_crtc *crtc)
14026 {
14027         const struct intel_crtc_state *old_crtc_state =
14028                 intel_atomic_get_old_crtc_state(state, crtc);
14029         const struct intel_crtc_state *new_crtc_state =
14030                 intel_atomic_get_new_crtc_state(state, crtc);
14031
14032         return intel_crtc_add_planes_to_state(state, crtc,
14033                                               old_crtc_state->enabled_planes |
14034                                               new_crtc_state->enabled_planes);
14035 }
14036
14037 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
14038 {
14039         /* See {hsw,vlv,ivb}_plane_ratio() */
14040         return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
14041                 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
14042                 IS_IVYBRIDGE(dev_priv) || (INTEL_GEN(dev_priv) >= 11);
14043 }
14044
14045 static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
14046                                            struct intel_crtc *crtc,
14047                                            struct intel_crtc *other)
14048 {
14049         const struct intel_plane_state *plane_state;
14050         struct intel_plane *plane;
14051         u8 plane_ids = 0;
14052         int i;
14053
14054         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
14055                 if (plane->pipe == crtc->pipe)
14056                         plane_ids |= BIT(plane->id);
14057         }
14058
14059         return intel_crtc_add_planes_to_state(state, other, plane_ids);
14060 }
14061
14062 static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
14063 {
14064         const struct intel_crtc_state *crtc_state;
14065         struct intel_crtc *crtc;
14066         int i;
14067
14068         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
14069                 int ret;
14070
14071                 if (!crtc_state->bigjoiner)
14072                         continue;
14073
14074                 ret = intel_crtc_add_bigjoiner_planes(state, crtc,
14075                                                       crtc_state->bigjoiner_linked_crtc);
14076                 if (ret)
14077                         return ret;
14078         }
14079
14080         return 0;
14081 }
14082
/*
 * Run the per-plane atomic checks and make sure every plane whose state
 * may need recomputation (linked NV12 planes, bigjoiner mirrors, planes
 * affected by the active-plane count) is part of @state.
 */
static int intel_atomic_check_planes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i, ret;

	/* Pull linked planes into the state before checking them below. */
	ret = icl_add_linked_planes(state);
	if (ret)
		return ret;

	/* Likewise for the bigjoiner slave copies of master planes. */
	ret = intel_bigjoiner_add_affected_planes(state);
	if (ret)
		return ret;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_atomic_check(state, plane);
		if (ret) {
			drm_dbg_atomic(&dev_priv->drm,
				       "[PLANE:%d:%s] atomic driver check failed\n",
				       plane->base.base.id, plane->base.name);
			return ret;
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		u8 old_active_planes, new_active_planes;

		ret = icl_check_nv12_planes(new_crtc_state);
		if (ret)
			return ret;

		/*
		 * On some platforms the number of active planes affects
		 * the planes' minimum cdclk calculation. Add such planes
		 * to the state before we compute the minimum cdclk.
		 */
		if (!active_planes_affects_min_cdclk(dev_priv))
			continue;

		/* The cursor is excluded from the plane-count comparison. */
		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

		/*
		 * Not only the number of planes, but if the plane configuration had
		 * changed might already mean we need to recompute min CDCLK,
		 * because different planes might consume different amount of Dbuf bandwidth
		 * according to formula: Bw per plane = Pixel rate * bpp * pipe/plane scale factor
		 */
		if (old_active_planes == new_active_planes)
			continue;

		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
		if (ret)
			return ret;
	}

	return 0;
}
14145
/*
 * Compute the minimum cdclk required by each plane in @state and decide
 * whether a full cdclk recalculation (*need_cdclk_calc) is needed.
 */
static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
				    bool *need_cdclk_calc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_cdclk_state *old_cdclk_state;
	const struct intel_cdclk_state *new_cdclk_state;
	struct intel_plane_state *plane_state;
	struct intel_bw_state *new_bw_state;
	struct intel_plane *plane;
	int min_cdclk = 0;
	enum pipe pipe;
	int ret;
	int i;
	/*
	 * active_planes bitmask has been updated, and potentially
	 * affected planes are part of the state. We can now
	 * compute the minimum cdclk for each plane.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
		if (ret)
			return ret;
	}

	old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
	new_cdclk_state = intel_atomic_get_new_cdclk_state(state);

	/* A change of the forced minimum always requires recalculation. */
	if (new_cdclk_state &&
	    old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
		*need_cdclk_calc = true;

	ret = dev_priv->display.bw_calc_min_cdclk(state);
	if (ret)
		return ret;

	new_bw_state = intel_atomic_get_new_bw_state(state);

	/* Nothing further to compare unless both states are in @state. */
	if (!new_cdclk_state || !new_bw_state)
		return 0;

	for_each_pipe(dev_priv, pipe) {
		min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk);

		/*
		 * Currently do this change only if we need to increase
		 */
		if (new_bw_state->min_cdclk > min_cdclk)
			*need_cdclk_calc = true;
	}

	return 0;
}
14198
14199 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
14200 {
14201         struct intel_crtc_state *crtc_state;
14202         struct intel_crtc *crtc;
14203         int i;
14204
14205         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
14206                 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
14207                 int ret;
14208
14209                 ret = intel_crtc_atomic_check(state, crtc);
14210                 if (ret) {
14211                         drm_dbg_atomic(&i915->drm,
14212                                        "[CRTC:%d:%s] atomic driver check failed\n",
14213                                        crtc->base.base.id, crtc->base.name);
14214                         return ret;
14215                 }
14216         }
14217
14218         return 0;
14219 }
14220
14221 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
14222                                                u8 transcoders)
14223 {
14224         const struct intel_crtc_state *new_crtc_state;
14225         struct intel_crtc *crtc;
14226         int i;
14227
14228         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14229                 if (new_crtc_state->hw.enable &&
14230                     transcoders & BIT(new_crtc_state->cpu_transcoder) &&
14231                     intel_crtc_needs_modeset(new_crtc_state))
14232                         return true;
14233         }
14234
14235         return false;
14236 }
14237
14238 static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
14239                                         struct intel_crtc *crtc,
14240                                         struct intel_crtc_state *old_crtc_state,
14241                                         struct intel_crtc_state *new_crtc_state)
14242 {
14243         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14244         struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
14245         struct intel_crtc *slave, *master;
14246
14247         /* slave being enabled, is master is still claiming this crtc? */
14248         if (old_crtc_state->bigjoiner_slave) {
14249                 slave = crtc;
14250                 master = old_crtc_state->bigjoiner_linked_crtc;
14251                 master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
14252                 if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
14253                         goto claimed;
14254         }
14255
14256         if (!new_crtc_state->bigjoiner)
14257                 return 0;
14258
14259         if (1 + crtc->pipe >= INTEL_NUM_PIPES(dev_priv)) {
14260                 DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires "
14261                               "CRTC + 1 to be used, doesn't exist\n",
14262                               crtc->base.base.id, crtc->base.name);
14263                 return -EINVAL;
14264         }
14265
14266         slave = new_crtc_state->bigjoiner_linked_crtc =
14267                 intel_get_crtc_for_pipe(dev_priv, crtc->pipe + 1);
14268         slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave);
14269         master = crtc;
14270         if (IS_ERR(slave_crtc_state))
14271                 return PTR_ERR(slave_crtc_state);
14272
14273         /* master being enabled, slave was already configured? */
14274         if (slave_crtc_state->uapi.enable)
14275                 goto claimed;
14276
14277         DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n",
14278                       slave->base.base.id, slave->base.name);
14279
14280         return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state);
14281
14282 claimed:
14283         DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
14284                       "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
14285                       slave->base.base.id, slave->base.name,
14286                       master->base.base.id, master->base.name);
14287         return -EINVAL;
14288 }
14289
14290 static void kill_bigjoiner_slave(struct intel_atomic_state *state,
14291                                  struct intel_crtc_state *master_crtc_state)
14292 {
14293         struct intel_crtc_state *slave_crtc_state =
14294                 intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc);
14295
14296         slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
14297         slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
14298         slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
14299         intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
14300 }
14301
14302 /**
14303  * DOC: asynchronous flip implementation
14304  *
14305  * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
14306  * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
14307  * Correspondingly, support is currently added for primary plane only.
14308  *
14309  * Async flip can only change the plane surface address, so anything else
14310  * changing is rejected from the intel_atomic_check_async() function.
14311  * Once this check is cleared, flip done interrupt is enabled using
14312  * the skl_enable_flip_done() function.
14313  *
14314  * As soon as the surface address register is written, flip done interrupt is
 * generated and the requested events are sent to userspace in the interrupt
14316  * handler itself. The timestamp and sequence sent during the flip done event
14317  * correspond to the last vblank and have no relation to the actual time when
14318  * the flip done event was sent.
14319  */
/*
 * Validate that @state qualifies for an async flip: no modeset, crtc
 * active, and nothing but the plane surface address changing.
 */
static int intel_atomic_check_async(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	const struct intel_plane_state *new_plane_state, *old_plane_state;
	struct intel_crtc *crtc;
	struct intel_plane *plane;
	int i;

	/* Crtc-level preconditions first. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (intel_crtc_needs_modeset(new_crtc_state)) {
			drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
			return -EINVAL;
		}

		if (!new_crtc_state->hw.active) {
			drm_dbg_kms(&i915->drm, "CRTC inactive\n");
			return -EINVAL;
		}
		if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
			drm_dbg_kms(&i915->drm,
				    "Active planes cannot be changed during async flip\n");
			return -EINVAL;
		}
	}

	/*
	 * Every plane property other than the surface address must be
	 * identical between the old and new state.
	 */
	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		/*
		 * TODO: Async flip is only supported through the page flip IOCTL
		 * as of now. So support currently added for primary plane only.
		 * Support for other planes on platforms on which supports
		 * this(vlv/chv and icl+) should be added when async flip is
		 * enabled in the atomic IOCTL path.
		 */
		if (plane->id != PLANE_PRIMARY)
			return -EINVAL;

		/*
		 * FIXME: This check is kept generic for all platforms.
		 * Need to verify this for all gen9 and gen10 platforms to enable
		 * this selectively if required.
		 */
		switch (new_plane_state->hw.fb->modifier) {
		case I915_FORMAT_MOD_X_TILED:
		case I915_FORMAT_MOD_Y_TILED:
		case I915_FORMAT_MOD_Yf_TILED:
			break;
		default:
			drm_dbg_kms(&i915->drm,
				    "Linear memory/CCS does not support async flips\n");
			return -EINVAL;
		}

		if (old_plane_state->color_plane[0].stride !=
		    new_plane_state->color_plane[0].stride) {
			drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.fb->modifier !=
		    new_plane_state->hw.fb->modifier) {
			drm_dbg_kms(&i915->drm,
				    "Framebuffer modifiers cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.fb->format !=
		    new_plane_state->hw.fb->format) {
			drm_dbg_kms(&i915->drm,
				    "Framebuffer format cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.rotation !=
		    new_plane_state->hw.rotation) {
			drm_dbg_kms(&i915->drm, "Rotation cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
		    !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
			drm_dbg_kms(&i915->drm,
				    "Plane size/co-ordinates cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
			drm_dbg_kms(&i915->drm, "Alpha value cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.pixel_blend_mode !=
		    new_plane_state->hw.pixel_blend_mode) {
			drm_dbg_kms(&i915->drm,
				    "Pixel blend mode cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
			drm_dbg_kms(&i915->drm,
				    "Color encoding cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
			drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n");
			return -EINVAL;
		}
	}

	return 0;
}
14434
/*
 * Pull the linked crtc of every bigjoiner pair into @state, propagate
 * modesets across the link, then tear down stale bigjoiner links which
 * may be re-established later during the atomic check.
 */
static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc_state *linked_crtc_state;
		struct intel_crtc *linked_crtc;
		int ret;

		if (!crtc_state->bigjoiner)
			continue;

		/* Make sure the linked crtc is part of the state. */
		linked_crtc = crtc_state->bigjoiner_linked_crtc;
		linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc);
		if (IS_ERR(linked_crtc_state))
			return PTR_ERR(linked_crtc_state);

		if (!intel_crtc_needs_modeset(crtc_state))
			continue;

		/* A modeset on one end of the link forces one on the other. */
		linked_crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base,
							 &linked_crtc->base);
		if (ret)
			return ret;

		ret = intel_atomic_add_affected_planes(state, linked_crtc);
		if (ret)
			return ret;
	}

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		/* Kill old bigjoiner link, we may re-establish afterwards */
		if (intel_crtc_needs_modeset(crtc_state) &&
		    crtc_state->bigjoiner && !crtc_state->bigjoiner_slave)
			kill_bigjoiner_slave(state, crtc_state);
	}

	return 0;
}
14478
14479 /**
14480  * intel_atomic_check - validate state object
14481  * @dev: drm device
14482  * @_state: state to validate
14483  */
14484 static int intel_atomic_check(struct drm_device *dev,
14485                               struct drm_atomic_state *_state)
14486 {
14487         struct drm_i915_private *dev_priv = to_i915(dev);
14488         struct intel_atomic_state *state = to_intel_atomic_state(_state);
14489         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
14490         struct intel_crtc *crtc;
14491         int ret, i;
14492         bool any_ms = false;
14493
14494         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14495                                             new_crtc_state, i) {
14496                 if (new_crtc_state->inherited != old_crtc_state->inherited)
14497                         new_crtc_state->uapi.mode_changed = true;
14498         }
14499
14500         ret = drm_atomic_helper_check_modeset(dev, &state->base);
14501         if (ret)
14502                 goto fail;
14503
14504         ret = intel_bigjoiner_add_affected_crtcs(state);
14505         if (ret)
14506                 goto fail;
14507
14508         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14509                                             new_crtc_state, i) {
14510                 if (!intel_crtc_needs_modeset(new_crtc_state)) {
14511                         /* Light copy */
14512                         intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state);
14513
14514                         continue;
14515                 }
14516
14517                 if (!new_crtc_state->uapi.enable) {
14518                         if (!new_crtc_state->bigjoiner_slave) {
14519                                 intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
14520                                 any_ms = true;
14521                         }
14522                         continue;
14523                 }
14524
14525                 ret = intel_crtc_prepare_cleared_state(state, new_crtc_state);
14526                 if (ret)
14527                         goto fail;
14528
14529                 ret = intel_modeset_pipe_config(state, new_crtc_state);
14530                 if (ret)
14531                         goto fail;
14532
14533                 ret = intel_atomic_check_bigjoiner(state, crtc, old_crtc_state,
14534                                                    new_crtc_state);
14535                 if (ret)
14536                         goto fail;
14537         }
14538
14539         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14540                                             new_crtc_state, i) {
14541                 if (!intel_crtc_needs_modeset(new_crtc_state))
14542                         continue;
14543
14544                 ret = intel_modeset_pipe_config_late(new_crtc_state);
14545                 if (ret)
14546                         goto fail;
14547
14548                 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
14549         }
14550
14551         /**
14552          * Check if fastset is allowed by external dependencies like other
14553          * pipes and transcoders.
14554          *
14555          * Right now it only forces a fullmodeset when the MST master
14556          * transcoder did not changed but the pipe of the master transcoder
14557          * needs a fullmodeset so all slaves also needs to do a fullmodeset or
14558          * in case of port synced crtcs, if one of the synced crtcs
14559          * needs a full modeset, all other synced crtcs should be
14560          * forced a full modeset.
14561          */
14562         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14563                 if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
14564                         continue;
14565
14566                 if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
14567                         enum transcoder master = new_crtc_state->mst_master_transcoder;
14568
14569                         if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
14570                                 new_crtc_state->uapi.mode_changed = true;
14571                                 new_crtc_state->update_pipe = false;
14572                         }
14573                 }
14574
14575                 if (is_trans_port_sync_mode(new_crtc_state)) {
14576                         u8 trans = new_crtc_state->sync_mode_slaves_mask;
14577
14578                         if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
14579                                 trans |= BIT(new_crtc_state->master_transcoder);
14580
14581                         if (intel_cpu_transcoders_need_modeset(state, trans)) {
14582                                 new_crtc_state->uapi.mode_changed = true;
14583                                 new_crtc_state->update_pipe = false;
14584                         }
14585                 }
14586
14587                 if (new_crtc_state->bigjoiner) {
14588                         struct intel_crtc_state *linked_crtc_state =
14589                                 intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc);
14590
14591                         if (intel_crtc_needs_modeset(linked_crtc_state)) {
14592                                 new_crtc_state->uapi.mode_changed = true;
14593                                 new_crtc_state->update_pipe = false;
14594                         }
14595                 }
14596         }
14597
14598         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14599                                             new_crtc_state, i) {
14600                 if (intel_crtc_needs_modeset(new_crtc_state)) {
14601                         any_ms = true;
14602                         continue;
14603                 }
14604
14605                 if (!new_crtc_state->update_pipe)
14606                         continue;
14607
14608                 intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
14609         }
14610
14611         if (any_ms && !check_digital_port_conflicts(state)) {
14612                 drm_dbg_kms(&dev_priv->drm,
14613                             "rejecting conflicting digital port configuration\n");
14614                 ret = -EINVAL;
14615                 goto fail;
14616         }
14617
14618         ret = drm_dp_mst_atomic_check(&state->base);
14619         if (ret)
14620                 goto fail;
14621
14622         ret = intel_atomic_check_planes(state);
14623         if (ret)
14624                 goto fail;
14625
14626         /*
14627          * distrust_bios_wm will force a full dbuf recomputation
14628          * but the hardware state will only get updated accordingly
14629          * if state->modeset==true. Hence distrust_bios_wm==true &&
14630          * state->modeset==false is an invalid combination which
14631          * would cause the hardware and software dbuf state to get
14632          * out of sync. We must prevent that.
14633          *
14634          * FIXME clean up this mess and introduce better
14635          * state tracking for dbuf.
14636          */
14637         if (dev_priv->wm.distrust_bios_wm)
14638                 any_ms = true;
14639
14640         intel_fbc_choose_crtc(dev_priv, state);
14641         ret = calc_watermark_data(state);
14642         if (ret)
14643                 goto fail;
14644
14645         ret = intel_bw_atomic_check(state);
14646         if (ret)
14647                 goto fail;
14648
14649         ret = intel_atomic_check_cdclk(state, &any_ms);
14650         if (ret)
14651                 goto fail;
14652
14653         if (any_ms) {
14654                 ret = intel_modeset_checks(state);
14655                 if (ret)
14656                         goto fail;
14657
14658                 ret = intel_modeset_calc_cdclk(state);
14659                 if (ret)
14660                         return ret;
14661
14662                 intel_modeset_clear_plls(state);
14663         }
14664
14665         ret = intel_atomic_check_crtcs(state);
14666         if (ret)
14667                 goto fail;
14668
14669         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14670                                             new_crtc_state, i) {
14671                 if (new_crtc_state->uapi.async_flip) {
14672                         ret = intel_atomic_check_async(state);
14673                         if (ret)
14674                                 goto fail;
14675                 }
14676
14677                 if (!intel_crtc_needs_modeset(new_crtc_state) &&
14678                     !new_crtc_state->update_pipe)
14679                         continue;
14680
14681                 intel_dump_pipe_config(new_crtc_state, state,
14682                                        intel_crtc_needs_modeset(new_crtc_state) ?
14683                                        "[modeset]" : "[fastset]");
14684         }
14685
14686         return 0;
14687
14688  fail:
14689         if (ret == -EDEADLK)
14690                 return ret;
14691
14692         /*
14693          * FIXME would probably be nice to know which crtc specifically
14694          * caused the failure, in cases where we can pinpoint it.
14695          */
14696         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14697                                             new_crtc_state, i)
14698                 intel_dump_pipe_config(new_crtc_state, state, "[failed]");
14699
14700         return ret;
14701 }
14702
14703 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
14704 {
14705         struct intel_crtc_state *crtc_state;
14706         struct intel_crtc *crtc;
14707         int i, ret;
14708
14709         ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
14710         if (ret < 0)
14711                 return ret;
14712
14713         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
14714                 bool mode_changed = intel_crtc_needs_modeset(crtc_state);
14715
14716                 if (mode_changed || crtc_state->update_pipe ||
14717                     crtc_state->uapi.color_mgmt_changed) {
14718                         intel_dsb_prepare(crtc_state);
14719                 }
14720         }
14721
14722         return 0;
14723 }
14724
14725 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
14726 {
14727         struct drm_device *dev = crtc->base.dev;
14728         struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
14729
14730         if (!vblank->max_vblank_count)
14731                 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
14732
14733         return crtc->base.funcs->get_vblank_counter(&crtc->base);
14734 }
14735
14736 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
14737                                   struct intel_crtc_state *crtc_state)
14738 {
14739         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14740
14741         if (!IS_GEN(dev_priv, 2) || crtc_state->active_planes)
14742                 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
14743
14744         if (crtc_state->has_pch_encoder) {
14745                 enum pipe pch_transcoder =
14746                         intel_crtc_pch_transcoder(crtc);
14747
14748                 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
14749         }
14750 }
14751
/*
 * Apply the pipe-level updates that are allowed during a fastset:
 * pipe source size, panel fitter, linetime watermarks and (gen11+)
 * the pipe chicken register.
 */
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */
	intel_set_pipe_src_size(new_crtc_state);

	/* on skylake this is done by detaching scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(new_crtc_state);

		if (new_crtc_state->pch_pfit.enabled)
			skl_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		if (new_crtc_state->pch_pfit.enabled)
			ilk_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ilk_pfit_disable(old_crtc_state);
	}

	/*
	 * The register is supposedly single buffered so perhaps
	 * not 100% correct to do this here. But SKL+ calculate
	 * this based on the adjust pixel rate so pfit changes do
	 * affect it and so it must be updated for fastsets.
	 * HSW/BDW only really need this here for fastboot, after
	 * that the value should not change without a full modeset.
	 */
	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		hsw_set_linetime_wm(new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);
}
14796
/*
 * Program the per-pipe configuration for @crtc during a commit.
 * For fastsets (no modeset) this covers color management, scalers,
 * PIPEMISC, the fastset path and PSR2 manual tracking; the watermark
 * update hook runs in all cases.
 */
static void commit_pipe_config(struct intel_atomic_state *state,
                               struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        const struct intel_crtc_state *old_crtc_state =
                intel_atomic_get_old_crtc_state(state, crtc);
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        bool modeset = intel_crtc_needs_modeset(new_crtc_state);

        /*
         * During modesets pipe configuration was programmed as the
         * CRTC was enabled.
         */
        if (!modeset) {
                if (new_crtc_state->uapi.color_mgmt_changed ||
                    new_crtc_state->update_pipe)
                        intel_color_commit(new_crtc_state);

                if (INTEL_GEN(dev_priv) >= 9)
                        skl_detach_scalers(new_crtc_state);

                if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
                        bdw_set_pipemisc(new_crtc_state);

                if (new_crtc_state->update_pipe)
                        intel_pipe_fastset(old_crtc_state, new_crtc_state);

                intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
        }

        /* Watermarks are updated for both modesets and fastsets. */
        if (dev_priv->display.atomic_update_watermarks)
                dev_priv->display.atomic_update_watermarks(state, crtc);
}
14831
/*
 * Enable @crtc if its new state requires a full modeset; no-op for
 * fastsets. Updates the stored active timings first, and re-enables
 * pipe CRC once the CRTC is up — except for bigjoiner slaves.
 */
static void intel_enable_crtc(struct intel_atomic_state *state,
                              struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);

        if (!intel_crtc_needs_modeset(new_crtc_state))
                return;

        /* Refresh the active timings before bringing the pipe up. */
        intel_crtc_update_active_timings(new_crtc_state);

        dev_priv->display.crtc_enable(state, crtc);

        if (new_crtc_state->bigjoiner_slave)
                return;

        /* vblanks work again, re-enable pipe CRC. */
        intel_crtc_enable_pipe_crc(crtc);
}
14852
/*
 * Commit the plane/pipe updates for an already configured @crtc.
 * The pipe config and plane programming happen inside the vblank
 * evasion critical section bounded by intel_pipe_update_start()/end().
 */
static void intel_update_crtc(struct intel_atomic_state *state,
                              struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        const struct intel_crtc_state *old_crtc_state =
                intel_atomic_get_old_crtc_state(state, crtc);
        struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        bool modeset = intel_crtc_needs_modeset(new_crtc_state);

        if (!modeset) {
                /* Pre-load the LUTs ahead of the vblank evasion window. */
                if (new_crtc_state->preload_luts &&
                    (new_crtc_state->uapi.color_mgmt_changed ||
                     new_crtc_state->update_pipe))
                        intel_color_load_luts(new_crtc_state);

                intel_pre_plane_update(state, crtc);

                if (new_crtc_state->update_pipe)
                        intel_encoders_update_pipe(state, crtc);
        }

        if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
                intel_fbc_disable(crtc);
        else
                intel_fbc_enable(state, crtc);

        /* Perform vblank evasion around commit operation */
        intel_pipe_update_start(new_crtc_state);

        commit_pipe_config(state, crtc);

        if (INTEL_GEN(dev_priv) >= 9)
                skl_update_planes_on_crtc(state, crtc);
        else
                i9xx_update_planes_on_crtc(state, crtc);

        intel_pipe_update_end(new_crtc_state);

        /*
         * We usually enable FIFO underrun interrupts as part of the
         * CRTC enable sequence during modesets.  But when we inherit a
         * valid pipe configuration from the BIOS we need to take care
         * of enabling them on the CRTC's first fastset.
         */
        if (new_crtc_state->update_pipe && !modeset &&
            old_crtc_state->inherited)
                intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
14902
/*
 * Fully disable @crtc as part of a modeset: planes, pipe CRC, the
 * pipe itself and its shared DPLL. A linked bigjoiner slave also has
 * its planes turned off here since it is never disabled directly.
 */
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
                                          struct intel_crtc_state *old_crtc_state,
                                          struct intel_crtc_state *new_crtc_state,
                                          struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);

        /* Bigjoiner slaves must be disabled via their master. */
        drm_WARN_ON(&dev_priv->drm, old_crtc_state->bigjoiner_slave);

        intel_crtc_disable_planes(state, crtc);

        /*
         * We still need special handling for disabling bigjoiner master
         * and slaves since for slave we do not have encoder or plls
         * so we dont need to disable those.
         */
        if (old_crtc_state->bigjoiner) {
                intel_crtc_disable_planes(state,
                                          old_crtc_state->bigjoiner_linked_crtc);
                old_crtc_state->bigjoiner_linked_crtc->active = false;
        }

        /*
         * We need to disable pipe CRC before disabling the pipe,
         * or we race against vblank off.
         */
        intel_crtc_disable_pipe_crc(crtc);

        dev_priv->display.crtc_disable(state, crtc);
        crtc->active = false;
        intel_fbc_disable(crtc);
        intel_disable_shared_dpll(old_crtc_state);

        /* FIXME unify this for all platforms */
        if (!new_crtc_state->hw.active &&
            !HAS_GMCH(dev_priv) &&
            dev_priv->display.initial_watermarks)
                dev_priv->display.initial_watermarks(state, crtc);
}
14942
/*
 * Disable every CRTC that needs a full modeset, in dependency order:
 * transcoder port sync and MST slaves first, then everything else.
 * Bigjoiner slaves are skipped here and torn down via their master.
 */
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
        struct intel_crtc_state *new_crtc_state, *old_crtc_state;
        struct intel_crtc *crtc;
        u32 handled = 0;        /* pipes already disabled in the first pass */
        int i;

        /* Only disable port sync and MST slaves */
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i) {
                if (!intel_crtc_needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner)
                        continue;

                if (!old_crtc_state->hw.active)
                        continue;

                /* In case of Transcoder port Sync master slave CRTCs can be
                 * assigned in any order and we need to make sure that
                 * slave CRTCs are disabled first and then master CRTC since
                 * Slave vblanks are masked till Master Vblanks.
                 */
                if (!is_trans_port_sync_slave(old_crtc_state) &&
                    !intel_dp_mst_is_slave_trans(old_crtc_state))
                        continue;

                intel_pre_plane_update(state, crtc);
                intel_old_crtc_state_disables(state, old_crtc_state,
                                              new_crtc_state, crtc);
                handled |= BIT(crtc->pipe);
        }

        /* Disable everything else left on */
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i) {
                if (!intel_crtc_needs_modeset(new_crtc_state) ||
                    (handled & BIT(crtc->pipe)) ||
                    old_crtc_state->bigjoiner_slave)
                        continue;

                intel_pre_plane_update(state, crtc);
                if (old_crtc_state->bigjoiner) {
                        struct intel_crtc *slave =
                                old_crtc_state->bigjoiner_linked_crtc;

                        /* The linked slave needs its pre-plane update too. */
                        intel_pre_plane_update(state, slave);
                }

                if (old_crtc_state->hw.active)
                        intel_old_crtc_state_disables(state, old_crtc_state,
                                                      new_crtc_state, crtc);
        }
}
14995
14996 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
14997 {
14998         struct intel_crtc_state *new_crtc_state;
14999         struct intel_crtc *crtc;
15000         int i;
15001
15002         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
15003                 if (!new_crtc_state->hw.active)
15004                         continue;
15005
15006                 intel_enable_crtc(state, crtc);
15007                 intel_update_crtc(state, crtc);
15008         }
15009 }
15010
/*
 * skl+ modeset enable step: orders CRTC updates so that the DDB
 * allocations of different pipes never overlap between updates.
 * Non-modeset pipes are updated first (waiting a vblank where the
 * old allocation could still overlap others), independent modeset
 * pipes are enabled next, then pipes that depend on other pipes
 * (MST slaves, port sync masters, bigjoiner masters), and finally
 * the plane updates for all newly enabled pipes.
 */
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        struct intel_crtc *crtc;
        struct intel_crtc_state *old_crtc_state, *new_crtc_state;
        struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
        u8 update_pipes = 0, modeset_pipes = 0;
        int i;

        /* Classify active pipes and record the current DDB allocations. */
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                enum pipe pipe = crtc->pipe;

                if (!new_crtc_state->hw.active)
                        continue;

                /* ignore allocations for crtc's that have been turned off. */
                if (!intel_crtc_needs_modeset(new_crtc_state)) {
                        entries[pipe] = old_crtc_state->wm.skl.ddb;
                        update_pipes |= BIT(pipe);
                } else {
                        modeset_pipes |= BIT(pipe);
                }
        }

        /*
         * Whenever the number of active pipes changes, we need to make sure we
         * update the pipes in the right order so that their ddb allocations
         * never overlap with each other between CRTC updates. Otherwise we'll
         * cause pipe underruns and other bad stuff.
         *
         * So first lets enable all pipes that do not need a fullmodeset as
         * those don't have any external dependency.
         */
        while (update_pipes) {
                for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                                    new_crtc_state, i) {
                        enum pipe pipe = crtc->pipe;

                        if ((update_pipes & BIT(pipe)) == 0)
                                continue;

                        /* Skip this pipe for now if its new DDB would collide. */
                        if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
                                                        entries, I915_MAX_PIPES, pipe))
                                continue;

                        entries[pipe] = new_crtc_state->wm.skl.ddb;
                        update_pipes &= ~BIT(pipe);

                        intel_update_crtc(state, crtc);

                        /*
                         * If this is an already active pipe, it's DDB changed,
                         * and this isn't the last pipe that needs updating
                         * then we need to wait for a vblank to pass for the
                         * new ddb allocation to take effect.
                         */
                        if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
                                                 &old_crtc_state->wm.skl.ddb) &&
                            (update_pipes | modeset_pipes))
                                intel_wait_for_vblank(dev_priv, pipe);
                }
        }

        update_pipes = modeset_pipes;

        /*
         * Enable all pipes that needs a modeset and do not depends on other
         * pipes
         */
        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
                enum pipe pipe = crtc->pipe;

                if ((modeset_pipes & BIT(pipe)) == 0)
                        continue;

                if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
                    is_trans_port_sync_master(new_crtc_state) ||
                    (new_crtc_state->bigjoiner && !new_crtc_state->bigjoiner_slave))
                        continue;

                modeset_pipes &= ~BIT(pipe);

                intel_enable_crtc(state, crtc);
        }

        /*
         * Then we enable all remaining pipes that depend on other
         * pipes: MST slaves and port sync masters, big joiner master
         */
        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
                enum pipe pipe = crtc->pipe;

                if ((modeset_pipes & BIT(pipe)) == 0)
                        continue;

                modeset_pipes &= ~BIT(pipe);

                intel_enable_crtc(state, crtc);
        }

        /*
         * Finally we do the plane updates/etc. for all pipes that got enabled.
         */
        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
                enum pipe pipe = crtc->pipe;

                if ((update_pipes & BIT(pipe)) == 0)
                        continue;

                drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
                                                                        entries, I915_MAX_PIPES, pipe));

                entries[pipe] = new_crtc_state->wm.skl.ddb;
                update_pipes &= ~BIT(pipe);

                intel_update_crtc(state, crtc);
        }

        /* By now every pipe must have been handled. */
        drm_WARN_ON(&dev_priv->drm, modeset_pipes);
        drm_WARN_ON(&dev_priv->drm, update_pipes);
}
15132
15133 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
15134 {
15135         struct intel_atomic_state *state, *next;
15136         struct llist_node *freed;
15137
15138         freed = llist_del_all(&dev_priv->atomic_helper.free_list);
15139         llist_for_each_entry_safe(state, next, freed, freed)
15140                 drm_atomic_state_put(&state->base);
15141 }
15142
15143 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
15144 {
15145         struct drm_i915_private *dev_priv =
15146                 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
15147
15148         intel_atomic_helper_free_state(dev_priv);
15149 }
15150
/*
 * Wait for the commit_ready fence to signal, while simultaneously
 * watching the I915_RESET_MODESET bit: the wait terminates as soon as
 * either the fence is done or a GPU reset needing the modeset path is
 * pending, so the commit doesn't deadlock against the reset.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
        struct wait_queue_entry wait_fence, wait_reset;
        struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

        init_wait_entry(&wait_fence, 0);
        init_wait_entry(&wait_reset, 0);
        for (;;) {
                /* Queue on both waitqueues before re-checking the conditions. */
                prepare_to_wait(&intel_state->commit_ready.wait,
                                &wait_fence, TASK_UNINTERRUPTIBLE);
                prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
                                              I915_RESET_MODESET),
                                &wait_reset, TASK_UNINTERRUPTIBLE);


                if (i915_sw_fence_done(&intel_state->commit_ready) ||
                    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
                        break;

                schedule();
        }
        finish_wait(&intel_state->commit_ready.wait, &wait_fence);
        finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
                                  I915_RESET_MODESET),
                    &wait_reset);
}
15177
15178 static void intel_cleanup_dsbs(struct intel_atomic_state *state)
15179 {
15180         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
15181         struct intel_crtc *crtc;
15182         int i;
15183
15184         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
15185                                             new_crtc_state, i)
15186                 intel_dsb_cleanup(old_crtc_state);
15187 }
15188
15189 static void intel_atomic_cleanup_work(struct work_struct *work)
15190 {
15191         struct intel_atomic_state *state =
15192                 container_of(work, struct intel_atomic_state, base.commit_work);
15193         struct drm_i915_private *i915 = to_i915(state->base.dev);
15194
15195         intel_cleanup_dsbs(state);
15196         drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
15197         drm_atomic_helper_commit_cleanup_done(&state->base);
15198         drm_atomic_state_put(&state->base);
15199
15200         intel_atomic_helper_free_state(i915);
15201 }
15202
/*
 * The tail of an atomic commit: performs the actual hardware updates
 * for @state — CRTC disables, pipe/plane programming, CRTC enables,
 * watermark and cdclk/dbuf/sagv updates — waits for flips to finish,
 * and defers final state cleanup to a high-priority worker. Runs
 * either synchronously from intel_atomic_commit() or from the commit
 * work item for nonblocking commits.
 */
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
        struct drm_device *dev = state->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc_state *new_crtc_state, *old_crtc_state;
        struct intel_crtc *crtc;
        u64 put_domains[I915_MAX_PIPES] = {};
        intel_wakeref_t wakeref = 0;
        int i;

        /* Wait for the commit_ready fence (or a pending GPU reset). */
        intel_atomic_commit_fence_wait(state);

        drm_atomic_helper_wait_for_dependencies(&state->base);

        if (state->modeset)
                wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

        /* Grab the power domains needed by each pipe being reprogrammed. */
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i) {
                if (intel_crtc_needs_modeset(new_crtc_state) ||
                    new_crtc_state->update_pipe) {

                        put_domains[crtc->pipe] =
                                modeset_get_crtc_power_domains(new_crtc_state);
                }
        }

        intel_commit_modeset_disables(state);

        /* FIXME: Eventually get rid of our crtc->config pointer */
        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
                crtc->config = new_crtc_state;

        if (state->modeset) {
                drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);

                intel_set_cdclk_pre_plane_update(state);

                intel_modeset_verify_disabled(dev_priv, state);
        }

        intel_sagv_pre_plane_update(state);

        /* Complete the events for pipes that have now been disabled */
        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
                bool modeset = intel_crtc_needs_modeset(new_crtc_state);

                /* Complete events for now disable pipes here. */
                if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
                        spin_lock_irq(&dev->event_lock);
                        drm_crtc_send_vblank_event(&crtc->base,
                                                   new_crtc_state->uapi.event);
                        spin_unlock_irq(&dev->event_lock);

                        new_crtc_state->uapi.event = NULL;
                }
        }

        if (state->modeset)
                intel_encoders_update_prepare(state);

        intel_dbuf_pre_plane_update(state);

        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
                if (new_crtc_state->uapi.async_flip)
                        skl_enable_flip_done(crtc);
        }

        /* Now enable the clocks, plane, pipe, and connectors that we set up. */
        dev_priv->display.commit_modeset_enables(state);

        if (state->modeset) {
                intel_encoders_update_complete(state);

                intel_set_cdclk_post_plane_update(state);
        }

        /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
         * already, but still need the state for the delayed optimization. To
         * fix this:
         * - wrap the optimization/post_plane_update stuff into a per-crtc work.
         * - schedule that vblank worker _before_ calling hw_done
         * - at the start of commit_tail, cancel it _synchrously
         * - switch over to the vblank wait helper in the core after that since
         *   we don't need out special handling any more.
         */
        drm_atomic_helper_wait_for_flip_done(dev, &state->base);

        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
                if (new_crtc_state->uapi.async_flip)
                        skl_disable_flip_done(crtc);

                /* Load any LUTs that were deliberately not pre-loaded. */
                if (new_crtc_state->hw.active &&
                    !intel_crtc_needs_modeset(new_crtc_state) &&
                    !new_crtc_state->preload_luts &&
                    (new_crtc_state->uapi.color_mgmt_changed ||
                     new_crtc_state->update_pipe))
                        intel_color_load_luts(new_crtc_state);
        }

        /*
         * Now that the vblank has passed, we can go ahead and program the
         * optimal watermarks on platforms that need two-step watermark
         * programming.
         *
         * TODO: Move this (and other cleanup) to an async worker eventually.
         */
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i) {
                /*
                 * Gen2 reports pipe underruns whenever all planes are disabled.
                 * So re-enable underrun reporting after some planes get enabled.
                 *
                 * We do this before .optimize_watermarks() so that we have a
                 * chance of catching underruns with the intermediate watermarks
                 * vs. the new plane configuration.
                 */
                if (IS_GEN(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
                        intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

                if (dev_priv->display.optimize_watermarks)
                        dev_priv->display.optimize_watermarks(state, crtc);
        }

        intel_dbuf_post_plane_update(state);

        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                intel_post_plane_update(state, crtc);

                modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);

                intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);

                /*
                 * DSB cleanup is done in cleanup_work aligning with framebuffer
                 * cleanup. So copy and reset the dsb structure to sync with
                 * commit_done and later do dsb cleanup in cleanup_work.
                 */
                old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
        }

        /* Underruns don't always raise interrupts, so check manually */
        intel_check_cpu_fifo_underruns(dev_priv);
        intel_check_pch_fifo_underruns(dev_priv);

        if (state->modeset)
                intel_verify_planes(state);

        intel_sagv_post_plane_update(state);

        drm_atomic_helper_commit_hw_done(&state->base);

        if (state->modeset) {
                /* As one of the primary mmio accessors, KMS has a high
                 * likelihood of triggering bugs in unclaimed access. After we
                 * finish modesetting, see if an error has been flagged, and if
                 * so enable debugging for the next modeset - and hope we catch
                 * the culprit.
                 */
                intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
                intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
        }
        intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);

        /*
         * Defer the cleanup of the old state to a separate worker to not
         * impede the current task (userspace for blocking modesets) that
         * are executed inline. For out-of-line asynchronous modesets/flips,
         * deferring to a new worker seems overkill, but we would place a
         * schedule point (cond_resched()) here anyway to keep latencies
         * down.
         */
        INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
        queue_work(system_highpri_wq, &state->base.commit_work);
}
15378
15379 static void intel_atomic_commit_work(struct work_struct *work)
15380 {
15381         struct intel_atomic_state *state =
15382                 container_of(work, struct intel_atomic_state, base.commit_work);
15383
15384         intel_atomic_commit_tail(state);
15385 }
15386
15387 static int __i915_sw_fence_call
15388 intel_atomic_commit_ready(struct i915_sw_fence *fence,
15389                           enum i915_sw_fence_notify notify)
15390 {
15391         struct intel_atomic_state *state =
15392                 container_of(fence, struct intel_atomic_state, commit_ready);
15393
15394         switch (notify) {
15395         case FENCE_COMPLETE:
15396                 /* we do blocking waits in the worker, nothing to do here */
15397                 break;
15398         case FENCE_FREE:
15399                 {
15400                         struct intel_atomic_helper *helper =
15401                                 &to_i915(state->base.dev)->atomic_helper;
15402
15403                         if (llist_add(&state->freed, &helper->free_list))
15404                                 schedule_work(&helper->free_work);
15405                         break;
15406                 }
15407         }
15408
15409         return NOTIFY_DONE;
15410 }
15411
15412 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
15413 {
15414         struct intel_plane_state *old_plane_state, *new_plane_state;
15415         struct intel_plane *plane;
15416         int i;
15417
15418         for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
15419                                              new_plane_state, i)
15420                 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
15421                                         to_intel_frontbuffer(new_plane_state->hw.fb),
15422                                         plane->frontbuffer_bit);
15423 }
15424
/*
 * Atomic commit entry point. Prepares the commit, swaps in the new
 * state, and either runs intel_atomic_commit_tail() directly
 * (blocking) or queues it on the modeset/flip workqueue (nonblocking).
 * Returns 0 on success or a negative error code, releasing the
 * runtime-pm reference and the sw fence on every failure path.
 */
static int intel_atomic_commit(struct drm_device *dev,
                               struct drm_atomic_state *_state,
                               bool nonblock)
{
        struct intel_atomic_state *state = to_intel_atomic_state(_state);
        struct drm_i915_private *dev_priv = to_i915(dev);
        int ret = 0;

        state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

        drm_atomic_state_get(&state->base);
        i915_sw_fence_init(&state->commit_ready,
                           intel_atomic_commit_ready);

        /*
         * The intel_legacy_cursor_update() fast path takes care
         * of avoiding the vblank waits for simple cursor
         * movement and flips. For cursor on/off and size changes,
         * we want to perform the vblank waits so that watermark
         * updates happen during the correct frames. Gen9+ have
         * double buffered watermarks and so shouldn't need this.
         *
         * Unset state->legacy_cursor_update before the call to
         * drm_atomic_helper_setup_commit() because otherwise
         * drm_atomic_helper_wait_for_flip_done() is a noop and
         * we get FIFO underruns because we didn't wait
         * for vblank.
         *
         * FIXME doing watermarks and fb cleanup from a vblank worker
         * (assuming we had any) would solve these problems.
         */
        if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
                struct intel_crtc_state *new_crtc_state;
                struct intel_crtc *crtc;
                int i;

                for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
                        if (new_crtc_state->wm.need_postvbl_update ||
                            new_crtc_state->update_wm_post)
                                state->base.legacy_cursor_update = false;
        }

        ret = intel_atomic_prepare_commit(state);
        if (ret) {
                drm_dbg_atomic(&dev_priv->drm,
                               "Preparing state failed with %i\n", ret);
                i915_sw_fence_commit(&state->commit_ready);
                intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
                return ret;
        }

        ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
        if (!ret)
                ret = drm_atomic_helper_swap_state(&state->base, true);
        if (!ret)
                intel_atomic_swap_global_state(state);

        if (ret) {
                /* Unwind: release the fence, DSBs, plane state and rpm ref. */
                struct intel_crtc_state *new_crtc_state;
                struct intel_crtc *crtc;
                int i;

                i915_sw_fence_commit(&state->commit_ready);

                for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
                        intel_dsb_cleanup(new_crtc_state);

                drm_atomic_helper_cleanup_planes(dev, &state->base);
                intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
                return ret;
        }
        dev_priv->wm.distrust_bios_wm = false;
        intel_shared_dpll_swap_state(state);
        intel_atomic_track_fbs(state);

        /* Extra reference for commit_tail; dropped by the cleanup worker. */
        drm_atomic_state_get(&state->base);
        INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

        i915_sw_fence_commit(&state->commit_ready);
        if (nonblock && state->modeset) {
                queue_work(dev_priv->modeset_wq, &state->base.commit_work);
        } else if (nonblock) {
                queue_work(dev_priv->flip_wq, &state->base.commit_work);
        } else {
                /* Blocking modesets must not overtake queued nonblocking ones. */
                if (state->modeset)
                        flush_workqueue(dev_priv->modeset_wq);
                intel_atomic_commit_tail(state);
        }

        return 0;
}
15516
/*
 * Bookkeeping for a deferred RPS (GPU frequency) boost: queued on a crtc's
 * vblank waitqueue by add_rps_boost_after_vblank() and consumed once by
 * do_rps_boost() when the next vblank fires.
 */
struct wait_rps_boost {
	struct wait_queue_entry wait;

	struct drm_crtc *crtc;
	struct i915_request *request;
};
15523
15524 static int do_rps_boost(struct wait_queue_entry *_wait,
15525                         unsigned mode, int sync, void *key)
15526 {
15527         struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
15528         struct i915_request *rq = wait->request;
15529
15530         /*
15531          * If we missed the vblank, but the request is already running it
15532          * is reasonable to assume that it will complete before the next
15533          * vblank without our intervention, so leave RPS alone.
15534          */
15535         if (!i915_request_started(rq))
15536                 intel_rps_boost(rq);
15537         i915_request_put(rq);
15538
15539         drm_crtc_vblank_put(wait->crtc);
15540
15541         list_del(&wait->wait.entry);
15542         kfree(wait);
15543         return 1;
15544 }
15545
15546 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
15547                                        struct dma_fence *fence)
15548 {
15549         struct wait_rps_boost *wait;
15550
15551         if (!dma_fence_is_i915(fence))
15552                 return;
15553
15554         if (INTEL_GEN(to_i915(crtc->dev)) < 6)
15555                 return;
15556
15557         if (drm_crtc_vblank_get(crtc))
15558                 return;
15559
15560         wait = kmalloc(sizeof(*wait), GFP_KERNEL);
15561         if (!wait) {
15562                 drm_crtc_vblank_put(crtc);
15563                 return;
15564         }
15565
15566         wait->request = to_request(dma_fence_get(fence));
15567         wait->crtc = crtc;
15568
15569         wait->wait.func = do_rps_boost;
15570         wait->wait.flags = 0;
15571
15572         add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
15573 }
15574
15575 int intel_plane_pin_fb(struct intel_plane_state *plane_state)
15576 {
15577         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
15578         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
15579         struct drm_framebuffer *fb = plane_state->hw.fb;
15580         struct i915_vma *vma;
15581
15582         if (plane->id == PLANE_CURSOR &&
15583             INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
15584                 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15585                 const int align = intel_cursor_alignment(dev_priv);
15586                 int err;
15587
15588                 err = i915_gem_object_attach_phys(obj, align);
15589                 if (err)
15590                         return err;
15591         }
15592
15593         vma = intel_pin_and_fence_fb_obj(fb,
15594                                          &plane_state->view,
15595                                          intel_plane_uses_fence(plane_state),
15596                                          &plane_state->flags);
15597         if (IS_ERR(vma))
15598                 return PTR_ERR(vma);
15599
15600         plane_state->vma = vma;
15601
15602         return 0;
15603 }
15604
15605 void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
15606 {
15607         struct i915_vma *vma;
15608
15609         vma = fetch_and_zero(&old_plane_state->vma);
15610         if (vma)
15611                 intel_unpin_fb_vma(vma, old_plane_state->flags);
15612 }
15613
/*
 * Raise the scheduling priority of outstanding work against a framebuffer
 * object so that rendering destined for the display is serviced first.
 */
static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
{
	struct i915_sched_attr attr = {
		.priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
	};

	i915_gem_object_wait_priority(obj, 0, &attr);
}
15622
15623 /**
15624  * intel_prepare_plane_fb - Prepare fb for usage on plane
15625  * @_plane: drm plane to prepare for
15626  * @_new_plane_state: the plane state being prepared
15627  *
15628  * Prepares a framebuffer for usage on a display plane.  Generally this
15629  * involves pinning the underlying object and updating the frontbuffer tracking
15630  * bits.  Some older platforms need special physical address handling for
15631  * cursor planes.
15632  *
15633  * Returns 0 on success, negative error code on failure.
15634  */
15635 int
15636 intel_prepare_plane_fb(struct drm_plane *_plane,
15637                        struct drm_plane_state *_new_plane_state)
15638 {
15639         struct intel_plane *plane = to_intel_plane(_plane);
15640         struct intel_plane_state *new_plane_state =
15641                 to_intel_plane_state(_new_plane_state);
15642         struct intel_atomic_state *state =
15643                 to_intel_atomic_state(new_plane_state->uapi.state);
15644         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
15645         const struct intel_plane_state *old_plane_state =
15646                 intel_atomic_get_old_plane_state(state, plane);
15647         struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
15648         struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
15649         int ret;
15650
15651         if (old_obj) {
15652                 const struct intel_crtc_state *crtc_state =
15653                         intel_atomic_get_new_crtc_state(state,
15654                                                         to_intel_crtc(old_plane_state->hw.crtc));
15655
15656                 /* Big Hammer, we also need to ensure that any pending
15657                  * MI_WAIT_FOR_EVENT inside a user batch buffer on the
15658                  * current scanout is retired before unpinning the old
15659                  * framebuffer. Note that we rely on userspace rendering
15660                  * into the buffer attached to the pipe they are waiting
15661                  * on. If not, userspace generates a GPU hang with IPEHR
15662                  * point to the MI_WAIT_FOR_EVENT.
15663                  *
15664                  * This should only fail upon a hung GPU, in which case we
15665                  * can safely continue.
15666                  */
15667                 if (intel_crtc_needs_modeset(crtc_state)) {
15668                         ret = i915_sw_fence_await_reservation(&state->commit_ready,
15669                                                               old_obj->base.resv, NULL,
15670                                                               false, 0,
15671                                                               GFP_KERNEL);
15672                         if (ret < 0)
15673                                 return ret;
15674                 }
15675         }
15676
15677         if (new_plane_state->uapi.fence) { /* explicit fencing */
15678                 ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
15679                                                     new_plane_state->uapi.fence,
15680                                                     i915_fence_timeout(dev_priv),
15681                                                     GFP_KERNEL);
15682                 if (ret < 0)
15683                         return ret;
15684         }
15685
15686         if (!obj)
15687                 return 0;
15688
15689         ret = i915_gem_object_pin_pages(obj);
15690         if (ret)
15691                 return ret;
15692
15693         ret = intel_plane_pin_fb(new_plane_state);
15694
15695         i915_gem_object_unpin_pages(obj);
15696         if (ret)
15697                 return ret;
15698
15699         fb_obj_bump_render_priority(obj);
15700         i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
15701
15702         if (!new_plane_state->uapi.fence) { /* implicit fencing */
15703                 struct dma_fence *fence;
15704
15705                 ret = i915_sw_fence_await_reservation(&state->commit_ready,
15706                                                       obj->base.resv, NULL,
15707                                                       false,
15708                                                       i915_fence_timeout(dev_priv),
15709                                                       GFP_KERNEL);
15710                 if (ret < 0)
15711                         goto unpin_fb;
15712
15713                 fence = dma_resv_get_excl_rcu(obj->base.resv);
15714                 if (fence) {
15715                         add_rps_boost_after_vblank(new_plane_state->hw.crtc,
15716                                                    fence);
15717                         dma_fence_put(fence);
15718                 }
15719         } else {
15720                 add_rps_boost_after_vblank(new_plane_state->hw.crtc,
15721                                            new_plane_state->uapi.fence);
15722         }
15723
15724         /*
15725          * We declare pageflips to be interactive and so merit a small bias
15726          * towards upclocking to deliver the frame on time. By only changing
15727          * the RPS thresholds to sample more regularly and aim for higher
15728          * clocks we can hopefully deliver low power workloads (like kodi)
15729          * that are not quite steady state without resorting to forcing
15730          * maximum clocks following a vblank miss (see do_rps_boost()).
15731          */
15732         if (!state->rps_interactive) {
15733                 intel_rps_mark_interactive(&dev_priv->gt.rps, true);
15734                 state->rps_interactive = true;
15735         }
15736
15737         return 0;
15738
15739 unpin_fb:
15740         intel_plane_unpin_fb(new_plane_state);
15741
15742         return ret;
15743 }
15744
15745 /**
15746  * intel_cleanup_plane_fb - Cleans up an fb after plane use
15747  * @plane: drm plane to clean up for
15748  * @_old_plane_state: the state from the previous modeset
15749  *
15750  * Cleans up a framebuffer that has just been removed from a plane.
15751  */
15752 void
15753 intel_cleanup_plane_fb(struct drm_plane *plane,
15754                        struct drm_plane_state *_old_plane_state)
15755 {
15756         struct intel_plane_state *old_plane_state =
15757                 to_intel_plane_state(_old_plane_state);
15758         struct intel_atomic_state *state =
15759                 to_intel_atomic_state(old_plane_state->uapi.state);
15760         struct drm_i915_private *dev_priv = to_i915(plane->dev);
15761         struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
15762
15763         if (!obj)
15764                 return;
15765
15766         if (state->rps_interactive) {
15767                 intel_rps_mark_interactive(&dev_priv->gt.rps, false);
15768                 state->rps_interactive = false;
15769         }
15770
15771         /* Should only be called after a successful intel_prepare_plane_fb()! */
15772         intel_plane_unpin_fb(old_plane_state);
15773 }
15774
15775 /**
15776  * intel_plane_destroy - destroy a plane
15777  * @plane: plane to destroy
15778  *
15779  * Common destruction function for all types of planes (primary, cursor,
15780  * sprite).
15781  */
15782 void intel_plane_destroy(struct drm_plane *plane)
15783 {
15784         drm_plane_cleanup(plane);
15785         kfree(to_intel_plane(plane));
15786 }
15787
/* Hook up the per-crtc debugfs entries once the crtc is registered. */
static int intel_crtc_late_register(struct drm_crtc *crtc)
{
	intel_crtc_debugfs_add(crtc);
	return 0;
}
15793
/*
 * Common crtc func table entries shared by all platforms; the per-platform
 * tables below add only the platform-appropriate vblank hooks.
 */
#define INTEL_CRTC_FUNCS \
	.set_config = drm_atomic_helper_set_config, \
	.destroy = intel_crtc_destroy, \
	.page_flip = drm_atomic_helper_page_flip, \
	.atomic_duplicate_state = intel_crtc_duplicate_state, \
	.atomic_destroy_state = intel_crtc_destroy_state, \
	.set_crc_source = intel_crtc_set_crc_source, \
	.verify_crc_source = intel_crtc_verify_crc_source, \
	.get_crc_sources = intel_crtc_get_crc_sources, \
	.late_register = intel_crtc_late_register
15804
/*
 * Per-platform crtc func tables. All share INTEL_CRTC_FUNCS and differ
 * only in the vblank counter/enable/disable hooks; intel_crtc_init()
 * picks the right table for the platform.
 */
static const struct drm_crtc_funcs bdw_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = bdw_enable_vblank,
	.disable_vblank = bdw_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs ilk_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = ilk_enable_vblank,
	.disable_vblank = ilk_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs g4x_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs i965_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs i915gm_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i915gm_enable_vblank,
	.disable_vblank = i915gm_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs i915_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs i8xx_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	/* no hw vblank counter */
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
15867
15868 static struct intel_crtc *intel_crtc_alloc(void)
15869 {
15870         struct intel_crtc_state *crtc_state;
15871         struct intel_crtc *crtc;
15872
15873         crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
15874         if (!crtc)
15875                 return ERR_PTR(-ENOMEM);
15876
15877         crtc_state = intel_crtc_state_alloc(crtc);
15878         if (!crtc_state) {
15879                 kfree(crtc);
15880                 return ERR_PTR(-ENOMEM);
15881         }
15882
15883         crtc->base.state = &crtc_state->uapi;
15884         crtc->config = crtc_state;
15885
15886         return crtc;
15887 }
15888
/* Free a crtc (and its state) allocated by intel_crtc_alloc(). */
static void intel_crtc_free(struct intel_crtc *crtc)
{
	intel_crtc_destroy_state(&crtc->base, crtc->base.state);
	kfree(crtc);
}
15894
15895 static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
15896 {
15897         struct intel_plane *plane;
15898
15899         for_each_intel_plane(&dev_priv->drm, plane) {
15900                 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
15901                                                                   plane->pipe);
15902
15903                 plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
15904         }
15905 }
15906
/*
 * Create and register the crtc for @pipe, including all of its planes
 * (primary, sprites, cursor), the platform-appropriate func table, and
 * the pipe/plane -> crtc lookup mappings.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *primary, *cursor;
	const struct drm_crtc_funcs *funcs;
	struct intel_crtc *crtc;
	int sprite, ret;

	crtc = intel_crtc_alloc();
	if (IS_ERR(crtc))
		return PTR_ERR(crtc);

	crtc->pipe = pipe;
	crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe];

	/* Create the planes attached to this pipe. */
	primary = intel_primary_plane_create(dev_priv, pipe);
	if (IS_ERR(primary)) {
		ret = PTR_ERR(primary);
		goto fail;
	}
	crtc->plane_ids_mask |= BIT(primary->id);

	for_each_sprite(dev_priv, pipe, sprite) {
		struct intel_plane *plane;

		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			goto fail;
		}
		crtc->plane_ids_mask |= BIT(plane->id);
	}

	cursor = intel_cursor_plane_create(dev_priv, pipe);
	if (IS_ERR(cursor)) {
		ret = PTR_ERR(cursor);
		goto fail;
	}
	crtc->plane_ids_mask |= BIT(cursor->id);

	/*
	 * Pick the func table with the vblank hooks matching the platform;
	 * note the GMCH chain checks more specific platforms first.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv) ||
		    IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
			funcs = &g4x_crtc_funcs;
		else if (IS_GEN(dev_priv, 4))
			funcs = &i965_crtc_funcs;
		else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
			funcs = &i915gm_crtc_funcs;
		else if (IS_GEN(dev_priv, 3))
			funcs = &i915_crtc_funcs;
		else
			funcs = &i8xx_crtc_funcs;
	} else {
		if (INTEL_GEN(dev_priv) >= 8)
			funcs = &bdw_crtc_funcs;
		else
			funcs = &ilk_crtc_funcs;
	}

	ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base,
					&primary->base, &cursor->base,
					funcs, "pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	/* Record the pipe -> crtc mapping; each pipe is mapped exactly once. */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
	       dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
	dev_priv->pipe_to_crtc_mapping[pipe] = crtc;

	/* Pre-skl hardware also has a fixed primary plane -> crtc mapping. */
	if (INTEL_GEN(dev_priv) < 9) {
		enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;

		BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
		       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
		dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc;
	}

	if (INTEL_GEN(dev_priv) >= 10)
		drm_crtc_create_scaling_filter_property(&crtc->base,
						BIT(DRM_SCALING_FILTER_DEFAULT) |
						BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));

	intel_color_init(crtc);

	intel_crtc_crc_init(crtc);

	drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe);

	return 0;

fail:
	intel_crtc_free(crtc);

	return ret;
}
16001
16002 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
16003                                       struct drm_file *file)
16004 {
16005         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
16006         struct drm_crtc *drmmode_crtc;
16007         struct intel_crtc *crtc;
16008
16009         drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
16010         if (!drmmode_crtc)
16011                 return -ENOENT;
16012
16013         crtc = to_intel_crtc(drmmode_crtc);
16014         pipe_from_crtc_id->pipe = crtc->pipe;
16015
16016         return 0;
16017 }
16018
16019 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
16020 {
16021         struct drm_device *dev = encoder->base.dev;
16022         struct intel_encoder *source_encoder;
16023         u32 possible_clones = 0;
16024
16025         for_each_intel_encoder(dev, source_encoder) {
16026                 if (encoders_cloneable(encoder, source_encoder))
16027                         possible_clones |= drm_encoder_mask(&source_encoder->base);
16028         }
16029
16030         return possible_clones;
16031 }
16032
16033 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
16034 {
16035         struct drm_device *dev = encoder->base.dev;
16036         struct intel_crtc *crtc;
16037         u32 possible_crtcs = 0;
16038
16039         for_each_intel_crtc(dev, crtc) {
16040                 if (encoder->pipe_mask & BIT(crtc->pipe))
16041                         possible_crtcs |= drm_crtc_mask(&crtc->base);
16042         }
16043
16044         return possible_crtcs;
16045 }
16046
16047 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
16048 {
16049         if (!IS_MOBILE(dev_priv))
16050                 return false;
16051
16052         if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
16053                 return false;
16054
16055         if (IS_GEN(dev_priv, 5) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
16056                 return false;
16057
16058         return true;
16059 }
16060
16061 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
16062 {
16063         if (INTEL_GEN(dev_priv) >= 9)
16064                 return false;
16065
16066         if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
16067                 return false;
16068
16069         if (HAS_PCH_LPT_H(dev_priv) &&
16070             intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
16071                 return false;
16072
16073         /* DDI E can't be used if DDI A requires 4 lanes */
16074         if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
16075                 return false;
16076
16077         if (!dev_priv->vbt.int_crt_support)
16078                 return false;
16079
16080         return true;
16081 }
16082
16083 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
16084 {
16085         int pps_num;
16086         int pps_idx;
16087
16088         if (HAS_DDI(dev_priv))
16089                 return;
16090         /*
16091          * This w/a is needed at least on CPT/PPT, but to be sure apply it
16092          * everywhere where registers can be write protected.
16093          */
16094         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
16095                 pps_num = 2;
16096         else
16097                 pps_num = 1;
16098
16099         for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
16100                 u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
16101
16102                 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
16103                 intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
16104         }
16105 }
16106
/*
 * Record where the panel power sequencer registers live for this
 * platform and apply the register-unlock workaround.
 */
static void intel_pps_init(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
		dev_priv->pps_mmio_base = PCH_PPS_BASE;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->pps_mmio_base = VLV_PPS_BASE;
	else
		dev_priv->pps_mmio_base = PPS_BASE;

	intel_pps_unlock_regs_wa(dev_priv);
}
16118
16119 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
16120 {
16121         struct intel_encoder *encoder;
16122         bool dpd_is_edp = false;
16123
16124         intel_pps_init(dev_priv);
16125
16126         if (!HAS_DISPLAY(dev_priv))
16127                 return;
16128
16129         if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
16130                 intel_ddi_init(dev_priv, PORT_A);
16131                 intel_ddi_init(dev_priv, PORT_B);
16132                 intel_ddi_init(dev_priv, PORT_TC1);
16133                 intel_ddi_init(dev_priv, PORT_TC2);
16134         } else if (INTEL_GEN(dev_priv) >= 12) {
16135                 intel_ddi_init(dev_priv, PORT_A);
16136                 intel_ddi_init(dev_priv, PORT_B);
16137                 intel_ddi_init(dev_priv, PORT_TC1);
16138                 intel_ddi_init(dev_priv, PORT_TC2);
16139                 intel_ddi_init(dev_priv, PORT_TC3);
16140                 intel_ddi_init(dev_priv, PORT_TC4);
16141                 intel_ddi_init(dev_priv, PORT_TC5);
16142                 intel_ddi_init(dev_priv, PORT_TC6);
16143                 icl_dsi_init(dev_priv);
16144         } else if (IS_JSL_EHL(dev_priv)) {
16145                 intel_ddi_init(dev_priv, PORT_A);
16146                 intel_ddi_init(dev_priv, PORT_B);
16147                 intel_ddi_init(dev_priv, PORT_C);
16148                 intel_ddi_init(dev_priv, PORT_D);
16149                 icl_dsi_init(dev_priv);
16150         } else if (IS_GEN(dev_priv, 11)) {
16151                 intel_ddi_init(dev_priv, PORT_A);
16152                 intel_ddi_init(dev_priv, PORT_B);
16153                 intel_ddi_init(dev_priv, PORT_C);
16154                 intel_ddi_init(dev_priv, PORT_D);
16155                 intel_ddi_init(dev_priv, PORT_E);
16156                 /*
16157                  * On some ICL SKUs port F is not present. No strap bits for
16158                  * this, so rely on VBT.
16159                  * Work around broken VBTs on SKUs known to have no port F.
16160                  */
16161                 if (IS_ICL_WITH_PORT_F(dev_priv) &&
16162                     intel_bios_is_port_present(dev_priv, PORT_F))
16163                         intel_ddi_init(dev_priv, PORT_F);
16164
16165                 icl_dsi_init(dev_priv);
16166         } else if (IS_GEN9_LP(dev_priv)) {
16167                 /*
16168                  * FIXME: Broxton doesn't support port detection via the
16169                  * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
16170                  * detect the ports.
16171                  */
16172                 intel_ddi_init(dev_priv, PORT_A);
16173                 intel_ddi_init(dev_priv, PORT_B);
16174                 intel_ddi_init(dev_priv, PORT_C);
16175
16176                 vlv_dsi_init(dev_priv);
16177         } else if (HAS_DDI(dev_priv)) {
16178                 int found;
16179
16180                 if (intel_ddi_crt_present(dev_priv))
16181                         intel_crt_init(dev_priv);
16182
16183                 /*
16184                  * Haswell uses DDI functions to detect digital outputs.
16185                  * On SKL pre-D0 the strap isn't connected, so we assume
16186                  * it's there.
16187                  */
16188                 found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
16189                 /* WaIgnoreDDIAStrap: skl */
16190                 if (found || IS_GEN9_BC(dev_priv))
16191                         intel_ddi_init(dev_priv, PORT_A);
16192
16193                 /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
16194                  * register */
16195                 found = intel_de_read(dev_priv, SFUSE_STRAP);
16196
16197                 if (found & SFUSE_STRAP_DDIB_DETECTED)
16198                         intel_ddi_init(dev_priv, PORT_B);
16199                 if (found & SFUSE_STRAP_DDIC_DETECTED)
16200                         intel_ddi_init(dev_priv, PORT_C);
16201                 if (found & SFUSE_STRAP_DDID_DETECTED)
16202                         intel_ddi_init(dev_priv, PORT_D);
16203                 if (found & SFUSE_STRAP_DDIF_DETECTED)
16204                         intel_ddi_init(dev_priv, PORT_F);
16205                 /*
16206                  * On SKL we don't have a way to detect DDI-E so we rely on VBT.
16207                  */
16208                 if (IS_GEN9_BC(dev_priv) &&
16209                     intel_bios_is_port_present(dev_priv, PORT_E))
16210                         intel_ddi_init(dev_priv, PORT_E);
16211
16212         } else if (HAS_PCH_SPLIT(dev_priv)) {
16213                 int found;
16214
16215                 /*
16216                  * intel_edp_init_connector() depends on this completing first,
16217                  * to prevent the registration of both eDP and LVDS and the
16218                  * incorrect sharing of the PPS.
16219                  */
16220                 intel_lvds_init(dev_priv);
16221                 intel_crt_init(dev_priv);
16222
16223                 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
16224
16225                 if (ilk_has_edp_a(dev_priv))
16226                         intel_dp_init(dev_priv, DP_A, PORT_A);
16227
16228                 if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
16229                         /* PCH SDVOB multiplex with HDMIB */
16230                         found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
16231                         if (!found)
16232                                 intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
16233                         if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
16234                                 intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
16235                 }
16236
16237                 if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
16238                         intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
16239
16240                 if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
16241                         intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
16242
16243                 if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
16244                         intel_dp_init(dev_priv, PCH_DP_C, PORT_C);
16245
16246                 if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
16247                         intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
16248         } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
16249                 bool has_edp, has_port;
16250
16251                 if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
16252                         intel_crt_init(dev_priv);
16253
16254                 /*
16255                  * The DP_DETECTED bit is the latched state of the DDC
16256                  * SDA pin at boot. However since eDP doesn't require DDC
16257                  * (no way to plug in a DP->HDMI dongle) the DDC pins for
16258                  * eDP ports may have been muxed to an alternate function.
16259                  * Thus we can't rely on the DP_DETECTED bit alone to detect
16260                  * eDP ports. Consult the VBT as well as DP_DETECTED to
16261                  * detect eDP ports.
16262                  *
16263                  * Sadly the straps seem to be missing sometimes even for HDMI
16264                  * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
16265                  * and VBT for the presence of the port. Additionally we can't
16266                  * trust the port type the VBT declares as we've seen at least
16267                  * HDMI ports that the VBT claim are DP or eDP.
16268                  */
16269                 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
16270                 has_port = intel_bios_is_port_present(dev_priv, PORT_B);
16271                 if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
16272                         has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
16273                 if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
16274                         intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
16275
16276                 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
16277                 has_port = intel_bios_is_port_present(dev_priv, PORT_C);
16278                 if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
16279                         has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
16280                 if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
16281                         intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
16282
16283                 if (IS_CHERRYVIEW(dev_priv)) {
16284                         /*
16285                          * eDP not supported on port D,
16286                          * so no need to worry about it
16287                          */
16288                         has_port = intel_bios_is_port_present(dev_priv, PORT_D);
16289                         if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
16290                                 intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
16291                         if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
16292                                 intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
16293                 }
16294
16295                 vlv_dsi_init(dev_priv);
16296         } else if (IS_PINEVIEW(dev_priv)) {
16297                 intel_lvds_init(dev_priv);
16298                 intel_crt_init(dev_priv);
16299         } else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
16300                 bool found = false;
16301
16302                 if (IS_MOBILE(dev_priv))
16303                         intel_lvds_init(dev_priv);
16304
16305                 intel_crt_init(dev_priv);
16306
16307                 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
16308                         drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
16309                         found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
16310                         if (!found && IS_G4X(dev_priv)) {
16311                                 drm_dbg_kms(&dev_priv->drm,
16312                                             "probing HDMI on SDVOB\n");
16313                                 intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
16314                         }
16315
16316                         if (!found && IS_G4X(dev_priv))
16317                                 intel_dp_init(dev_priv, DP_B, PORT_B);
16318                 }
16319
16320                 /* Before G4X SDVOC doesn't have its own detect register */
16321
16322                 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
16323                         drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
16324                         found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
16325                 }
16326
16327                 if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
16328
16329                         if (IS_G4X(dev_priv)) {
16330                                 drm_dbg_kms(&dev_priv->drm,
16331                                             "probing HDMI on SDVOC\n");
16332                                 intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
16333                         }
16334                         if (IS_G4X(dev_priv))
16335                                 intel_dp_init(dev_priv, DP_C, PORT_C);
16336                 }
16337
16338                 if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
16339                         intel_dp_init(dev_priv, DP_D, PORT_D);
16340
16341                 if (SUPPORTS_TV(dev_priv))
16342                         intel_tv_init(dev_priv);
16343         } else if (IS_GEN(dev_priv, 2)) {
16344                 if (IS_I85X(dev_priv))
16345                         intel_lvds_init(dev_priv);
16346
16347                 intel_crt_init(dev_priv);
16348                 intel_dvo_init(dev_priv);
16349         }
16350
16351         intel_psr_init(dev_priv);
16352
16353         for_each_intel_encoder(&dev_priv->drm, encoder) {
16354                 encoder->base.possible_crtcs =
16355                         intel_encoder_possible_crtcs(encoder);
16356                 encoder->base.possible_clones =
16357                         intel_encoder_possible_clones(encoder);
16358         }
16359
16360         intel_init_pch_refclk(dev_priv);
16361
16362         drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
16363 }
16364
/*
 * drm_framebuffer_funcs::destroy hook: tear down a user framebuffer.
 * Unregisters the fb from the core, drops the frontbuffer reference
 * taken in intel_framebuffer_init() and frees the wrapper object.
 */
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	/* Drop the reference acquired via intel_frontbuffer_get() at init */
	intel_frontbuffer_put(intel_fb->frontbuffer);

	kfree(intel_fb);
}
16374
/*
 * drm_framebuffer_funcs::create_handle hook: create a GEM handle for the
 * framebuffer's backing object so userspace can reference it.
 *
 * Framebuffers backed by userptr objects are rejected outright; their
 * backing pages come from userspace memory, so handing out a GEM handle
 * for them is not allowed.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						struct drm_file *file,
						unsigned int *handle)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	if (obj->userptr.mm) {
		drm_dbg(&i915->drm,
			"attempting to use a userptr for a framebuffer, denied\n");
		return -EINVAL;
	}

	return drm_gem_handle_create(file, &obj->base, handle);
}
16390
/*
 * drm_framebuffer_funcs::dirty hook: userspace signalled that it wrote to
 * the framebuffer (DRM_IOCTL_MODE_DIRTYFB).
 *
 * Flushes the backing object if it is being scanned out and kicks the
 * frontbuffer tracking with ORIGIN_DIRTYFB so features like FBC/PSR can
 * react.  The clip rectangles are ignored; the whole fb is flushed.
 *
 * Always returns 0.
 */
static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
					struct drm_file *file,
					unsigned flags, unsigned color,
					struct drm_clip_rect *clips,
					unsigned num_clips)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);

	i915_gem_object_flush_if_display(obj);
	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	return 0;
}
16404
/* Framebuffer vfuncs for user-created (addfb/addfb2) framebuffers */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
16410
/*
 * Validate @mode_cmd against hardware/tiling constraints and initialize
 * @intel_fb around the GEM object @obj.
 *
 * Note that @mode_cmd may be modified: for legacy (non-MODIFIERS) addfb,
 * an X-tiled object has its modifier[0] filled in from the tiling mode.
 *
 * Returns 0 on success or a negative error code; on failure the
 * frontbuffer reference taken here is dropped again.
 */
static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_framebuffer *fb = &intel_fb->base;
	u32 max_stride;
	unsigned int tiling, stride;
	int ret = -EINVAL;
	int i;

	/* Held for the fb's lifetime; released in the destroy hook */
	intel_fb->frontbuffer = intel_frontbuffer_get(obj);
	if (!intel_fb->frontbuffer)
		return -ENOMEM;

	/* Snapshot the object's fence tiling/stride under the object lock */
	i915_gem_object_lock(obj, NULL);
	tiling = i915_gem_object_get_tiling(obj);
	stride = i915_gem_object_get_stride(obj);
	i915_gem_object_unlock(obj);

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/*
		 * If there's a fence, enforce that
		 * the fb modifier and tiling mode match.
		 */
		if (tiling != I915_TILING_NONE &&
		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
			drm_dbg_kms(&dev_priv->drm,
				    "tiling_mode doesn't match fb modifier\n");
			goto err;
		}
	} else {
		/* Legacy addfb: derive the modifier from the object's tiling */
		if (tiling == I915_TILING_X) {
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		} else if (tiling == I915_TILING_Y) {
			drm_dbg_kms(&dev_priv->drm,
				    "No Y tiling for legacy addfb\n");
			goto err;
		}
	}

	/* At least one plane on this device must scan out this combo */
	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		struct drm_format_name_buf format_name;

		drm_dbg_kms(&dev_priv->drm,
			    "unsupported pixel format %s / modifier 0x%llx\n",
			    drm_get_format_name(mode_cmd->pixel_format,
						&format_name),
			    mode_cmd->modifier[0]);
		goto err;
	}

	/*
	 * gen2/3 display engine uses the fence if present,
	 * so the tiling mode must match the fb modifier exactly.
	 */
	if (INTEL_GEN(dev_priv) < 4 &&
	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
		drm_dbg_kms(&dev_priv->drm,
			    "tiling_mode must match fb modifier exactly on gen2/3\n");
		goto err;
	}

	/* Hardware limit on the primary plane's pitch */
	max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
					 mode_cmd->modifier[0]);
	if (mode_cmd->pitches[0] > max_stride) {
		drm_dbg_kms(&dev_priv->drm,
			    "%s pitch (%u) must be at most %d\n",
			    mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
			    "tiled" : "linear",
			    mode_cmd->pitches[0], max_stride);
		goto err;
	}

	/*
	 * If there's a fence, enforce that
	 * the fb pitch and fence stride match.
	 */
	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
		drm_dbg_kms(&dev_priv->drm,
			    "pitch (%d) must match tiling stride (%d)\n",
			    mode_cmd->pitches[0], stride);
		goto err;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "plane 0 offset (0x%08x) must be 0\n",
			    mode_cmd->offsets[0]);
		goto err;
	}

	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);

	/* Per-plane checks: single-object fbs only, plus stride constraints */
	for (i = 0; i < fb->format->num_planes; i++) {
		u32 stride_alignment;

		/* All planes must be backed by the same GEM handle/object */
		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
			drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
				    i);
			goto err;
		}

		stride_alignment = intel_fb_stride_alignment(fb, i);
		if (fb->pitches[i] & (stride_alignment - 1)) {
			drm_dbg_kms(&dev_priv->drm,
				    "plane %d pitch (%d) must be at least %u byte aligned\n",
				    i, fb->pitches[i], stride_alignment);
			goto err;
		}

		/* gen12 CCS aux planes require one exact pitch value */
		if (is_gen12_ccs_plane(fb, i)) {
			int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);

			if (fb->pitches[i] != ccs_aux_stride) {
				drm_dbg_kms(&dev_priv->drm,
					    "ccs aux plane %d pitch (%d) must be %d\n",
					    i,
					    fb->pitches[i], ccs_aux_stride);
				goto err;
			}
		}

		fb->obj[i] = &obj->base;
	}

	ret = intel_fill_fb_info(dev_priv, fb);
	if (ret)
		goto err;

	ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
	if (ret) {
		drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
		goto err;
	}

	return 0;

err:
	intel_frontbuffer_put(intel_fb->frontbuffer);
	return ret;
}
16556
/*
 * drm_mode_config_funcs::fb_create hook: build an intel framebuffer from
 * a userspace addfb2 request.
 *
 * @user_mode_cmd is copied to a local because intel_framebuffer_init()
 * may modify modifier[0] for legacy (non-MODIFIERS) requests.
 *
 * Returns the new framebuffer or an ERR_PTR on failure.
 */
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
{
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;

	obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
	if (!obj)
		return ERR_PTR(-ENOENT);

	/* The fb holds its own references; drop the lookup reference */
	fb = intel_framebuffer_create(obj, &mode_cmd);
	i915_gem_object_put(obj);

	return fb;
}
16575
/*
 * drm_mode_config_funcs::mode_valid hook: global (non-connector-specific)
 * mode validation against transcoder timing limits and unsupported mode
 * flags.  Per-platform display/total limits are selected below.
 */
static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
		 const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int hdisplay_max, htotal_max;
	int vdisplay_max, vtotal_max;

	/*
	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
	 * of DBLSCAN modes to the output's mode list when they detect
	 * the scaling mode property on the connector. And they don't
	 * ask the kernel to validate those modes in any way until
	 * modeset time at which point the client gets a protocol error.
	 * So in order to not upset those clients we silently ignore the
	 * DBLSCAN flag on such connectors. For other connectors we will
	 * reject modes with the DBLSCAN flag in encoder->compute_config().
	 * And we always reject DBLSCAN modes in connector->mode_valid()
	 * as we never want such modes on the connector's mode list.
	 */

	if (mode->vscan > 1)
		return MODE_NO_VSCAN;

	if (mode->flags & DRM_MODE_FLAG_HSKEW)
		return MODE_H_ILLEGAL;

	/* Composite sync flavors are not supported */
	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
			   DRM_MODE_FLAG_NCSYNC |
			   DRM_MODE_FLAG_PCSYNC))
		return MODE_HSYNC;

	if (mode->flags & (DRM_MODE_FLAG_BCAST |
			   DRM_MODE_FLAG_PIXMUX |
			   DRM_MODE_FLAG_CLKDIV2))
		return MODE_BAD;

	/* Transcoder timing limits */
	if (INTEL_GEN(dev_priv) >= 11) {
		hdisplay_max = 16384;
		vdisplay_max = 8192;
		htotal_max = 16384;
		vtotal_max = 8192;
	} else if (INTEL_GEN(dev_priv) >= 9 ||
		   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else if (INTEL_GEN(dev_priv) >= 3) {
		hdisplay_max = 4096;
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else {
		hdisplay_max = 2048;
		vdisplay_max = 2048;
		htotal_max = 4096;
		vtotal_max = 4096;
	}

	/* sync_start/end are bounded by the same limit as *total */
	if (mode->hdisplay > hdisplay_max ||
	    mode->hsync_start > htotal_max ||
	    mode->hsync_end > htotal_max ||
	    mode->htotal > htotal_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > vdisplay_max ||
	    mode->vsync_start > vtotal_max ||
	    mode->vsync_end > vtotal_max ||
	    mode->vtotal > vtotal_max)
		return MODE_V_ILLEGAL;

	/* Minimum active size and blanking requirements */
	if (INTEL_GEN(dev_priv) >= 5) {
		if (mode->hdisplay < 64 ||
		    mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 5)
			return MODE_V_ILLEGAL;
	} else {
		if (mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 3)
			return MODE_V_ILLEGAL;
	}

	return MODE_OK;
}
16666
16667 enum drm_mode_status
16668 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
16669                                 const struct drm_display_mode *mode,
16670                                 bool bigjoiner)
16671 {
16672         int plane_width_max, plane_height_max;
16673
16674         /*
16675          * intel_mode_valid() should be
16676          * sufficient on older platforms.
16677          */
16678         if (INTEL_GEN(dev_priv) < 9)
16679                 return MODE_OK;
16680
16681         /*
16682          * Most people will probably want a fullscreen
16683          * plane so let's not advertize modes that are
16684          * too big for that.
16685          */
16686         if (INTEL_GEN(dev_priv) >= 11) {
16687                 plane_width_max = 5120 << bigjoiner;
16688                 plane_height_max = 4320;
16689         } else {
16690                 plane_width_max = 5120;
16691                 plane_height_max = 4096;
16692         }
16693
16694         if (mode->hdisplay > plane_width_max)
16695                 return MODE_H_ILLEGAL;
16696
16697         if (mode->vdisplay > plane_height_max)
16698                 return MODE_V_ILLEGAL;
16699
16700         return MODE_OK;
16701 }
16702
/* Mode config vfuncs: fb creation, mode validation and atomic plumbing */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
16714
/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 *
 * Selects the per-platform implementations of the pipe config readout,
 * initial plane config readout, clock computation and crtc enable/disable
 * hooks, plus FDI link training and the modeset-enable commit hook.
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	intel_init_cdclk_hooks(dev_priv);

	if (INTEL_GEN(dev_priv) >= 9) {
		/* gen9+ (skl and later) */
		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			skl_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = hsw_crtc_compute_clock;
		dev_priv->display.crtc_enable = hsw_crtc_enable;
		dev_priv->display.crtc_disable = hsw_crtc_disable;
	} else if (HAS_DDI(dev_priv)) {
		/* DDI-equipped pre-gen9 platforms */
		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			hsw_crtc_compute_clock;
		dev_priv->display.crtc_enable = hsw_crtc_enable;
		dev_priv->display.crtc_disable = hsw_crtc_disable;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		/* PCH split platforms without DDI */
		dev_priv->display.get_pipe_config = ilk_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			ilk_crtc_compute_clock;
		dev_priv->display.crtc_enable = ilk_crtc_enable;
		dev_priv->display.crtc_disable = ilk_crtc_disable;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_G4X(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_PINEVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (!IS_GEN(dev_priv, 2)) {
		/* remaining gen3+ platforms */
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else {
		/* gen2 */
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	}

	/* FDI link training only exists on gen5 (ilk), gen6 and IVB */
	if (IS_GEN(dev_priv, 5)) {
		dev_priv->display.fdi_link_train = ilk_fdi_link_train;
	} else if (IS_GEN(dev_priv, 6)) {
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
	else
		dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;

}
16805
/*
 * Seed the atomic cdclk and dbuf state objects from the current hardware
 * state: read out the cdclk configuration and copy it into the logical
 * and actual cdclk state, and record the currently enabled dbuf slices.
 */
void intel_modeset_init_hw(struct drm_i915_private *i915)
{
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(i915->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(i915->dbuf.obj.state);

	intel_update_cdclk(i915);
	intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
	/* Both logical and actual state start out at the hw readout value */
	cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;

	dbuf_state->enabled_slices = i915->dbuf.enabled_slices;
}
16819
16820 static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
16821 {
16822         struct drm_plane *plane;
16823         struct intel_crtc *crtc;
16824
16825         for_each_intel_crtc(state->dev, crtc) {
16826                 struct intel_crtc_state *crtc_state;
16827
16828                 crtc_state = intel_atomic_get_crtc_state(state, crtc);
16829                 if (IS_ERR(crtc_state))
16830                         return PTR_ERR(crtc_state);
16831
16832                 if (crtc_state->hw.active) {
16833                         /*
16834                          * Preserve the inherited flag to avoid
16835                          * taking the full modeset path.
16836                          */
16837                         crtc_state->inherited = true;
16838                 }
16839         }
16840
16841         drm_for_each_plane(plane, state->dev) {
16842                 struct drm_plane_state *plane_state;
16843
16844                 plane_state = drm_atomic_get_plane_state(state, plane);
16845                 if (IS_ERR(plane_state))
16846                         return PTR_ERR(plane_state);
16847         }
16848
16849         return 0;
16850 }
16851
/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks so that
 * we ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WM's should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
static void sanitize_watermarks(struct drm_i915_private *dev_priv)
{
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.optimize_watermarks)
		return;

	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (drm_WARN_ON(&dev_priv->drm, !state))
		return;

	intel_state = to_intel_atomic_state(state);

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH(dev_priv))
		intel_state->skip_intermediate_wm = true;

	/* Add all CRTCs/planes so the check covers the whole display */
	ret = sanitize_watermarks_add_affected(state);
	if (ret)
		goto fail;

	ret = intel_atomic_check(&dev_priv->drm, state);
	if (ret)
		goto fail;

	/* Write calculated watermark values back */
	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
		crtc_state->wm.need_postvbl_update = true;
		dev_priv->display.optimize_watermarks(intel_state, crtc);

		/* Mirror the computed watermarks into the committed state */
		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
	}

fail:
	/* Standard modeset deadlock backoff-and-retry dance */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	/*
	 * If we fail here, it means that the hardware appears to be
	 * programmed in a way that shouldn't be possible, given our
	 * understanding of watermark requirements.  This might mean a
	 * mistake in the hardware readout code or a mistake in the
	 * watermark calculations for a given platform.  Raise a WARN
	 * so that this is noticeable.
	 *
	 * If this actually happens, we'll have to just leave the
	 * BIOS-programmed watermarks untouched and hope for the best.
	 */
	drm_WARN(&dev_priv->drm, ret,
		 "Could not determine valid watermarks for inherited state\n");

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
16937
16938 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
16939 {
16940         if (IS_GEN(dev_priv, 5)) {
16941                 u32 fdi_pll_clk =
16942                         intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
16943
16944                 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
16945         } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
16946                 dev_priv->fdi_pll_freq = 270000;
16947         } else {
16948                 return;
16949         }
16950
16951         drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
16952 }
16953
/*
 * Perform a one-off atomic commit of the state inherited from the
 * BIOS/GOP: pull every crtc's state into a fresh atomic state, add the
 * affected planes of active crtcs (and the connectors of any encoder
 * whose initial_fastset_check() rejects the inherited state), then
 * commit. Retries on -EDEADLK via the standard modeset backoff dance.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			struct intel_encoder *encoder;

			/*
			 * We've not yet detected sink capabilities
			 * (audio,infoframes,etc.) and thus we don't want to
			 * force a full state recomputation yet. We want that to
			 * happen only for the first real commit from userspace.
			 * So preserve the inherited flag for the time being.
			 */
			crtc_state->inherited = true;

			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			for_each_intel_encoder_mask(dev, encoder,
						    crtc_state->uapi.encoder_mask) {
				if (encoder->initial_fastset_check &&
				    !encoder->initial_fastset_check(encoder, crtc_state)) {
					/* fastset not possible; do a full modeset */
					ret = drm_atomic_add_affected_connectors(state,
										 &crtc->base);
					if (ret)
						goto out;
				}
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	/* On lock contention: drop the state, back off and retry from scratch. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
17032
17033 static void intel_mode_config_init(struct drm_i915_private *i915)
17034 {
17035         struct drm_mode_config *mode_config = &i915->drm.mode_config;
17036
17037         drm_mode_config_init(&i915->drm);
17038         INIT_LIST_HEAD(&i915->global_obj_list);
17039
17040         mode_config->min_width = 0;
17041         mode_config->min_height = 0;
17042
17043         mode_config->preferred_depth = 24;
17044         mode_config->prefer_shadow = 1;
17045
17046         mode_config->allow_fb_modifiers = true;
17047
17048         mode_config->funcs = &intel_mode_funcs;
17049
17050         if (INTEL_GEN(i915) >= 9)
17051                 mode_config->async_page_flip = true;
17052
17053         /*
17054          * Maximum framebuffer dimensions, chosen to match
17055          * the maximum render engine surface size on gen4+.
17056          */
17057         if (INTEL_GEN(i915) >= 7) {
17058                 mode_config->max_width = 16384;
17059                 mode_config->max_height = 16384;
17060         } else if (INTEL_GEN(i915) >= 4) {
17061                 mode_config->max_width = 8192;
17062                 mode_config->max_height = 8192;
17063         } else if (IS_GEN(i915, 3)) {
17064                 mode_config->max_width = 4096;
17065                 mode_config->max_height = 4096;
17066         } else {
17067                 mode_config->max_width = 2048;
17068                 mode_config->max_height = 2048;
17069         }
17070
17071         if (IS_I845G(i915) || IS_I865G(i915)) {
17072                 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
17073                 mode_config->cursor_height = 1023;
17074         } else if (IS_I830(i915) || IS_I85X(i915) ||
17075                    IS_I915G(i915) || IS_I915GM(i915)) {
17076                 mode_config->cursor_width = 64;
17077                 mode_config->cursor_height = 64;
17078         } else {
17079                 mode_config->cursor_width = 256;
17080                 mode_config->cursor_height = 256;
17081         }
17082 }
17083
/* Tear down what intel_mode_config_init() set up, in reverse order. */
static void intel_mode_config_cleanup(struct drm_i915_private *i915)
{
	intel_atomic_global_obj_cleanup(i915);
	drm_mode_config_cleanup(&i915->drm);
}
17089
17090 static void plane_config_fini(struct intel_initial_plane_config *plane_config)
17091 {
17092         if (plane_config->fb) {
17093                 struct drm_framebuffer *fb = &plane_config->fb->base;
17094
17095                 /* We may only have the stub and not a full framebuffer */
17096                 if (drm_framebuffer_read_refcount(fb))
17097                         drm_framebuffer_put(fb);
17098                 else
17099                         kfree(fb);
17100         }
17101
17102         if (plane_config->vma)
17103                 i915_vma_put(plane_config->vma);
17104 }
17105
17106 /* part #1: call before irq install */
17107 int intel_modeset_init_noirq(struct drm_i915_private *i915)
17108 {
17109         int ret;
17110
17111         if (i915_inject_probe_failure(i915))
17112                 return -ENODEV;
17113
17114         if (HAS_DISPLAY(i915)) {
17115                 ret = drm_vblank_init(&i915->drm,
17116                                       INTEL_NUM_PIPES(i915));
17117                 if (ret)
17118                         return ret;
17119         }
17120
17121         intel_bios_init(i915);
17122
17123         ret = intel_vga_register(i915);
17124         if (ret)
17125                 goto cleanup_bios;
17126
17127         /* FIXME: completely on the wrong abstraction layer */
17128         intel_power_domains_init_hw(i915, false);
17129
17130         intel_csr_ucode_init(i915);
17131
17132         i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
17133         i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
17134                                         WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
17135
17136         intel_mode_config_init(i915);
17137
17138         ret = intel_cdclk_init(i915);
17139         if (ret)
17140                 goto cleanup_vga_client_pw_domain_csr;
17141
17142         ret = intel_dbuf_init(i915);
17143         if (ret)
17144                 goto cleanup_vga_client_pw_domain_csr;
17145
17146         ret = intel_bw_init(i915);
17147         if (ret)
17148                 goto cleanup_vga_client_pw_domain_csr;
17149
17150         init_llist_head(&i915->atomic_helper.free_list);
17151         INIT_WORK(&i915->atomic_helper.free_work,
17152                   intel_atomic_helper_free_state_worker);
17153
17154         intel_init_quirks(i915);
17155
17156         intel_fbc_init(i915);
17157
17158         return 0;
17159
17160 cleanup_vga_client_pw_domain_csr:
17161         intel_csr_ucode_fini(i915);
17162         intel_power_domains_driver_remove(i915);
17163         intel_vga_unregister(i915);
17164 cleanup_bios:
17165         intel_bios_driver_remove(i915);
17166
17167         return ret;
17168 }
17169
/*
 * part #2: call after irq install, but before gem init
 *
 * Second stage of display initialization: PM/watermark setup, crtc
 * creation, shared DPLLs, output (encoder/connector) setup, hardware
 * state readout and BIOS framebuffer takeover.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_modeset_init_nogem(struct drm_i915_private *i915)
{
	struct drm_device *dev = &i915->drm;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	intel_init_pm(i915);

	intel_panel_sanitize_ssc(i915);

	intel_gmbus_setup(i915);

	drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
		    INTEL_NUM_PIPES(i915),
		    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

	/* Create a crtc for every pipe present on this platform. */
	if (HAS_DISPLAY(i915)) {
		for_each_pipe(i915, pipe) {
			ret = intel_crtc_init(i915, pipe);
			if (ret) {
				intel_mode_config_cleanup(i915);
				return ret;
			}
		}
	}

	intel_plane_possible_crtcs_init(i915);
	intel_shared_dpll_init(dev);
	intel_update_fdi_pll_freq(i915);

	intel_update_czclk(i915);
	intel_modeset_init_hw(i915);

	intel_hdcp_component_init(i915);

	if (i915->max_cdclk_freq == 0)
		intel_update_max_cdclk(i915);

	/*
	 * If the platform has HTI, we need to find out whether it has reserved
	 * any display resources before we create our display outputs.
	 */
	if (INTEL_INFO(i915)->display.has_hti)
		i915->hti_state = intel_de_read(i915, HDPORT_STATE);

	/* Just disable it once at startup */
	intel_vga_disable(i915);
	intel_setup_outputs(i915);

	/* Read out and sanitize whatever state the BIOS/GOP left behind. */
	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top.  This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		i915->display.get_initial_plane_config(crtc, &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);

		plane_config_fini(&plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(i915))
		sanitize_watermarks(i915);

	return 0;
}
17259
/*
 * part #3: call after gem init
 *
 * Final stage of display initialization: the initial atomic commit of
 * the inherited state, overlay and fbdev setup, and hotplug/IPC enabling.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_modeset_init(struct drm_i915_private *i915)
{
	int ret;

	if (!HAS_DISPLAY(i915))
		return 0;

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(&i915->drm);
	if (ret)
		/* non-fatal: log and carry on with init */
		drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);

	intel_overlay_setup(i915);

	ret = intel_fbdev_init(&i915->drm);
	if (ret)
		return ret;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(i915);
	intel_hpd_poll_disable(i915);

	intel_init_ipc(i915);

	return 0;
}
17292
/*
 * Force-enable @pipe with a fixed 640x480@60 timing (the "force quirk"
 * path, see the drm_dbg below): program the DPLL for ~25.175 MHz,
 * write the pipe timings, enable the pipe and wait until the scanline
 * counter starts moving.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* sanity check that the divider values above give the expected dotclock */
	drm_WARN_ON(&dev_priv->drm,
		    i9xx_calc_dpll_params(48000, &clock) != 25154);

	drm_dbg_kms(&dev_priv->drm,
		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		    pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	intel_de_write(dev_priv, FP0(pipe), fp);
	intel_de_write(dev_priv, FP1(pipe), fp);

	/* Fixed 640x480@60 timings (hdisplay/htotal, sync, etc. minus one). */
	intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		intel_de_write(dev_priv, DPLL(pipe), dpll);
		intel_de_posting_read(dev_priv, DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	intel_de_write(dev_priv, PIPECONF(pipe),
		       PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	/* confirm the pipe is actually running before returning */
	intel_wait_for_pipe_scanline_moving(crtc);
}
17365
/*
 * Counterpart of i830_enable_pipe(): turn off @pipe and its DPLL for the
 * force quirk. Warns if any plane or cursor is still enabled, since those
 * must be off before the pipe is disabled.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
		    pipe_name(pipe));

	/* All planes and cursors are expected to be disabled already. */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);

	intel_de_write(dev_priv, PIPECONF(pipe), 0);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	/* wait until the scanline counter has stopped before killing the DPLL */
	intel_wait_for_pipe_scanline_stopped(crtc);

	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}
17395
17396 static void
17397 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
17398 {
17399         struct intel_crtc *crtc;
17400
17401         if (INTEL_GEN(dev_priv) >= 4)
17402                 return;
17403
17404         for_each_intel_crtc(&dev_priv->drm, crtc) {
17405                 struct intel_plane *plane =
17406                         to_intel_plane(crtc->base.primary);
17407                 struct intel_crtc *plane_crtc;
17408                 enum pipe pipe;
17409
17410                 if (!plane->get_hw_state(plane, &pipe))
17411                         continue;
17412
17413                 if (pipe == crtc->pipe)
17414                         continue;
17415
17416                 drm_dbg_kms(&dev_priv->drm,
17417                             "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
17418                             plane->base.base.id, plane->base.name);
17419
17420                 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
17421                 intel_plane_disable_noatomic(plane_crtc, plane);
17422         }
17423 }
17424
17425 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
17426 {
17427         struct drm_device *dev = crtc->base.dev;
17428         struct intel_encoder *encoder;
17429
17430         for_each_encoder_on_crtc(dev, &crtc->base, encoder)
17431                 return true;
17432
17433         return false;
17434 }
17435
17436 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
17437 {
17438         struct drm_device *dev = encoder->base.dev;
17439         struct intel_connector *connector;
17440
17441         for_each_connector_on_encoder(dev, &encoder->base, connector)
17442                 return connector;
17443
17444         return NULL;
17445 }
17446
17447 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
17448                               enum pipe pch_transcoder)
17449 {
17450         return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
17451                 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
17452 }
17453
/*
 * Reset the frame start delay to 0 on both the CPU transcoder and (if
 * the crtc drives a PCH encoder) the PCH transcoder, clearing any
 * non-zero delay the BIOS may have left programmed for debugging.
 */
static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		/* HSW+: delay lives in the per-transcoder chicken register */
		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
		u32 val;

		/* DSI transcoders have no CHICKEN_TRANS register */
		if (transcoder_is_dsi(cpu_transcoder))
			return;

		val = intel_de_read(dev_priv, reg);
		val &= ~HSW_FRAME_START_DELAY_MASK;
		val |= HSW_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	} else {
		/* older platforms: delay lives in PIPECONF */
		i915_reg_t reg = PIPECONF(cpu_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~PIPECONF_FRAME_START_DELAY_MASK;
		val |= PIPECONF_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	}

	if (!crtc_state->has_pch_encoder)
		return;

	if (HAS_PCH_IBX(dev_priv)) {
		/* IBX: PCH transcoder delay is in PCH_TRANSCONF */
		i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	} else {
		/* CPT+: PCH transcoder delay is in TRANS_CHICKEN2 */
		enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
		i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	}
}
17504
/*
 * Sanitize the hw state read out for @crtc: clear BIOS frame start
 * delays, turn off all non-primary planes, reset the background color
 * (gen9+), disable the crtc entirely if it has no encoders, and set up
 * the initial fifo underrun bookkeeping.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);

	if (crtc_state->hw.active) {
		struct intel_plane *plane;

		/* Clear any frame start delays used for debugging left by the BIOS */
		intel_sanitize_frame_start_delay(crtc_state);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->uapi.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}

		/*
		 * Disable any background color set by the BIOS, but enable the
		 * gamma and CSC to match how we program our planes.
		 */
		if (INTEL_GEN(dev_priv) >= 9)
			intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
				       SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
	    !crtc_state->bigjoiner_slave)
		intel_crtc_disable_noatomic(crtc, ctx);

	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH trancoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
17571
17572 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
17573 {
17574         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
17575
17576         /*
17577          * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
17578          * the hardware when a high res displays plugged in. DPLL P
17579          * divider is zero, and the pipe timings are bonkers. We'll
17580          * try to disable everything in that case.
17581          *
17582          * FIXME would be nice to be able to sanitize this state
17583          * without several WARNs, but for now let's take the easy
17584          * road.
17585          */
17586         return IS_GEN(dev_priv, 6) &&
17587                 crtc_state->hw.active &&
17588                 crtc_state->shared_dpll &&
17589                 crtc_state->port_clock == 0;
17590 }
17591
/*
 * Sanitize the hw state read out for @encoder: if it has active
 * connectors but no active pipe (or a bogus SNB DPLL config), manually
 * run the disable hooks and clamp the connector state to off. Finally
 * notify opregion and (gen11+) sanitize the PLL mapping.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_crtc_state *crtc_state = crtc ?
		to_intel_crtc_state(crtc->base.state) : NULL;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = crtc_state &&
		crtc_state->hw.active;

	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
			    pipe_name(crtc->pipe));
		/* treat the pipe as inactive so it gets disabled below */
		has_active_crtc = false;
	}

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			    encoder->base.base.id,
			    encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (crtc_state) {
			struct drm_encoder *best_encoder;

			drm_dbg_kms(&dev_priv->drm,
				    "[ENCODER:%d:%s] manually disabled\n",
				    encoder->base.base.id,
				    encoder->base.name);

			/* avoid oopsing in case the hooks consult best_encoder */
			best_encoder = connector->base.state->best_encoder;
			connector->base.state->best_encoder = &encoder->base;

			/* FIXME NULL atomic state passed! */
			if (encoder->disable)
				encoder->disable(NULL, encoder, crtc_state,
						 connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(NULL, encoder, crtc_state,
						      connector->base.state);

			/* restore the original best_encoder */
			connector->base.state->best_encoder = best_encoder;
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_sanitize_encoder_pll_mapping(encoder);
}
17662
/* FIXME read out full plane state for all planes */
static void readout_plane_state(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	/*
	 * Ask each plane's hardware whether it is enabled and on which
	 * pipe, and reflect that into the plane's software state.
	 */
	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct intel_crtc_state *crtc_state;
		enum pipe pipe = PIPE_A; /* default when get_hw_state() reports disabled */
		bool visible;

		visible = plane->get_hw_state(plane, &pipe);

		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
		crtc_state = to_intel_crtc_state(crtc->base.state);

		intel_set_plane_visible(crtc_state, plane_state, visible);

		drm_dbg_kms(&dev_priv->drm,
			    "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
			    plane->base.base.id, plane->base.name,
			    enableddisabled(visible), pipe_name(pipe));
	}

	/* Rebuild the per-crtc plane bitmasks from the visibility just read out. */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		fixup_plane_bitmasks(crtc_state);
	}
}
17696
17697 static void intel_modeset_readout_hw_state(struct drm_device *dev)
17698 {
17699         struct drm_i915_private *dev_priv = to_i915(dev);
17700         struct intel_cdclk_state *cdclk_state =
17701                 to_intel_cdclk_state(dev_priv->cdclk.obj.state);
17702         struct intel_dbuf_state *dbuf_state =
17703                 to_intel_dbuf_state(dev_priv->dbuf.obj.state);
17704         enum pipe pipe;
17705         struct intel_crtc *crtc;
17706         struct intel_encoder *encoder;
17707         struct intel_connector *connector;
17708         struct drm_connector_list_iter conn_iter;
17709         u8 active_pipes = 0;
17710
17711         for_each_intel_crtc(dev, crtc) {
17712                 struct intel_crtc_state *crtc_state =
17713                         to_intel_crtc_state(crtc->base.state);
17714
17715                 __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
17716                 intel_crtc_free_hw_state(crtc_state);
17717                 intel_crtc_state_reset(crtc_state, crtc);
17718
17719                 intel_crtc_get_pipe_config(crtc_state);
17720
17721                 crtc_state->hw.enable = crtc_state->hw.active;
17722
17723                 crtc->base.enabled = crtc_state->hw.enable;
17724                 crtc->active = crtc_state->hw.active;
17725
17726                 if (crtc_state->hw.active)
17727                         active_pipes |= BIT(crtc->pipe);
17728
17729                 drm_dbg_kms(&dev_priv->drm,
17730                             "[CRTC:%d:%s] hw state readout: %s\n",
17731                             crtc->base.base.id, crtc->base.name,
17732                             enableddisabled(crtc_state->hw.active));
17733         }
17734
17735         dev_priv->active_pipes = cdclk_state->active_pipes =
17736                 dbuf_state->active_pipes = active_pipes;
17737
17738         readout_plane_state(dev_priv);
17739
17740         intel_dpll_readout_hw_state(dev_priv);
17741
17742         for_each_intel_encoder(dev, encoder) {
17743                 pipe = 0;
17744
17745                 if (encoder->get_hw_state(encoder, &pipe)) {
17746                         struct intel_crtc_state *crtc_state;
17747
17748                         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
17749                         crtc_state = to_intel_crtc_state(crtc->base.state);
17750
17751                         encoder->base.crtc = &crtc->base;
17752                         intel_encoder_get_config(encoder, crtc_state);
17753                         if (encoder->sync_state)
17754                                 encoder->sync_state(encoder, crtc_state);
17755
17756                         /* read out to slave crtc as well for bigjoiner */
17757                         if (crtc_state->bigjoiner) {
17758                                 /* encoder should read be linked to bigjoiner master */
17759                                 WARN_ON(crtc_state->bigjoiner_slave);
17760
17761                                 crtc = crtc_state->bigjoiner_linked_crtc;
17762                                 crtc_state = to_intel_crtc_state(crtc->base.state);
17763                                 intel_encoder_get_config(encoder, crtc_state);
17764                         }
17765                 } else {
17766                         encoder->base.crtc = NULL;
17767                 }
17768
17769                 drm_dbg_kms(&dev_priv->drm,
17770                             "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
17771                             encoder->base.base.id, encoder->base.name,
17772                             enableddisabled(encoder->base.crtc),
17773                             pipe_name(pipe));
17774         }
17775
17776         drm_connector_list_iter_begin(dev, &conn_iter);
17777         for_each_intel_connector_iter(connector, &conn_iter) {
17778                 if (connector->get_hw_state(connector)) {
17779                         struct intel_crtc_state *crtc_state;
17780                         struct intel_crtc *crtc;
17781
17782                         connector->base.dpms = DRM_MODE_DPMS_ON;
17783
17784                         encoder = intel_attached_encoder(connector);
17785                         connector->base.encoder = &encoder->base;
17786
17787                         crtc = to_intel_crtc(encoder->base.crtc);
17788                         crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
17789
17790                         if (crtc_state && crtc_state->hw.active) {
17791                                 /*
17792                                  * This has to be done during hardware readout
17793                                  * because anything calling .crtc_disable may
17794                                  * rely on the connector_mask being accurate.
17795                                  */
17796                                 crtc_state->uapi.connector_mask |=
17797                                         drm_connector_mask(&connector->base);
17798                                 crtc_state->uapi.encoder_mask |=
17799                                         drm_encoder_mask(&encoder->base);
17800                         }
17801                 } else {
17802                         connector->base.dpms = DRM_MODE_DPMS_OFF;
17803                         connector->base.encoder = NULL;
17804                 }
17805                 drm_dbg_kms(&dev_priv->drm,
17806                             "[CONNECTOR:%d:%s] hw state readout: %s\n",
17807                             connector->base.base.id, connector->base.name,
17808                             enableddisabled(connector->base.encoder));
17809         }
17810         drm_connector_list_iter_end(&conn_iter);
17811
17812         for_each_intel_crtc(dev, crtc) {
17813                 struct intel_bw_state *bw_state =
17814                         to_intel_bw_state(dev_priv->bw_obj.state);
17815                 struct intel_crtc_state *crtc_state =
17816                         to_intel_crtc_state(crtc->base.state);
17817                 struct intel_plane *plane;
17818                 int min_cdclk = 0;
17819
17820                 if (crtc_state->bigjoiner_slave)
17821                         continue;
17822
17823                 if (crtc_state->hw.active) {
17824                         /*
17825                          * The initial mode needs to be set in order to keep
17826                          * the atomic core happy. It wants a valid mode if the
17827                          * crtc's enabled, so we do the above call.
17828                          *
17829                          * But we don't set all the derived state fully, hence
17830                          * set a flag to indicate that a full recalculation is
17831                          * needed on the next commit.
17832                          */
17833                         crtc_state->inherited = true;
17834
17835                         intel_crtc_update_active_timings(crtc_state);
17836
17837                         intel_crtc_copy_hw_to_uapi_state(crtc_state);
17838                 }
17839
17840                 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
17841                         const struct intel_plane_state *plane_state =
17842                                 to_intel_plane_state(plane->base.state);
17843
17844                         /*
17845                          * FIXME don't have the fb yet, so can't
17846                          * use intel_plane_data_rate() :(
17847                          */
17848                         if (plane_state->uapi.visible)
17849                                 crtc_state->data_rate[plane->id] =
17850                                         4 * crtc_state->pixel_rate;
17851                         /*
17852                          * FIXME don't have the fb yet, so can't
17853                          * use plane->min_cdclk() :(
17854                          */
17855                         if (plane_state->uapi.visible && plane->min_cdclk) {
17856                                 if (crtc_state->double_wide ||
17857                                     INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
17858                                         crtc_state->min_cdclk[plane->id] =
17859                                                 DIV_ROUND_UP(crtc_state->pixel_rate, 2);
17860                                 else
17861                                         crtc_state->min_cdclk[plane->id] =
17862                                                 crtc_state->pixel_rate;
17863                         }
17864                         drm_dbg_kms(&dev_priv->drm,
17865                                     "[PLANE:%d:%s] min_cdclk %d kHz\n",
17866                                     plane->base.base.id, plane->base.name,
17867                                     crtc_state->min_cdclk[plane->id]);
17868                 }
17869
17870                 if (crtc_state->hw.active) {
17871                         min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
17872                         if (drm_WARN_ON(dev, min_cdclk < 0))
17873                                 min_cdclk = 0;
17874                 }
17875
17876                 cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
17877                 cdclk_state->min_voltage_level[crtc->pipe] =
17878                         crtc_state->min_voltage_level;
17879
17880                 intel_bw_crtc_update(bw_state, crtc_state);
17881
17882                 intel_pipe_config_sanity_check(dev_priv, crtc_state);
17883
17884                 /* discard our incomplete slave state, copy it from master */
17885                 if (crtc_state->bigjoiner && crtc_state->hw.active) {
17886                         struct intel_crtc *slave = crtc_state->bigjoiner_linked_crtc;
17887                         struct intel_crtc_state *slave_crtc_state =
17888                                 to_intel_crtc_state(slave->base.state);
17889
17890                         copy_bigjoiner_crtc_state(slave_crtc_state, crtc_state);
17891                         slave->base.mode = crtc->base.mode;
17892
17893                         cdclk_state->min_cdclk[slave->pipe] = min_cdclk;
17894                         cdclk_state->min_voltage_level[slave->pipe] =
17895                                 crtc_state->min_voltage_level;
17896
17897                         for_each_intel_plane_on_crtc(&dev_priv->drm, slave, plane) {
17898                                 const struct intel_plane_state *plane_state =
17899                                         to_intel_plane_state(plane->base.state);
17900
17901                                 /*
17902                                  * FIXME don't have the fb yet, so can't
17903                                  * use intel_plane_data_rate() :(
17904                                  */
17905                                 if (plane_state->uapi.visible)
17906                                         crtc_state->data_rate[plane->id] =
17907                                                 4 * crtc_state->pixel_rate;
17908                                 else
17909                                         crtc_state->data_rate[plane->id] = 0;
17910                         }
17911
17912                         intel_bw_crtc_update(bw_state, slave_crtc_state);
17913                         drm_calc_timestamping_constants(&slave->base,
17914                                                         &slave_crtc_state->hw.adjusted_mode);
17915                 }
17916         }
17917 }
17918
17919 static void
17920 get_encoder_power_domains(struct drm_i915_private *dev_priv)
17921 {
17922         struct intel_encoder *encoder;
17923
17924         for_each_intel_encoder(&dev_priv->drm, encoder) {
17925                 struct intel_crtc_state *crtc_state;
17926
17927                 if (!encoder->get_power_domains)
17928                         continue;
17929
17930                 /*
17931                  * MST-primary and inactive encoders don't have a crtc state
17932                  * and neither of these require any power domain references.
17933                  */
17934                 if (!encoder->base.crtc)
17935                         continue;
17936
17937                 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
17938                 encoder->get_power_domains(encoder, crtc_state);
17939         }
17940 }
17941
/* Apply display workarounds that must be in place before any other display setup. */
static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
	/*
	 * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl
	 * Also known as Wa_14010480278.
	 */
	if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv))
		intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
			       intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);

	if (IS_HASWELL(dev_priv)) {
		/*
		 * WaRsPkgCStateDisplayPMReq:hsw
		 * System hang if this isn't done before disabling all planes!
		 */
		intel_de_write(dev_priv, CHICKEN_PAR1_1,
			       intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
	}

	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) {
		/* Display WA #1142:kbl,cfl,cml */
		intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
			     KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
		intel_de_rmw(dev_priv, CHICKEN_MISC_2,
			     KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
			     KBL_ARB_FILL_SPARE_14);
	}
}
17970
17971 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
17972                                        enum port port, i915_reg_t hdmi_reg)
17973 {
17974         u32 val = intel_de_read(dev_priv, hdmi_reg);
17975
17976         if (val & SDVO_ENABLE ||
17977             (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
17978                 return;
17979
17980         drm_dbg_kms(&dev_priv->drm,
17981                     "Sanitizing transcoder select for HDMI %c\n",
17982                     port_name(port));
17983
17984         val &= ~SDVO_PIPE_SEL_MASK;
17985         val |= SDVO_PIPE_SEL(PIPE_A);
17986
17987         intel_de_write(dev_priv, hdmi_reg, val);
17988 }
17989
17990 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
17991                                      enum port port, i915_reg_t dp_reg)
17992 {
17993         u32 val = intel_de_read(dev_priv, dp_reg);
17994
17995         if (val & DP_PORT_EN ||
17996             (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
17997                 return;
17998
17999         drm_dbg_kms(&dev_priv->drm,
18000                     "Sanitizing transcoder select for DP %c\n",
18001                     port_name(port));
18002
18003         val &= ~DP_PIPE_SEL_MASK;
18004         val |= DP_PIPE_SEL(PIPE_A);
18005
18006         intel_de_write(dev_priv, dp_reg, val);
18007 }
18008
/* Sanitize the transcoder select on all PCH DP and HDMI/SDVO ports. */
static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
{
	/*
	 * The BIOS may select transcoder B on some of the PCH
	 * ports even if it doesn't enable the port. This would trip
	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
	 * Sanitize the transcoder select bits to prevent that. We
	 * assume that the BIOS never actually enabled the port,
	 * because if it did we'd actually have to toggle the port
	 * on and back off to make the transcoder A select stick
	 * (see. intel_dp_link_down(), intel_disable_hdmi(),
	 * intel_disable_sdvo()).
	 */
	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);

	/* PCH SDVOB multiplex with HDMIB */
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
}
18031
/*
 * Scan out the current hw modeset state, and sanitize it to the current
 * state. The ordering of the sanitize steps below matters; see the
 * individual comments.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;

	/* Keep everything powered while we poke at the hardware. */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */

	/* Sanitize the TypeC port mode upfront, encoders depend on this */
	for_each_intel_encoder(dev, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		/* We need to sanitize only the MST primary port. */
		if (encoder->type != INTEL_OUTPUT_DP_MST &&
		    intel_phy_is_tc(dev_priv, phy))
			intel_tc_port_sanitize(enc_to_dig_port(encoder));
	}

	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->hw.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	intel_dpll_sanitize_state(dev_priv);

	/* Read out (and sanitize, where supported) the watermark state. */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		u64 put_domains;

		/* No crtc should still be holding modeset power domains here. */
		put_domains = modeset_get_crtc_power_domains(crtc_state);
		if (drm_WARN_ON(dev, put_domains))
			modeset_put_crtc_power_domains(crtc, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}
18121
18122 void intel_display_resume(struct drm_device *dev)
18123 {
18124         struct drm_i915_private *dev_priv = to_i915(dev);
18125         struct drm_atomic_state *state = dev_priv->modeset_restore_state;
18126         struct drm_modeset_acquire_ctx ctx;
18127         int ret;
18128
18129         dev_priv->modeset_restore_state = NULL;
18130         if (state)
18131                 state->acquire_ctx = &ctx;
18132
18133         drm_modeset_acquire_init(&ctx, 0);
18134
18135         while (1) {
18136                 ret = drm_modeset_lock_all_ctx(dev, &ctx);
18137                 if (ret != -EDEADLK)
18138                         break;
18139
18140                 drm_modeset_backoff(&ctx);
18141         }
18142
18143         if (!ret)
18144                 ret = __intel_display_resume(dev, state, &ctx);
18145
18146         intel_enable_ipc(dev_priv);
18147         drm_modeset_drop_locks(&ctx);
18148         drm_modeset_acquire_fini(&ctx);
18149
18150         if (ret)
18151                 drm_err(&dev_priv->drm,
18152                         "Restoring old state failed with %i\n", ret);
18153         if (state)
18154                 drm_atomic_state_put(state);
18155 }
18156
18157 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
18158 {
18159         struct intel_connector *connector;
18160         struct drm_connector_list_iter conn_iter;
18161
18162         /* Kill all the work that may have been queued by hpd. */
18163         drm_connector_list_iter_begin(&i915->drm, &conn_iter);
18164         for_each_intel_connector_iter(connector, &conn_iter) {
18165                 if (connector->modeset_retry_work.func)
18166                         cancel_work_sync(&connector->modeset_retry_work);
18167                 if (connector->hdcp.shim) {
18168                         cancel_delayed_work_sync(&connector->hdcp.check_work);
18169                         cancel_work_sync(&connector->hdcp.prop_work);
18170                 }
18171         }
18172         drm_connector_list_iter_end(&conn_iter);
18173 }
18174
/* part #1: call before irq uninstall */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	/* Drain any in-flight flips and modesets before tearing down irqs. */
	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	flush_work(&i915->atomic_helper.free_work);
	drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
}
18184
/* part #2: call after irq uninstall */
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
{
	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	intel_mode_config_cleanup(i915);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	/* Workqueues were flushed in part #1; nothing can requeue by now. */
	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	intel_fbc_cleanup_cfb(i915);
}
18224
/* part #3: call after gem init */
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
{
	intel_csr_ucode_fini(i915);

	/* Power domains go last among the hw-facing teardown steps. */
	intel_power_domains_driver_remove(i915);

	intel_vga_unregister(i915);

	intel_bios_driver_remove(i915);
}
18236
18237 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
18238
/* Snapshot of display register state captured at error time. */
struct intel_display_error_state {

	u32 power_well_driver;

	/* Per-pipe cursor plane registers. */
	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	/* Per-pipe state; registers valid only if power_domain_on. */
	struct intel_pipe_error_state {
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	/* Per-pipe primary plane registers. */
	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	/* Per-transcoder timing registers; valid only if available && power_domain_on. */
	struct intel_transcoder_error_state {
		bool available;
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[5];
};
18281
/*
 * Capture the current display register state for the error state dump.
 * Returns NULL if there is no display or the allocation fails; the caller
 * owns (and must free) the returned snapshot.
 */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv)
{
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_D,
		TRANSCODER_EDP,
	};
	int i;

	/* Compile-time size check only; error is not dereferenced here. */
	BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));

	if (!HAS_DISPLAY(dev_priv))
		return NULL;

	/* GFP_ATOMIC: may be called from the error capture path. */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		error->power_well_driver = intel_de_read(dev_priv,
							 HSW_PWR_WELL_CTL2);

	for_each_pipe(dev_priv, i) {
		/* Skip powered-down pipes; reading them would be invalid. */
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = intel_de_read(dev_priv, CURCNTR(i));
		error->cursor[i].position = intel_de_read(dev_priv, CURPOS(i));
		error->cursor[i].base = intel_de_read(dev_priv, CURBASE(i));

		error->plane[i].control = intel_de_read(dev_priv, DSPCNTR(i));
		error->plane[i].stride = intel_de_read(dev_priv, DSPSTRIDE(i));
		if (INTEL_GEN(dev_priv) <= 3) {
			error->plane[i].size = intel_de_read(dev_priv,
							     DSPSIZE(i));
			error->plane[i].pos = intel_de_read(dev_priv,
							    DSPPOS(i));
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			error->plane[i].addr = intel_de_read(dev_priv,
							     DSPADDR(i));
		if (INTEL_GEN(dev_priv) >= 4) {
			error->plane[i].surface = intel_de_read(dev_priv,
								DSPSURF(i));
			error->plane[i].tile_offset = intel_de_read(dev_priv,
								    DSPTILEOFF(i));
		}

		error->pipe[i].source = intel_de_read(dev_priv, PIPESRC(i));

		if (HAS_GMCH(dev_priv))
			error->pipe[i].stat = intel_de_read(dev_priv,
							    PIPESTAT(i));
	}

	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		if (!HAS_TRANSCODER(dev_priv, cpu_transcoder))
			continue;

		error->transcoder[i].available = true;
		error->transcoder[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = intel_de_read(dev_priv,
							  PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = intel_de_read(dev_priv,
							    HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = intel_de_read(dev_priv,
							    HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = intel_de_read(dev_priv,
							   HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = intel_de_read(dev_priv,
							    VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = intel_de_read(dev_priv,
							    VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = intel_de_read(dev_priv,
							   VSYNC(cpu_transcoder));
	}

	return error;
}
18377
/* Shorthand: print into the error state buffer. */
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
18379
/*
 * intel_display_print_error_state - dump a previously captured display
 * error state into an i915 error-state buffer.
 * @m: destination error-state buffer (provides the i915 device via m->i915)
 * @error: snapshot of display registers, or NULL if capture failed/was
 *         skipped (the capture path may legitimately return NULL)
 *
 * Pure formatting: walks the captured per-pipe, per-plane, per-cursor and
 * per-transcoder register values and prints them via err_printf().  The
 * order and format of the lines form the error-state dump layout, so the
 * sequence of prints below must not be reordered.
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
                                struct intel_display_error_state *error)
{
        struct drm_i915_private *dev_priv = m->i915;
        int i;

        /* Capture may have produced nothing; dump nothing in that case. */
        if (!error)
                return;

        err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
        /* PWR_WELL_CTL2 was only captured on HSW/BDW; print it only there. */
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                err_printf(m, "PWR_WELL_CTL2: %08x\n",
                           error->power_well_driver);
        for_each_pipe(dev_priv, i) {
                err_printf(m, "Pipe [%d]:\n", i);
                err_printf(m, "  Power: %s\n",
                           onoff(error->pipe[i].power_domain_on));
                err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
                err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

                /* Primary plane registers; which ones exist varies by gen. */
                err_printf(m, "Plane [%d]:\n", i);
                err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
                err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
                if (INTEL_GEN(dev_priv) <= 3) {
                        /* DSPSIZE/DSPPOS only exist on gen2/3. */
                        err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
                        err_printf(m, "  POS: %08x\n", error->plane[i].pos);
                }
                if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
                        err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
                if (INTEL_GEN(dev_priv) >= 4) {
                        /* gen4+ use a surface base address + tile offset. */
                        err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
                        err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
                }

                err_printf(m, "Cursor [%d]:\n", i);
                err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
                err_printf(m, "  POS: %08x\n", error->cursor[i].position);
                err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
        }

        /* Transcoders are captured into a fixed-size array; entries not
         * present on this platform were marked unavailable at capture time.
         */
        for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
                if (!error->transcoder[i].available)
                        continue;

                err_printf(m, "CPU transcoder: %s\n",
                           transcoder_name(error->transcoder[i].cpu_transcoder));
                err_printf(m, "  Power: %s\n",
                           onoff(error->transcoder[i].power_domain_on));
                err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
                err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
                err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
                err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
                err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
                err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
                err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
        }
}
18438
18439 #endif