Merge drm/drm-next into drm-intel-next-queued
[platform/kernel/linux-rpi.git] / drivers / gpu / drm / i915 / intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/dmi.h>
28 #include <linux/module.h>
29 #include <linux/input.h>
30 #include <linux/i2c.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/vgaarb.h>
34 #include <drm/drm_edid.h>
35 #include <drm/drmP.h>
36 #include "intel_drv.h"
37 #include "intel_frontbuffer.h"
38 #include <drm/i915_drm.h>
39 #include "i915_drv.h"
40 #include "i915_gem_clflush.h"
41 #include "intel_dsi.h"
42 #include "i915_trace.h"
43 #include <drm/drm_atomic.h>
44 #include <drm/drm_atomic_helper.h>
45 #include <drm/drm_dp_helper.h>
46 #include <drm/drm_crtc_helper.h>
47 #include <drm/drm_plane_helper.h>
48 #include <drm/drm_rect.h>
49 #include <drm/drm_atomic_uapi.h>
50 #include <linux/dma_remapping.h>
51 #include <linux/reservation.h>
52
/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 (adds BGR and 10bpc variants) */
static const uint32_t i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

/* Tiling modifiers for pre-SKL planes; list is MOD_INVALID terminated */
static const uint64_t i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* SKL+ primary plane formats (adds alpha and packed YUV formats) */
static const uint32_t skl_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
};

/* skl_primary_formats plus NV12, for planes that support planar YUV */
static const uint32_t skl_pri_planar_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
	DRM_FORMAT_NV12,
};

/* SKL+ modifiers when color compression (CCS) is not available */
static const uint64_t skl_format_modifiers_noccs[] = {
	I915_FORMAT_MOD_Yf_TILED,
	I915_FORMAT_MOD_Y_TILED,
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* SKL+ modifiers including the compressed (CCS) tiling layouts */
static const uint64_t skl_format_modifiers_ccs[] = {
	I915_FORMAT_MOD_Yf_TILED_CCS,
	I915_FORMAT_MOD_Y_TILED_CCS,
	I915_FORMAT_MOD_Yf_TILED,
	I915_FORMAT_MOD_Y_TILED,
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

/* Cursor planes only take linear buffers */
static const uint64_t cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};
135
136 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
137                                 struct intel_crtc_state *pipe_config);
138 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
139                                    struct intel_crtc_state *pipe_config);
140
141 static int intel_framebuffer_init(struct intel_framebuffer *ifb,
142                                   struct drm_i915_gem_object *obj,
143                                   struct drm_mode_fb_cmd2 *mode_cmd);
144 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
145 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
146 static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
147 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
148                                          struct intel_link_m_n *m_n,
149                                          struct intel_link_m_n *m2_n2);
150 static void ironlake_set_pipeconf(struct drm_crtc *crtc);
151 static void haswell_set_pipeconf(struct drm_crtc *crtc);
152 static void haswell_set_pipemisc(struct drm_crtc *crtc);
153 static void vlv_prepare_pll(struct intel_crtc *crtc,
154                             const struct intel_crtc_state *pipe_config);
155 static void chv_prepare_pll(struct intel_crtc *crtc,
156                             const struct intel_crtc_state *pipe_config);
157 static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
158 static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
159 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
160                                     struct intel_crtc_state *crtc_state);
161 static void skylake_pfit_enable(struct intel_crtc *crtc);
162 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
163 static void ironlake_pfit_enable(struct intel_crtc *crtc);
164 static void intel_modeset_setup_hw_state(struct drm_device *dev,
165                                          struct drm_modeset_acquire_ctx *ctx);
166 static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
167
/*
 * Per-platform DPLL divisor limits. Clock values (dot, vco, dot_limit)
 * are in kHz, matching the target/refclk units used by the
 * *_find_best_dpll() helpers below.
 */
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		/* dot clock threshold selecting p2_slow vs. p2_fast */
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};
178
/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	/*
	 * HPLL VCO rates in MHz, indexed by the SKU fuse field.
	 * Assumes the masked fuse value is always <= 3 — the mask
	 * width isn't visible here; confirm against the register spec.
	 */
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	mutex_lock(&dev_priv->sb_lock);
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;
	mutex_unlock(&dev_priv->sb_lock);

	/* MHz -> kHz */
	return vco_freq[hpll_freq] * 1000;
}
192
/*
 * Read the CCK divider register @reg and derive the resulting clock
 * rate from @ref_freq (result in the same units as @ref_freq).
 * @name is only used for the warning message below.
 */
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	/* Sideband register access is serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, reg);
	mutex_unlock(&dev_priv->sb_lock);

	divider = val & CCK_FREQUENCY_VALUES;

	/*
	 * The status field should mirror the programmed divider once
	 * the hardware has settled; warn if a change is still pending.
	 */
	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

	/* rate = ref_freq * 2 / (divider + 1), rounded to nearest */
	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
211
212 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
213                            const char *name, u32 reg)
214 {
215         if (dev_priv->hpll_freq == 0)
216                 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
217
218         return vlv_get_cck_clock(dev_priv, name, reg,
219                                  dev_priv->hpll_freq);
220 }
221
222 static void intel_update_czclk(struct drm_i915_private *dev_priv)
223 {
224         if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
225                 return;
226
227         dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
228                                                       CCK_CZ_CLOCK_CONTROL);
229
230         DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
231 }
232
233 static inline u32 /* units of 100MHz */
234 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
235                     const struct intel_crtc_state *pipe_config)
236 {
237         if (HAS_DDI(dev_priv))
238                 return pipe_config->port_clock; /* SPLL */
239         else
240                 return dev_priv->fdi_pll_freq;
241 }
242
/* gen2 (i8xx) DPLL limits for the CRT/DAC output */
static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

/* gen2 DVO limits; differs from DAC only in p2_fast */
static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

/* gen2 LVDS limits */
static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

/* gen3/4 (i9xx) SDVO limits */
static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* gen3/4 (i9xx) LVDS limits */
static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};


/* G4x SDVO limits */
static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

/* G4x HDMI limits */
static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* G4x single-channel LVDS limits (dot_limit 0: p2 is always "fast") */
static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

/* G4x dual-channel LVDS limits */
static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

/* Pineview SDVO limits */
static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* Pineview LVDS limits */
static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};
392
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* ILK/SNB single-channel LVDS limits */
static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* ILK/SNB dual-channel LVDS limits */
static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Dual-channel LVDS with 100MHz refclk */
static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_vlv = {
	 /*
	  * These are the data rate limits (measured in fast clocks)
	  * since those are the strictest limits we have. The fast
	  * clock and actual rate limits are more relaxed, so checking
	  * them would make no difference.
	  */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have.  The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* m2 is stored in 22.22 fixed point (see chv_calc_dpll_params) */
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
507
508 static void
509 skl_wa_528(struct drm_i915_private *dev_priv, int pipe, bool enable)
510 {
511         if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
512                 return;
513
514         if (enable)
515                 I915_WRITE(CHICKEN_PIPESL_1(pipe), HSW_FBCQ_DIS);
516         else
517                 I915_WRITE(CHICKEN_PIPESL_1(pipe), 0);
518 }
519
520 static void
521 skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable)
522 {
523         if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
524                 return;
525
526         if (enable)
527                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
528                            DUPS1_GATING_DIS | DUPS2_GATING_DIS);
529         else
530                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
531                            I915_READ(CLKGATE_DIS_PSL(pipe)) &
532                            ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
533 }
534
/*
 * Does this crtc_state require a full modeset (as opposed to a
 * fastset/plane-only update)? Thin wrapper over the DRM atomic helper.
 */
static bool
needs_modeset(const struct drm_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(state);
}
540
541 /*
542  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
543  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
544  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
545  * The helpers' return value is the rate of the clock that is fed to the
546  * display engine's pipe which can be the above fast dot clock rate or a
547  * divided-down version of it.
548  */
549 /* m1 is reserved as 0 in Pineview, n is a ring counter */
550 static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
551 {
552         clock->m = clock->m2 + 2;
553         clock->p = clock->p1 * clock->p2;
554         if (WARN_ON(clock->n == 0 || clock->p == 0))
555                 return 0;
556         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
557         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
558
559         return clock->dot;
560 }
561
/* Effective m divider for i9xx: 5 * (m1 + 2) + (m2 + 2). */
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}
566
567 static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
568 {
569         clock->m = i9xx_dpll_compute_m(clock);
570         clock->p = clock->p1 * clock->p2;
571         if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
572                 return 0;
573         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
574         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
575
576         return clock->dot;
577 }
578
579 static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
580 {
581         clock->m = clock->m1 * clock->m2;
582         clock->p = clock->p1 * clock->p2;
583         if (WARN_ON(clock->n == 0 || clock->p == 0))
584                 return 0;
585         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
586         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
587
588         return clock->dot / 5;
589 }
590
/*
 * CHV DPLL: m2 (and hence m = m1 * m2) is in 22.22 fixed point — see
 * intel_limits_chv — so the multiply is done in 64 bits and the n
 * divisor is scaled by << 22 to cancel the fixed-point factor. Like
 * VLV, the pipe gets the fast dot clock divided by 5.
 */
int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
			clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
603
/*
 * Reject the candidate divisors: returns false from the CALLER
 * (intel_PLL_is_valid). The per-reason debug message is compiled out.
 */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
605
606 /*
607  * Returns whether the given set of divisors are valid for a given refclk with
608  * the given connectors.
609  */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	/* Each INTELPllInvalid() below returns false from this function. */
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	/*
	 * m1 > m2 is a hardware requirement on platforms with separate
	 * m1/m2 dividers; PNV/VLV/CHV/BXT don't use m1 that way.
	 */
	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	/* VLV/CHV/BXT limit tables don't populate the combined m/p ranges. */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
646
647 static int
648 i9xx_select_p2_div(const struct intel_limit *limit,
649                    const struct intel_crtc_state *crtc_state,
650                    int target)
651 {
652         struct drm_device *dev = crtc_state->base.crtc->dev;
653
654         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
655                 /*
656                  * For LVDS just rely on its current settings for dual-channel.
657                  * We haven't figured out how to reliably set up different
658                  * single/dual channel state, if we even can.
659                  */
660                 if (intel_is_dual_link_lvds(dev))
661                         return limit->p2.p2_fast;
662                 else
663                         return limit->p2.p2_slow;
664         } else {
665                 if (target < limit->p2.dot_limit)
666                         return limit->p2.p2_slow;
667                 else
668                         return limit->p2.p2_fast;
669         }
670 }
671
672 /*
673  * Returns a set of divisors for the desired target clock with the given
674  * refclk, or FALSE.  The returned values represent the clock equation:
675  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
676  *
677  * Target and reference clocks are specified in kHz.
678  *
679  * If match_clock is provided, then best_clock P divider must match the P
680  * divider from @match_clock used for LVDS downclocking.
681  */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	/* best error so far; starts at target so any valid hit beats it */
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/* Exhaustive search over m1/m2/n/p1; iteration order breaks ties. */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* hardware requires m1 > m2 on these platforms */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					/* LVDS downclocking: P must match */
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* true iff at least one candidate improved on the initial error */
	return (err != target);
}
729
730 /*
731  * Returns a set of divisors for the desired target clock with the given
732  * refclk, or FALSE.  The returned values represent the clock equation:
733  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
734  *
735  * Target and reference clocks are specified in kHz.
736  *
737  * If match_clock is provided, then best_clock P divider must match the P
738  * divider from @match_clock used for LVDS downclocking.
739  */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	/* best error so far; starts at target so any valid hit beats it */
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/*
	 * Same exhaustive search as i9xx_find_best_dpll(), but without
	 * the m1 > m2 constraint (Pineview keeps m1 at 0) and using the
	 * Pineview clock equation.
	 */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					/* LVDS downclocking: P must match */
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* true iff at least one candidate improved on the initial error */
	return (err != target);
}
785
786 /*
787  * Returns a set of divisors for the desired target clock with the given
788  * refclk, or FALSE.  The returned values represent the clock equation:
789  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
790  *
791  * Target and reference clocks are specified in kHz.
792  *
793  * If match_clock is provided, then best_clock P divider must match the P
794  * divider from @match_clock used for LVDS downclocking.
795  */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	/* note: match_clock is not used by this search */

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						/*
						 * New best candidate: also shrink
						 * the n search range, since
						 * smaller n is preferred.
						 */
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
844
845 /*
846  * Check if the calculated PLL configuration is more optimal compared to the
847  * best configuration and error found so far. Return the calculated error.
848  */
849 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
850                                const struct dpll *calculated_clock,
851                                const struct dpll *best_clock,
852                                unsigned int best_error_ppm,
853                                unsigned int *error_ppm)
854 {
855         /*
856          * For CHV ignore the error and consider only the P value.
857          * Prefer a bigger P value based on HW requirements.
858          */
859         if (IS_CHERRYVIEW(to_i915(dev))) {
860                 *error_ppm = 0;
861
862                 return calculated_clock->p > best_clock->p;
863         }
864
865         if (WARN_ON_ONCE(!target_freq))
866                 return false;
867
868         *error_ppm = div_u64(1000000ULL *
869                                 abs(target_freq - calculated_clock->dot),
870                              target_freq);
871         /*
872          * Prefer a better P value over a better (smaller) error if the error
873          * is small. Ensure this preference for future configurations too by
874          * setting the error to 0.
875          */
876         if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
877                 *error_ppm = 0;
878
879                 return true;
880         }
881
882         return *error_ppm + 10 < best_error_ppm;
883 }
884
885 /*
886  * Returns a set of divisors for the desired target clock with the given
887  * refclk, or FALSE.  The returned values represent the clock equation:
888  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
889  */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	/* best error so far, in ppm; start above any achievable value */
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	/* note: match_clock is not used by this search */

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					/* solve m2 for the target dot clock given the other dividers */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}
944
945 /*
946  * Returns a set of divisors for the desired target clock with the given
947  * refclk, or FALSE.  The returned values represent the clock equation:
948  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
949  */
950 static bool
951 chv_find_best_dpll(const struct intel_limit *limit,
952                    struct intel_crtc_state *crtc_state,
953                    int target, int refclk, struct dpll *match_clock,
954                    struct dpll *best_clock)
955 {
956         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
957         struct drm_device *dev = crtc->base.dev;
958         unsigned int best_error_ppm;
959         struct dpll clock;
960         uint64_t m2;
961         int found = false;
962
963         memset(best_clock, 0, sizeof(*best_clock));
964         best_error_ppm = 1000000;
965
966         /*
967          * Based on hardware doc, the n always set to 1, and m1 always
968          * set to 2.  If requires to support 200Mhz refclk, we need to
969          * revisit this because n may not 1 anymore.
970          */
971         clock.n = 1, clock.m1 = 2;
972         target *= 5;    /* fast clock */
973
974         for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
975                 for (clock.p2 = limit->p2.p2_fast;
976                                 clock.p2 >= limit->p2.p2_slow;
977                                 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
978                         unsigned int error_ppm;
979
980                         clock.p = clock.p1 * clock.p2;
981
982                         m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
983                                         clock.n) << 22, refclk * clock.m1);
984
985                         if (m2 > INT_MAX/clock.m1)
986                                 continue;
987
988                         clock.m2 = m2;
989
990                         chv_calc_dpll_params(refclk, &clock);
991
992                         if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
993                                 continue;
994
995                         if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
996                                                 best_error_ppm, &error_ppm))
997                                 continue;
998
999                         *best_clock = clock;
1000                         best_error_ppm = error_ppm;
1001                         found = true;
1002                 }
1003         }
1004
1005         return found;
1006 }
1007
1008 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
1009                         struct dpll *best_clock)
1010 {
1011         int refclk = 100000;
1012         const struct intel_limit *limit = &intel_limits_bxt;
1013
1014         return chv_find_best_dpll(limit, crtc_state,
1015                                   target_clock, refclk, NULL, best_clock);
1016 }
1017
1018 bool intel_crtc_active(struct intel_crtc *crtc)
1019 {
1020         /* Be paranoid as we can arrive here with only partial
1021          * state retrieved from the hardware during setup.
1022          *
1023          * We can ditch the adjusted_mode.crtc_clock check as soon
1024          * as Haswell has gained clock readout/fastboot support.
1025          *
1026          * We can ditch the crtc->primary->state->fb check as soon as we can
1027          * properly reconstruct framebuffers.
1028          *
1029          * FIXME: The intel_crtc->active here should be switched to
1030          * crtc->state->active once we have proper CRTC states wired up
1031          * for atomic.
1032          */
1033         return crtc->active && crtc->base.primary->state->fb &&
1034                 crtc->config->base.adjusted_mode.crtc_clock;
1035 }
1036
1037 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1038                                              enum pipe pipe)
1039 {
1040         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
1041
1042         return crtc->config->cpu_transcoder;
1043 }
1044
1045 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
1046                                     enum pipe pipe)
1047 {
1048         i915_reg_t reg = PIPEDSL(pipe);
1049         u32 line1, line2;
1050         u32 line_mask;
1051
1052         if (IS_GEN2(dev_priv))
1053                 line_mask = DSL_LINEMASK_GEN2;
1054         else
1055                 line_mask = DSL_LINEMASK_GEN3;
1056
1057         line1 = I915_READ(reg) & line_mask;
1058         msleep(5);
1059         line2 = I915_READ(reg) & line_mask;
1060
1061         return line1 != line2;
1062 }
1063
/*
 * Poll until the pipe's scanline counter is moving (@state == true) or
 * stopped (@state == false); logs an error on timeout.
 */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		DRM_ERROR("pipe %c scanline %s wait timed out\n",
			  pipe_name(pipe), onoff(state));
}
1074
/* Wait until the pipe's scanline counter has stopped advancing. */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}
1079
/* Wait until the pipe's scanline counter has started advancing. */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
1084
/* Wait for the pipe described by @old_crtc_state to fully turn off. */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_wait_for_register(dev_priv,
					    reg, I965_PIPECONF_ACTIVE, 0,
					    100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		/*
		 * Pre-gen4 lacks the I965 pipe-active status bit, so infer
		 * pipe-off by watching the scanline counter stop instead.
		 */
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}
1104
1105 /* Only for pre-ILK configs */
1106 void assert_pll(struct drm_i915_private *dev_priv,
1107                 enum pipe pipe, bool state)
1108 {
1109         u32 val;
1110         bool cur_state;
1111
1112         val = I915_READ(DPLL(pipe));
1113         cur_state = !!(val & DPLL_VCO_ENABLE);
1114         I915_STATE_WARN(cur_state != state,
1115              "PLL state assertion failure (expected %s, current %s)\n",
1116                         onoff(state), onoff(cur_state));
1117 }
1118
1119 /* XXX: the dsi pll is shared between MIPI DSI ports */
1120 void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
1121 {
1122         u32 val;
1123         bool cur_state;
1124
1125         mutex_lock(&dev_priv->sb_lock);
1126         val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
1127         mutex_unlock(&dev_priv->sb_lock);
1128
1129         cur_state = val & DSI_PLL_VCO_EN;
1130         I915_STATE_WARN(cur_state != state,
1131              "DSI PLL state assertion failure (expected %s, current %s)\n",
1132                         onoff(state), onoff(cur_state));
1133 }
1134
1135 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1136                           enum pipe pipe, bool state)
1137 {
1138         bool cur_state;
1139         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1140                                                                       pipe);
1141
1142         if (HAS_DDI(dev_priv)) {
1143                 /* DDI does not have a specific FDI_TX register */
1144                 u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
1145                 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1146         } else {
1147                 u32 val = I915_READ(FDI_TX_CTL(pipe));
1148                 cur_state = !!(val & FDI_TX_ENABLE);
1149         }
1150         I915_STATE_WARN(cur_state != state,
1151              "FDI TX state assertion failure (expected %s, current %s)\n",
1152                         onoff(state), onoff(cur_state));
1153 }
1154 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1155 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1156
1157 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1158                           enum pipe pipe, bool state)
1159 {
1160         u32 val;
1161         bool cur_state;
1162
1163         val = I915_READ(FDI_RX_CTL(pipe));
1164         cur_state = !!(val & FDI_RX_ENABLE);
1165         I915_STATE_WARN(cur_state != state,
1166              "FDI RX state assertion failure (expected %s, current %s)\n",
1167                         onoff(state), onoff(cur_state));
1168 }
1169 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1170 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1171
1172 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1173                                       enum pipe pipe)
1174 {
1175         u32 val;
1176
1177         /* ILK FDI PLL is always enabled */
1178         if (IS_GEN5(dev_priv))
1179                 return;
1180
1181         /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1182         if (HAS_DDI(dev_priv))
1183                 return;
1184
1185         val = I915_READ(FDI_TX_CTL(pipe));
1186         I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1187 }
1188
1189 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1190                        enum pipe pipe, bool state)
1191 {
1192         u32 val;
1193         bool cur_state;
1194
1195         val = I915_READ(FDI_RX_CTL(pipe));
1196         cur_state = !!(val & FDI_RX_PLL_ENABLE);
1197         I915_STATE_WARN(cur_state != state,
1198              "FDI RX PLL assertion failure (expected %s, current %s)\n",
1199                         onoff(state), onoff(cur_state));
1200 }
1201
/*
 * Assert that the panel power sequencer register write lock is not held
 * for the panel driven by @pipe.  Callers use this before writing PLL
 * registers ("PLL is protected by panel").  Not applicable on DDI
 * platforms.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		/*
		 * Decode which port the power sequencer drives, then look up
		 * the pipe that port is currently attached to.
		 */
		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		/* Pre-PCH-split: only LVDS panels are expected here. */
		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	/* Unlocked means either panel power is off or the unlock key is set. */
	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
1258
/* Assert that @pipe's enable state matches @state. */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	/*
	 * Only read PIPECONF if the transcoder's power well is up; a
	 * powered-down transcoder is treated as disabled.
	 */
	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}
1285
1286 static void assert_plane(struct intel_plane *plane, bool state)
1287 {
1288         enum pipe pipe;
1289         bool cur_state;
1290
1291         cur_state = plane->get_hw_state(plane, &pipe);
1292
1293         I915_STATE_WARN(cur_state != state,
1294                         "%s assertion failure (expected %s, current %s)\n",
1295                         plane->base.name, onoff(state), onoff(cur_state));
1296 }
1297
1298 #define assert_plane_enabled(p) assert_plane(p, true)
1299 #define assert_plane_disabled(p) assert_plane(p, false)
1300
1301 static void assert_planes_disabled(struct intel_crtc *crtc)
1302 {
1303         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1304         struct intel_plane *plane;
1305
1306         for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
1307                 assert_plane_disabled(plane);
1308 }
1309
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	/*
	 * drm_crtc_vblank_get() returns 0 iff it succeeded in taking a
	 * vblank reference, i.e. vblank interrupts are (unexpectedly)
	 * enabled; drop the reference again so it isn't leaked.
	 */
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
1315
1316 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1317                                     enum pipe pipe)
1318 {
1319         u32 val;
1320         bool enabled;
1321
1322         val = I915_READ(PCH_TRANSCONF(pipe));
1323         enabled = !!(val & TRANS_ENABLE);
1324         I915_STATE_WARN(enabled,
1325              "transcoder assertion failed, should be off on pipe %c but is still active\n",
1326              pipe_name(pipe));
1327 }
1328
1329 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1330                                    enum pipe pipe, enum port port,
1331                                    i915_reg_t dp_reg)
1332 {
1333         enum pipe port_pipe;
1334         bool state;
1335
1336         state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
1337
1338         I915_STATE_WARN(state && port_pipe == pipe,
1339                         "PCH DP %c enabled on transcoder %c, should be disabled\n",
1340                         port_name(port), pipe_name(pipe));
1341
1342         I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
1343                         "IBX PCH DP %c still using transcoder B\n",
1344                         port_name(port));
1345 }
1346
1347 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1348                                      enum pipe pipe, enum port port,
1349                                      i915_reg_t hdmi_reg)
1350 {
1351         enum pipe port_pipe;
1352         bool state;
1353
1354         state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
1355
1356         I915_STATE_WARN(state && port_pipe == pipe,
1357                         "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
1358                         port_name(port), pipe_name(pipe));
1359
1360         I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
1361                         "IBX PCH HDMI %c still using transcoder B\n",
1362                         port_name(port));
1363 }
1364
/*
 * Assert that no PCH port (DP, VGA/CRT, LVDS, HDMI) is still routed to
 * @pipe's transcoder.
 */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
1388
/* Program and enable the VLV DPLL, then wait for it to lock. */
static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Write the precomputed DPLL value and let it stabilize. */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	udelay(150);

	/* Wait for the PLL to report lock. */
	if (intel_wait_for_register(dev_priv,
				    DPLL(pipe),
				    DPLL_LOCK_VLV,
				    DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}
1406
/* Enable the DPLL for @crtc on VLV according to the computed state. */
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	/* Only actually enable the VCO when the computed state asks for it. */
	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	/* DPLL_MD is written unconditionally, VCO enabled or not. */
	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}
1424
1425
/* Program and enable the CHV DPLL, then wait for it to lock. */
static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	/* DPIO sideband access requires sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_wait_for_register(dev_priv,
				    DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}
1457
/* Enable the DPLL for @crtc on CHV according to the computed state. */
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	/* Only actually enable the VCO when the computed state asks for it. */
	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		/* Cache the value so HW readout can reconstruct it later. */
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}
1494
1495 static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
1496 {
1497         struct intel_crtc *crtc;
1498         int count = 0;
1499
1500         for_each_intel_crtc(&dev_priv->drm, crtc) {
1501                 count += crtc->base.state->active &&
1502                         intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
1503         }
1504
1505         return count;
1506 }
1507
/* Enable the DPLL for @crtc on gen2-4 style hardware. */
static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/* Enable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) {
		/*
		 * It appears to be important that we don't enable this
		 * for the current pipe before otherwise configuring the
		 * PLL. No idea how this should be handled if multiple
		 * DVO outputs are enabled simultaneosly.
		 */
		dpll |= DPLL_DVO_2X_MODE;
		I915_WRITE(DPLL(!crtc->pipe),
			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
	}

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, 0);

	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		/* Gen4+ has a separate DPLL_MD register. */
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		I915_WRITE(reg, dpll);
		POSTING_READ(reg);
		udelay(150); /* wait for warmup */
	}
}
1567
/*
 * Disable the i9xx-style display PLL for @crtc. On 830 the PLLs are
 * left running (only the shared DVO 2x clock is torn down when the
 * last DVO pipe goes away); on other platforms the DPLL is disabled
 * after asserting the pipe no longer uses it.
 */
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* Disable DVO 2x clock on both PLLs if necessary */
        if (IS_I830(dev_priv) &&
            intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
            !intel_num_dvo_pipes(dev_priv)) {
                I915_WRITE(DPLL(PIPE_B),
                           I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
                I915_WRITE(DPLL(PIPE_A),
                           I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
        }

        /* Don't disable pipe or pipe PLLs if needed */
        if (IS_I830(dev_priv))
                return;

        /* Make sure the pipe isn't still relying on us */
        assert_pipe_disabled(dev_priv, pipe);

        /* leave VGA mode disabled while the PLL itself is off */
        I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
        POSTING_READ(DPLL(pipe));
}
1593
/*
 * Disable the VLV display PLL for @pipe while keeping the reference
 * clock (and, for pipes other than A, the CRI clock used by eDP/DSI
 * PHYs) enabled.
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        u32 val;

        /* Make sure the pipe isn't still relying on us */
        assert_pipe_disabled(dev_priv, pipe);

        val = DPLL_INTEGRATED_REF_CLK_VLV |
                DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
        if (pipe != PIPE_A)
                val |= DPLL_INTEGRATED_CRI_CLK_VLV;

        I915_WRITE(DPLL(pipe), val);
        POSTING_READ(DPLL(pipe));
}
1609
/*
 * Disable the CHV display PLL for @pipe. Keeps the SSC reference clock
 * (and the CRI clock for pipes other than A) running, then turns off
 * the 10-bit DPIO clock to the display controller via the sideband bus.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        u32 val;

        /* Make sure the pipe isn't still relying on us */
        assert_pipe_disabled(dev_priv, pipe);

        val = DPLL_SSC_REF_CLK_CHV |
                DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
        if (pipe != PIPE_A)
                val |= DPLL_INTEGRATED_CRI_CLK_VLV;

        I915_WRITE(DPLL(pipe), val);
        POSTING_READ(DPLL(pipe));

        /* DPIO accesses must be serialized via the sideband lock */
        mutex_lock(&dev_priv->sb_lock);

        /* Disable 10bit clock to display controller */
        val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
        val &= ~DPIO_DCLKP_EN;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

        mutex_unlock(&dev_priv->sb_lock);
}
1635
/*
 * Wait (up to 1 ms) for the PHY status bits of @dport to reach
 * @expected_mask, warning on timeout. The status register and the
 * position of the ready bits depend on which port is being polled;
 * port C's bits sit 4 bits above port B's in the same register.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
                         struct intel_digital_port *dport,
                         unsigned int expected_mask)
{
        u32 port_mask;
        i915_reg_t dpll_reg;

        switch (dport->base.port) {
        case PORT_B:
                port_mask = DPLL_PORTB_READY_MASK;
                dpll_reg = DPLL(0);
                break;
        case PORT_C:
                port_mask = DPLL_PORTC_READY_MASK;
                dpll_reg = DPLL(0);
                expected_mask <<= 4;
                break;
        case PORT_D:
                port_mask = DPLL_PORTD_READY_MASK;
                dpll_reg = DPIO_PHY_STATUS;
                break;
        default:
                BUG();
        }

        if (intel_wait_for_register(dev_priv,
                                    dpll_reg, port_mask, expected_mask,
                                    1000))
                WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
                     port_name(dport->base.port),
                     I915_READ(dpll_reg) & port_mask, expected_mask);
}
1668
1669 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1670                                            enum pipe pipe)
1671 {
1672         struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
1673                                                                 pipe);
1674         i915_reg_t reg;
1675         uint32_t val, pipeconf_val;
1676
1677         /* Make sure PCH DPLL is enabled */
1678         assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);
1679
1680         /* FDI must be feeding us bits for PCH ports */
1681         assert_fdi_tx_enabled(dev_priv, pipe);
1682         assert_fdi_rx_enabled(dev_priv, pipe);
1683
1684         if (HAS_PCH_CPT(dev_priv)) {
1685                 /* Workaround: Set the timing override bit before enabling the
1686                  * pch transcoder. */
1687                 reg = TRANS_CHICKEN2(pipe);
1688                 val = I915_READ(reg);
1689                 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1690                 I915_WRITE(reg, val);
1691         }
1692
1693         reg = PCH_TRANSCONF(pipe);
1694         val = I915_READ(reg);
1695         pipeconf_val = I915_READ(PIPECONF(pipe));
1696
1697         if (HAS_PCH_IBX(dev_priv)) {
1698                 /*
1699                  * Make the BPC in transcoder be consistent with
1700                  * that in pipeconf reg. For HDMI we must use 8bpc
1701                  * here for both 8bpc and 12bpc.
1702                  */
1703                 val &= ~PIPECONF_BPC_MASK;
1704                 if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI))
1705                         val |= PIPECONF_8BPC;
1706                 else
1707                         val |= pipeconf_val & PIPECONF_BPC_MASK;
1708         }
1709
1710         val &= ~TRANS_INTERLACE_MASK;
1711         if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1712                 if (HAS_PCH_IBX(dev_priv) &&
1713                     intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
1714                         val |= TRANS_LEGACY_INTERLACED_ILK;
1715                 else
1716                         val |= TRANS_INTERLACED;
1717         else
1718                 val |= TRANS_PROGRESSIVE;
1719
1720         I915_WRITE(reg, val | TRANS_ENABLE);
1721         if (intel_wait_for_register(dev_priv,
1722                                     reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
1723                                     100))
1724                 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1725 }
1726
/*
 * Enable the single LPT PCH transcoder, feeding it from @cpu_transcoder.
 * LPT only has one PCH transcoder, wired to FDI RX on pipe A. The
 * interlace mode is copied from the CPU transcoder's PIPECONF; the
 * wait for the enabled state times out after 100 ms.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
                                      enum transcoder cpu_transcoder)
{
        u32 val, pipeconf_val;

        /* FDI must be feeding us bits for PCH ports */
        assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
        assert_fdi_rx_enabled(dev_priv, PIPE_A);

        /* Workaround: set timing override bit. */
        val = I915_READ(TRANS_CHICKEN2(PIPE_A));
        val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
        I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

        val = TRANS_ENABLE;
        pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

        if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
            PIPECONF_INTERLACED_ILK)
                val |= TRANS_INTERLACED;
        else
                val |= TRANS_PROGRESSIVE;

        I915_WRITE(LPT_TRANSCONF, val);
        if (intel_wait_for_register(dev_priv,
                                    LPT_TRANSCONF,
                                    TRANS_STATE_ENABLE,
                                    TRANS_STATE_ENABLE,
                                    100))
                DRM_ERROR("Failed to enable PCH transcoder\n");
}
1758
/*
 * Disable the PCH transcoder for @pipe on ILK-style PCHs. FDI and the
 * PCH ports must already be off. Waits (up to 50 ms) for the transcoder
 * to report the disabled state, then undoes the CPT timing-override
 * workaround applied at enable time.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
                                            enum pipe pipe)
{
        i915_reg_t reg;
        uint32_t val;

        /* FDI relies on the transcoder */
        assert_fdi_tx_disabled(dev_priv, pipe);
        assert_fdi_rx_disabled(dev_priv, pipe);

        /* Ports must be off as well */
        assert_pch_ports_disabled(dev_priv, pipe);

        reg = PCH_TRANSCONF(pipe);
        val = I915_READ(reg);
        val &= ~TRANS_ENABLE;
        I915_WRITE(reg, val);
        /* wait for PCH transcoder off, transcoder state */
        if (intel_wait_for_register(dev_priv,
                                    reg, TRANS_STATE_ENABLE, 0,
                                    50))
                DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

        if (HAS_PCH_CPT(dev_priv)) {
                /* Workaround: Clear the timing override chicken bit again. */
                reg = TRANS_CHICKEN2(pipe);
                val = I915_READ(reg);
                val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
                I915_WRITE(reg, val);
        }
}
1790
/*
 * Disable the single LPT PCH transcoder, waiting (up to 50 ms) for it
 * to report the disabled state, and clear the timing-override
 * workaround bit set by lpt_enable_pch_transcoder().
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
        u32 val;

        val = I915_READ(LPT_TRANSCONF);
        val &= ~TRANS_ENABLE;
        I915_WRITE(LPT_TRANSCONF, val);
        /* wait for PCH transcoder off, transcoder state */
        if (intel_wait_for_register(dev_priv,
                                    LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
                                    50))
                DRM_ERROR("Failed to disable PCH transcoder\n");

        /* Workaround: clear timing override bit. */
        val = I915_READ(TRANS_CHICKEN2(PIPE_A));
        val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
        I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
1809
1810 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1811 {
1812         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1813
1814         if (HAS_PCH_LPT(dev_priv))
1815                 return PIPE_A;
1816         else
1817                 return crtc->pipe;
1818 }
1819
/*
 * Enable the CPU pipe for @new_crtc_state. All planes must already be
 * disabled, and the clock source driving the pipe (pipe PLL, DSI PLL,
 * or the FDI PLLs when driving the PCH) must already be running. On
 * 830 a pipe that is already enabled is left alone, since both pipes
 * are kept enabled on that platform.
 */
static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
        enum pipe pipe = crtc->pipe;
        i915_reg_t reg;
        u32 val;

        DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

        assert_planes_disabled(crtc);

        /*
         * A pipe without a PLL won't actually be able to drive bits from
         * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
         * need the check.
         */
        if (HAS_GMCH_DISPLAY(dev_priv)) {
                if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
                        assert_dsi_pll_enabled(dev_priv);
                else
                        assert_pll_enabled(dev_priv, pipe);
        } else {
                if (new_crtc_state->has_pch_encoder) {
                        /* if driving the PCH, we need FDI enabled */
                        assert_fdi_rx_pll_enabled(dev_priv,
                                                  intel_crtc_pch_transcoder(crtc));
                        assert_fdi_tx_pll_enabled(dev_priv,
                                                  (enum pipe) cpu_transcoder);
                }
                /* FIXME: assert CPU port conditions for SNB+ */
        }

        reg = PIPECONF(cpu_transcoder);
        val = I915_READ(reg);
        if (val & PIPECONF_ENABLE) {
                /* we keep both pipes enabled on 830 */
                WARN_ON(!IS_I830(dev_priv));
                return;
        }

        I915_WRITE(reg, val | PIPECONF_ENABLE);
        POSTING_READ(reg);

        /*
         * Until the pipe starts PIPEDSL reads will return a stale value,
         * which causes an apparent vblank timestamp jump when PIPEDSL
         * resets to its proper value. That also messes up the frame count
         * when it's derived from the timestamps. So let's wait for the
         * pipe to start properly before we call drm_crtc_vblank_on()
         */
        if (dev_priv->drm.max_vblank_count == 0)
                intel_wait_for_pipe_scanline_moving(crtc);
}
1875
/*
 * Disable the CPU pipe for @old_crtc_state. All planes must already be
 * off. On 830 the pipe is left enabled (both pipes stay on there), but
 * double-wide mode is still cleared. Waits for the pipe to actually
 * stop only when the enable bit was cleared.
 */
static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
        enum pipe pipe = crtc->pipe;
        i915_reg_t reg;
        u32 val;

        DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

        /*
         * Make sure planes won't keep trying to pump pixels to us,
         * or we might hang the display.
         */
        assert_planes_disabled(crtc);

        reg = PIPECONF(cpu_transcoder);
        val = I915_READ(reg);
        /* already disabled, nothing to do */
        if ((val & PIPECONF_ENABLE) == 0)
                return;

        /*
         * Double wide has implications for planes
         * so best keep it disabled when not needed.
         */
        if (old_crtc_state->double_wide)
                val &= ~PIPECONF_DOUBLE_WIDE;

        /* Don't disable pipe or pipe PLLs if needed */
        if (!IS_I830(dev_priv))
                val &= ~PIPECONF_ENABLE;

        I915_WRITE(reg, val);
        if ((val & PIPECONF_ENABLE) == 0)
                intel_wait_for_pipe_off(old_crtc_state);
}
1913
/* Size in bytes of one hardware tile: 2 KiB on gen2, 4 KiB otherwise. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
        if (IS_GEN2(dev_priv))
                return 2048;

        return 4096;
}
1918
/*
 * Return the tile width in bytes for @color_plane of @fb, based on the
 * fb's tiling modifier, the platform, and the plane's bytes per pixel.
 * Linear buffers report a "tile" width of one pixel (cpp bytes); the
 * CCS (color plane 1) surfaces of the *_CCS modifiers use 128 bytes.
 */
static unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
        struct drm_i915_private *dev_priv = to_i915(fb->dev);
        unsigned int cpp = fb->format->cpp[color_plane];

        switch (fb->modifier) {
        case DRM_FORMAT_MOD_LINEAR:
                return cpp;
        case I915_FORMAT_MOD_X_TILED:
                if (IS_GEN2(dev_priv))
                        return 128;
                else
                        return 512;
        case I915_FORMAT_MOD_Y_TILED_CCS:
                if (color_plane == 1)
                        return 128;
                /* fall through */
        case I915_FORMAT_MOD_Y_TILED:
                if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
                        return 128;
                else
                        return 512;
        case I915_FORMAT_MOD_Yf_TILED_CCS:
                if (color_plane == 1)
                        return 128;
                /* fall through */
        case I915_FORMAT_MOD_Yf_TILED:
                /* Yf tile width depends on the pixel size */
                switch (cpp) {
                case 1:
                        return 64;
                case 2:
                case 4:
                        return 128;
                case 8:
                case 16:
                        return 256;
                default:
                        MISSING_CASE(cpp);
                        return cpp;
                }
                break;
        default:
                MISSING_CASE(fb->modifier);
                return cpp;
        }
}
1966
1967 static unsigned int
1968 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
1969 {
1970         if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
1971                 return 1;
1972         else
1973                 return intel_tile_size(to_i915(fb->dev)) /
1974                         intel_tile_width_bytes(fb, color_plane);
1975 }
1976
1977 /* Return the tile dimensions in pixel units */
1978 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
1979                             unsigned int *tile_width,
1980                             unsigned int *tile_height)
1981 {
1982         unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
1983         unsigned int cpp = fb->format->cpp[color_plane];
1984
1985         *tile_width = tile_width_bytes / cpp;
1986         *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
1987 }
1988
/* Round @height up to a whole number of tiles for @color_plane of @fb. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
                      int color_plane, unsigned int height)
{
        return ALIGN(height, intel_tile_height(fb, color_plane));
}
1997
1998 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
1999 {
2000         unsigned int size = 0;
2001         int i;
2002
2003         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
2004                 size += rot_info->plane[i].width * rot_info->plane[i].height;
2005
2006         return size;
2007 }
2008
2009 static void
2010 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2011                         const struct drm_framebuffer *fb,
2012                         unsigned int rotation)
2013 {
2014         view->type = I915_GGTT_VIEW_NORMAL;
2015         if (drm_rotation_90_or_270(rotation)) {
2016                 view->type = I915_GGTT_VIEW_ROTATED;
2017                 view->rotated = to_intel_framebuffer(fb)->rot_info;
2018         }
2019 }
2020
/* Required GGTT alignment for cursor buffers on the given platform. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
        if (IS_I830(dev_priv))
                return 16 * 1024;

        if (IS_I85X(dev_priv))
                return 256;

        if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
                return 32;

        return 4 * 1024;
}
2032
/* Required GGTT alignment for linear scanout buffers per platform. */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
        if (INTEL_GEN(dev_priv) >= 9)
                return 256 * 1024;

        if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
            IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                return 128 * 1024;

        if (INTEL_GEN(dev_priv) >= 4)
                return 4 * 1024;

        /* older platforms have no linear alignment requirement */
        return 0;
}
2045
/*
 * Required GGTT alignment for scanning out @color_plane of @fb, based
 * on the fb's tiling modifier. A return of 0 means no extra alignment
 * requirement.
 */
static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
                                         int color_plane)
{
        struct drm_i915_private *dev_priv = to_i915(fb->dev);

        /* AUX_DIST needs only 4K alignment */
        if (color_plane == 1)
                return 4096;

        switch (fb->modifier) {
        case DRM_FORMAT_MOD_LINEAR:
                return intel_linear_alignment(dev_priv);
        case I915_FORMAT_MOD_X_TILED:
                if (INTEL_GEN(dev_priv) >= 9)
                        return 256 * 1024;
                return 0;
        case I915_FORMAT_MOD_Y_TILED_CCS:
        case I915_FORMAT_MOD_Yf_TILED_CCS:
        case I915_FORMAT_MOD_Y_TILED:
        case I915_FORMAT_MOD_Yf_TILED:
                return 1 * 1024 * 1024;
        default:
                MISSING_CASE(fb->modifier);
                return 0;
        }
}
2072
2073 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2074 {
2075         struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2076         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2077
2078         return INTEL_GEN(dev_priv) < 4 || plane->has_fbc;
2079 }
2080
/*
 * Pin @fb's backing object into the GGTT for scanout through @view and,
 * when @uses_fence allows it and the vma is map-and-fenceable, install
 * a fence register (reported via PLANE_HAS_FENCE in @out_flags).
 *
 * Returns the pinned vma (with an extra reference taken) on success,
 * or an ERR_PTR on failure. Caller must hold struct_mutex and release
 * the pin via intel_unpin_fb_vma().
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
                           const struct i915_ggtt_view *view,
                           bool uses_fence,
                           unsigned long *out_flags)
{
        struct drm_device *dev = fb->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct i915_vma *vma;
        unsigned int pinctl;
        u32 alignment;

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        alignment = intel_surf_alignment(fb, 0);

        /* Note that the w/a also requires 64 PTE of padding following the
         * bo. We currently fill all unused PTE with the shadow page and so
         * we should always have valid PTE following the scanout preventing
         * the VT-d warning.
         */
        if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
                alignment = 256 * 1024;

        /*
         * Global gtt pte registers are special registers which actually forward
         * writes to a chunk of system memory. Which means that there is no risk
         * that the register values disappear as soon as we call
         * intel_runtime_pm_put(), so it is correct to wrap only the
         * pin/unpin/fence and not more.
         */
        intel_runtime_pm_get(dev_priv);

        atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

        pinctl = 0;

        /* Valleyview is definitely limited to scanning out the first
         * 512MiB. Lets presume this behaviour was inherited from the
         * g4x display engine and that all earlier gen are similarly
         * limited. Testing suggests that it is a little more
         * complicated than this. For example, Cherryview appears quite
         * happy to scanout from anywhere within its global aperture.
         */
        if (HAS_GMCH_DISPLAY(dev_priv))
                pinctl |= PIN_MAPPABLE;

        vma = i915_gem_object_pin_to_display_plane(obj,
                                                   alignment, view, pinctl);
        if (IS_ERR(vma))
                goto err;

        if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
                int ret;

                /* Install a fence for tiled scan-out. Pre-i965 always needs a
                 * fence, whereas 965+ only requires a fence if using
                 * framebuffer compression.  For simplicity, we always, when
                 * possible, install a fence as the cost is not that onerous.
                 *
                 * If we fail to fence the tiled scanout, then either the
                 * modeset will reject the change (which is highly unlikely as
                 * the affected systems, all but one, do not have unmappable
                 * space) or we will not be able to enable full powersaving
                 * techniques (also likely not to apply due to various limits
                 * FBC and the like impose on the size of the buffer, which
                 * presumably we violated anyway with this unmappable buffer).
                 * Anyway, it is presumably better to stumble onwards with
                 * something and try to run the system in a "less than optimal"
                 * mode that matches the user configuration.
                 */
                ret = i915_vma_pin_fence(vma);
                if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
                        /* pre-gen4 cannot scan out without a fence: bail */
                        i915_gem_object_unpin_from_display_plane(vma);
                        vma = ERR_PTR(ret);
                        goto err;
                }

                if (ret == 0 && vma->fence)
                        *out_flags |= PLANE_HAS_FENCE;
        }

        /* keep a reference for the caller; dropped in intel_unpin_fb_vma() */
        i915_vma_get(vma);
err:
        atomic_dec(&dev_priv->gpu_error.pending_fb_pin);

        intel_runtime_pm_put(dev_priv);
        return vma;
}
2171
/*
 * Undo intel_pin_and_fence_fb_obj(): release the fence (if one was
 * installed, per PLANE_HAS_FENCE in @flags), unpin from the display
 * plane and drop the vma reference. Caller must hold struct_mutex.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

        if (flags & PLANE_HAS_FENCE)
                i915_vma_unpin_fence(vma);
        i915_gem_object_unpin_from_display_plane(vma);
        i915_vma_put(vma);
}
2181
2182 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2183                           unsigned int rotation)
2184 {
2185         if (drm_rotation_90_or_270(rotation))
2186                 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2187         else
2188                 return fb->pitches[color_plane];
2189 }
2190
2191 /*
2192  * Convert the x/y offsets into a linear offset.
2193  * Only valid with 0/180 degree rotation, which is fine since linear
2194  * offset is only used with linear buffers on pre-hsw and tiled buffers
2195  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2196  */
2197 u32 intel_fb_xy_to_linear(int x, int y,
2198                           const struct intel_plane_state *state,
2199                           int color_plane)
2200 {
2201         const struct drm_framebuffer *fb = state->base.fb;
2202         unsigned int cpp = fb->format->cpp[color_plane];
2203         unsigned int pitch = state->color_plane[color_plane].stride;
2204
2205         return y * pitch + x * cpp;
2206 }
2207
2208 /*
2209  * Add the x/y offsets derived from fb->offsets[] to the user
2210  * specified plane src x/y offsets. The resulting x/y offsets
2211  * specify the start of scanout from the beginning of the gtt mapping.
2212  */
2213 void intel_add_fb_offsets(int *x, int *y,
2214                           const struct intel_plane_state *state,
2215                           int color_plane)
2216
2217 {
2218         const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
2219         unsigned int rotation = state->base.rotation;
2220
2221         if (drm_rotation_90_or_270(rotation)) {
2222                 *x += intel_fb->rotated[color_plane].x;
2223                 *y += intel_fb->rotated[color_plane].y;
2224         } else {
2225                 *x += intel_fb->normal[color_plane].x;
2226                 *y += intel_fb->normal[color_plane].y;
2227         }
2228 }
2229
2230 static u32 intel_adjust_tile_offset(int *x, int *y,
2231                                     unsigned int tile_width,
2232                                     unsigned int tile_height,
2233                                     unsigned int tile_size,
2234                                     unsigned int pitch_tiles,
2235                                     u32 old_offset,
2236                                     u32 new_offset)
2237 {
2238         unsigned int pitch_pixels = pitch_tiles * tile_width;
2239         unsigned int tiles;
2240
2241         WARN_ON(old_offset & (tile_size - 1));
2242         WARN_ON(new_offset & (tile_size - 1));
2243         WARN_ON(new_offset > old_offset);
2244
2245         tiles = (old_offset - new_offset) / tile_size;
2246
2247         *y += tiles / pitch_tiles * tile_height;
2248         *x += tiles % pitch_tiles * tile_width;
2249
2250         /* minimize x in case it got needlessly big */
2251         *y += *x / pitch_pixels * tile_height;
2252         *x %= pitch_pixels;
2253
2254         return new_offset;
2255 }
2256
/*
 * Rebase an aligned surface offset from @old_offset to @new_offset,
 * moving the difference into the x/y coordinates. For tiled fbs the
 * difference is expressed in whole tiles; for linear fbs it is split
 * into rows (@pitch bytes each) and pixels. @new_offset must not
 * exceed @old_offset. Returns @new_offset.
 */
static u32 intel_adjust_aligned_offset(int *x, int *y,
                                       const struct drm_framebuffer *fb,
                                       int color_plane,
                                       unsigned int rotation,
                                       unsigned int pitch,
                                       u32 old_offset, u32 new_offset)
{
        struct drm_i915_private *dev_priv = to_i915(fb->dev);
        unsigned int cpp = fb->format->cpp[color_plane];

        WARN_ON(new_offset > old_offset);

        if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
                unsigned int tile_size, tile_width, tile_height;
                unsigned int pitch_tiles;

                tile_size = intel_tile_size(dev_priv);
                intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

                if (drm_rotation_90_or_270(rotation)) {
                        /* in the rotated view pitch is in tile_height units */
                        pitch_tiles = pitch / tile_height;
                        swap(tile_width, tile_height);
                } else {
                        pitch_tiles = pitch / (tile_width * cpp);
                }

                intel_adjust_tile_offset(x, y, tile_width, tile_height,
                                         tile_size, pitch_tiles,
                                         old_offset, new_offset);
        } else {
                /* linear: convert the byte delta back into rows + pixels */
                old_offset += *y * pitch + *x * cpp;

                *y = (old_offset - new_offset) / pitch;
                *x = ((old_offset - new_offset) - *y * pitch) / cpp;
        }

        return new_offset;
}
2295
2296 /*
2297  * Adjust the tile offset by moving the difference into
2298  * the x/y offsets.
2299  */
2300 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2301                                              const struct intel_plane_state *state,
2302                                              int color_plane,
2303                                              u32 old_offset, u32 new_offset)
2304 {
2305         return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane,
2306                                            state->base.rotation,
2307                                            state->color_plane[color_plane].stride,
2308                                            old_offset, new_offset);
2309 }
2310
2311 /*
2312  * Computes the aligned offset to the base tile and adjusts
2313  * x, y. bytes per pixel is assumed to be a power-of-two.
2314  *
2315  * In the 90/270 rotated case, x and y are assumed
2316  * to be already rotated to match the rotated GTT view, and
2317  * pitch is the tile_height aligned framebuffer height.
2318  *
2319  * This function is used when computing the derived information
2320  * under intel_framebuffer, so using any of that information
2321  * here is not allowed. Anything under drm_framebuffer can be
2322  * used. This is why the user has to pass in the pitch since it
2323  * is specified in the rotated orientation.
2324  */
static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
					int *x, int *y,
					const struct drm_framebuffer *fb,
					int color_plane,
					unsigned int pitch,
					unsigned int rotation,
					u32 alignment)
{
	uint64_t fb_modifier = fb->modifier;
	unsigned int cpp = fb->format->cpp[color_plane];
	u32 offset, offset_aligned;

	/*
	 * Turn the power-of-two alignment into a bit mask (0 stays 0,
	 * i.e. no alignment requirement).
	 */
	if (alignment)
		alignment--;

	if (fb_modifier != DRM_FORMAT_MOD_LINEAR) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int tile_rows, tiles, pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/*
			 * For 90/270 the pitch is the tile-aligned fb height,
			 * and the tile dims are swapped to the rotated view.
			 */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		/* Split x/y into whole tiles plus an intra-tile remainder. */
		tile_rows = *y / tile_height;
		*y %= tile_height;

		tiles = *x / tile_width;
		*x %= tile_width;

		/* Byte offset of the containing tile, then align it down. */
		offset = (tile_rows * pitch_tiles + tiles) * tile_size;
		offset_aligned = offset & ~alignment;

		/* Fold the alignment remainder back into the x/y offsets. */
		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 offset, offset_aligned);
	} else {
		/* Linear: plain byte arithmetic, remainder back into x/y. */
		offset = *y * pitch + *x * cpp;
		offset_aligned = offset & ~alignment;

		*y = (offset & alignment) / pitch;
		*x = ((offset & alignment) - *y * pitch) / cpp;
	}

	return offset_aligned;
}
2376
2377 static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2378                                               const struct intel_plane_state *state,
2379                                               int color_plane)
2380 {
2381         struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
2382         struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2383         const struct drm_framebuffer *fb = state->base.fb;
2384         unsigned int rotation = state->base.rotation;
2385         int pitch = state->color_plane[color_plane].stride;
2386         u32 alignment;
2387
2388         if (intel_plane->id == PLANE_CURSOR)
2389                 alignment = intel_cursor_alignment(dev_priv);
2390         else
2391                 alignment = intel_surf_alignment(fb, color_plane);
2392
2393         return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2394                                             pitch, rotation, alignment);
2395 }
2396
2397 /* Convert the fb->offset[] into x/y offsets */
2398 static int intel_fb_offset_to_xy(int *x, int *y,
2399                                  const struct drm_framebuffer *fb,
2400                                  int color_plane)
2401 {
2402         struct drm_i915_private *dev_priv = to_i915(fb->dev);
2403
2404         if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
2405             fb->offsets[color_plane] % intel_tile_size(dev_priv))
2406                 return -EINVAL;
2407
2408         *x = 0;
2409         *y = 0;
2410
2411         intel_adjust_aligned_offset(x, y,
2412                                     fb, color_plane, DRM_MODE_ROTATE_0,
2413                                     fb->pitches[color_plane],
2414                                     fb->offsets[color_plane], 0);
2415
2416         return 0;
2417 }
2418
2419 static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier)
2420 {
2421         switch (fb_modifier) {
2422         case I915_FORMAT_MOD_X_TILED:
2423                 return I915_TILING_X;
2424         case I915_FORMAT_MOD_Y_TILED:
2425         case I915_FORMAT_MOD_Y_TILED_CCS:
2426                 return I915_TILING_Y;
2427         default:
2428                 return I915_TILING_NONE;
2429         }
2430 }
2431
2432 /*
2433  * From the Sky Lake PRM:
2434  * "The Color Control Surface (CCS) contains the compression status of
2435  *  the cache-line pairs. The compression state of the cache-line pair
2436  *  is specified by 2 bits in the CCS. Each CCS cache-line represents
2437  *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
2438  *  cache-line-pairs. CCS is always Y tiled."
2439  *
2440  * Since cache line pairs refers to horizontally adjacent cache lines,
2441  * each cache line in the CCS corresponds to an area of 32x16 cache
2442  * lines on the main surface. Since each pixel is 4 bytes, this gives
2443  * us a ratio of one byte in the CCS for each 8x16 pixels in the
2444  * main surface.
2445  */
static const struct drm_format_info ccs_formats[] = {
	/* cpp[1] == 1: one CCS byte per 8x16 (hsub x vsub) main-surface pixels */
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
};
2452
2453 static const struct drm_format_info *
2454 lookup_format_info(const struct drm_format_info formats[],
2455                    int num_formats, u32 format)
2456 {
2457         int i;
2458
2459         for (i = 0; i < num_formats; i++) {
2460                 if (formats[i].format == format)
2461                         return &formats[i];
2462         }
2463
2464         return NULL;
2465 }
2466
2467 static const struct drm_format_info *
2468 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2469 {
2470         switch (cmd->modifier[0]) {
2471         case I915_FORMAT_MOD_Y_TILED_CCS:
2472         case I915_FORMAT_MOD_Yf_TILED_CCS:
2473                 return lookup_format_info(ccs_formats,
2474                                           ARRAY_SIZE(ccs_formats),
2475                                           cmd->pixel_format);
2476         default:
2477                 return NULL;
2478         }
2479 }
2480
2481 bool is_ccs_modifier(u64 modifier)
2482 {
2483         return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2484                modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2485 }
2486
/*
 * Compute the derived layout information for a framebuffer: the x/y
 * offsets of each color plane in the normal GTT view, the rotated-view
 * layout (rot_info + rotated[] x/y/pitch), and validate that the fb
 * fits inside its backing object. Returns 0 or a negative error code.
 */
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
		   struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *rot_info = &intel_fb->rot_info;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 gtt_offset_rotated = 0;
	/* Largest tile index used by any plane; checked against bo size. */
	unsigned int max_size = 0;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);

	for (i = 0; i < num_planes; i++) {
		unsigned int width, height;
		unsigned int cpp, size;
		u32 offset;
		int x, y;
		int ret;

		cpp = fb->format->cpp[i];
		width = drm_framebuffer_plane_width(fb->width, fb, i);
		height = drm_framebuffer_plane_height(fb->height, fb, i);

		/* Convert the user-supplied byte offset into x/y. */
		ret = intel_fb_offset_to_xy(&x, &y, fb, i);
		if (ret) {
			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
				      i, fb->offsets[i]);
			return ret;
		}

		/* Plane 1 of a CCS fb is the compression-control surface. */
		if (is_ccs_modifier(fb->modifier) && i == 1) {
			int hsub = fb->format->hsub;
			int vsub = fb->format->vsub;
			int tile_width, tile_height;
			int main_x, main_y;
			int ccs_x, ccs_y;

			/* Scale CCS tile dims to main-surface pixel units. */
			intel_tile_dims(fb, i, &tile_width, &tile_height);
			tile_width *= hsub;
			tile_height *= vsub;

			ccs_x = (x * hsub) % tile_width;
			ccs_y = (y * vsub) % tile_height;
			main_x = intel_fb->normal[0].x % tile_width;
			main_y = intel_fb->normal[0].y % tile_height;

			/*
			 * CCS doesn't have its own x/y offset register, so the intra CCS tile
			 * x/y offsets must match between CCS and the main surface.
			 */
			if (main_x != ccs_x || main_y != ccs_y) {
				DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
					      main_x, main_y,
					      ccs_x, ccs_y,
					      intel_fb->normal[0].x,
					      intel_fb->normal[0].y,
					      x, y);
				return -EINVAL;
			}
		}

		/*
		 * The fence (if used) is aligned to the start of the object
		 * so having the framebuffer wrap around across the edge of the
		 * fenced region doesn't really work. We have no API to configure
		 * the fence start offset within the object (nor could we probably
		 * on gen2/3). So it's just easier if we just require that the
		 * fb layout agrees with the fence layout. We already check that the
		 * fb stride matches the fence stride elsewhere.
		 */
		if (i == 0 && i915_gem_object_is_tiled(obj) &&
		    (x + width) * cpp > fb->pitches[i]) {
			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
				      i, fb->offsets[i]);
			return -EINVAL;
		}

		/*
		 * First pixel of the framebuffer from
		 * the start of the normal gtt mapping.
		 */
		intel_fb->normal[i].x = x;
		intel_fb->normal[i].y = y;

		/* Tile-aligned base of this plane, in tiles. */
		offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
						      fb->pitches[i],
						      DRM_MODE_ROTATE_0,
						      tile_size);
		offset /= tile_size;

		if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
			unsigned int tile_width, tile_height;
			unsigned int pitch_tiles;
			struct drm_rect r;

			intel_tile_dims(fb, i, &tile_width, &tile_height);

			rot_info->plane[i].offset = offset;
			rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
			rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
			rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

			intel_fb->rotated[i].pitch =
				rot_info->plane[i].height * tile_height;

			/* how many tiles does this plane need */
			size = rot_info->plane[i].stride * rot_info->plane[i].height;
			/*
			 * If the plane isn't horizontally tile aligned,
			 * we need one more tile.
			 */
			if (x != 0)
				size++;

			/* rotate the x/y offsets to match the GTT view */
			r.x1 = x;
			r.y1 = y;
			r.x2 = x + width;
			r.y2 = y + height;
			drm_rect_rotate(&r,
					rot_info->plane[i].width * tile_width,
					rot_info->plane[i].height * tile_height,
					DRM_MODE_ROTATE_270);
			x = r.x1;
			y = r.y1;

			/* rotate the tile dimensions to match the GTT view */
			pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
			swap(tile_width, tile_height);

			/*
			 * We only keep the x/y offsets, so push all of the
			 * gtt offset into the x/y offsets.
			 */
			intel_adjust_tile_offset(&x, &y,
						 tile_width, tile_height,
						 tile_size, pitch_tiles,
						 gtt_offset_rotated * tile_size, 0);

			/* Rotated planes are packed back to back in the GTT. */
			gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;

			/*
			 * First pixel of the framebuffer from
			 * the start of the rotated gtt mapping.
			 */
			intel_fb->rotated[i].x = x;
			intel_fb->rotated[i].y = y;
		} else {
			/* Linear: size in tiles from the last byte used. */
			size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
					    x * cpp, tile_size);
		}

		/* how many tiles in total needed in the bo */
		max_size = max(max_size, offset + size);
	}

	/* The fb must fit entirely in its backing object. */
	if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
		DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
			      mul_u32_u32(max_size, tile_size), obj->base.size);
		return -EINVAL;
	}

	return 0;
}
2651
2652 static int i9xx_format_to_fourcc(int format)
2653 {
2654         switch (format) {
2655         case DISPPLANE_8BPP:
2656                 return DRM_FORMAT_C8;
2657         case DISPPLANE_BGRX555:
2658                 return DRM_FORMAT_XRGB1555;
2659         case DISPPLANE_BGRX565:
2660                 return DRM_FORMAT_RGB565;
2661         default:
2662         case DISPPLANE_BGRX888:
2663                 return DRM_FORMAT_XRGB8888;
2664         case DISPPLANE_RGBX888:
2665                 return DRM_FORMAT_XBGR8888;
2666         case DISPPLANE_BGRX101010:
2667                 return DRM_FORMAT_XRGB2101010;
2668         case DISPPLANE_RGBX101010:
2669                 return DRM_FORMAT_XBGR2101010;
2670         }
2671 }
2672
2673 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2674 {
2675         switch (format) {
2676         case PLANE_CTL_FORMAT_RGB_565:
2677                 return DRM_FORMAT_RGB565;
2678         case PLANE_CTL_FORMAT_NV12:
2679                 return DRM_FORMAT_NV12;
2680         default:
2681         case PLANE_CTL_FORMAT_XRGB_8888:
2682                 if (rgb_order) {
2683                         if (alpha)
2684                                 return DRM_FORMAT_ABGR8888;
2685                         else
2686                                 return DRM_FORMAT_XBGR8888;
2687                 } else {
2688                         if (alpha)
2689                                 return DRM_FORMAT_ARGB8888;
2690                         else
2691                                 return DRM_FORMAT_XRGB8888;
2692                 }
2693         case PLANE_CTL_FORMAT_XRGB_2101010:
2694                 if (rgb_order)
2695                         return DRM_FORMAT_XBGR2101010;
2696                 else
2697                         return DRM_FORMAT_XRGB2101010;
2698         }
2699 }
2700
/*
 * Try to wrap the firmware/BIOS-programmed scanout memory (preallocated
 * in stolen memory) into a GEM object and an intel framebuffer so the
 * BIOS image can be taken over without a modeset. Returns true on
 * success; on failure the caller falls back to other strategies.
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = NULL;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	/* Page-align the BIOS fb's start and end. */
	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
	u32 size_aligned = round_up(plane_config->base + plane_config->size,
				    PAGE_SIZE);

	size_aligned -= base_aligned;

	if (plane_config->size == 0)
		return false;

	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
	if (size_aligned * 2 > dev_priv->stolen_usable_size)
		return false;

	/* struct_mutex protects the stolen-memory allocator. */
	mutex_lock(&dev->struct_mutex);
	obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
							     base_aligned,
							     base_aligned,
							     size_aligned);
	mutex_unlock(&dev->struct_mutex);
	if (!obj)
		return false;

	/* Carry over the BIOS's fence/tiling setup to the new object. */
	if (plane_config->tiling == I915_TILING_X)
		obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X;

	/* Rebuild the fb creation request from the decoded plane config. */
	mode_cmd.pixel_format = fb->format->format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier;
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out_unref_obj;
	}


	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
	return true;

out_unref_obj:
	i915_gem_object_put(obj);
	return false;
}
2757
2758 static void
2759 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
2760                         struct intel_plane_state *plane_state,
2761                         bool visible)
2762 {
2763         struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2764
2765         plane_state->base.visible = visible;
2766
2767         /* FIXME pre-g4x don't work like this */
2768         if (visible) {
2769                 crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
2770                 crtc_state->active_planes |= BIT(plane->id);
2771         } else {
2772                 crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
2773                 crtc_state->active_planes &= ~BIT(plane->id);
2774         }
2775
2776         DRM_DEBUG_KMS("%s active planes 0x%x\n",
2777                       crtc_state->base.crtc->name,
2778                       crtc_state->active_planes);
2779 }
2780
2781 static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
2782                                          struct intel_plane *plane)
2783 {
2784         struct intel_crtc_state *crtc_state =
2785                 to_intel_crtc_state(crtc->base.state);
2786         struct intel_plane_state *plane_state =
2787                 to_intel_plane_state(plane->base.state);
2788
2789         intel_set_plane_visible(crtc_state, plane_state, false);
2790
2791         if (plane->id == PLANE_PRIMARY)
2792                 intel_pre_disable_primary_noatomic(&crtc->base);
2793
2794         trace_intel_disable_plane(&plane->base, crtc);
2795         plane->disable_plane(plane, crtc);
2796 }
2797
/*
 * Take over the firmware-programmed framebuffer for this crtc's primary
 * plane: try to reconstruct it from stolen memory, otherwise share an
 * fb already in use by another crtc, and as a last resort disable the
 * plane. On success the plane state is fully populated and the fb is
 * pinned.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_i915_gem_object *obj;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct drm_crtc_state *crtc_state = intel_crtc->base.state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	/* Reconstruction failed; the preliminary fb is no longer needed. */
	kfree(plane_config->fb);

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		struct intel_plane_state *state;

		if (c == &intel_crtc->base)
			continue;

		if (!to_intel_crtc(c)->active)
			continue;

		state = to_intel_plane_state(c->primary->state);
		if (!state->vma)
			continue;

		/* Same GGTT address => both crtcs scan out the same fb. */
		if (intel_plane_ggtt_offset(state) == plane_config->base) {
			fb = state->base.fb;
			drm_framebuffer_get(fb);
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB.  Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up.  The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	intel_plane_disable_noatomic(intel_crtc, intel_plane);

	return;

valid_fb:
	intel_fill_fb_ggtt_view(&intel_state->view, fb,
				intel_state->base.rotation);
	intel_state->color_plane[0].stride =
		intel_fb_pitch(fb, 0, intel_state->base.rotation);

	/* Pin the fb into the GGTT so it keeps scanning out. */
	mutex_lock(&dev->struct_mutex);
	intel_state->vma =
		intel_pin_and_fence_fb_obj(fb,
					   &intel_state->view,
					   intel_plane_uses_fence(intel_state),
					   &intel_state->flags);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(intel_state->vma)) {
		DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
			  intel_crtc->pipe, PTR_ERR(intel_state->vma));

		intel_state->vma = NULL;
		drm_framebuffer_put(fb);
		return;
	}

	obj = intel_fb_obj(fb);
	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);

	/* Full-screen src/dst rectangles for the takeover state. */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->base.src = drm_plane_state_src(plane_state);
	intel_state->base.dst = drm_plane_state_dest(plane_state);

	/* Don't disturb the BIOS's swizzle setup for a tiled boot fb. */
	if (i915_gem_object_is_tiled(obj))
		dev_priv->preserve_bios_swizzle = true;

	plane_state->fb = fb;
	plane_state->crtc = &intel_crtc->base;

	intel_set_plane_visible(to_intel_crtc_state(crtc_state),
				to_intel_plane_state(plane_state),
				true);

	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
		  &obj->frontbuffer_bits);
}
2910
2911 static int skl_max_plane_width(const struct drm_framebuffer *fb,
2912                                int color_plane,
2913                                unsigned int rotation)
2914 {
2915         int cpp = fb->format->cpp[color_plane];
2916
2917         switch (fb->modifier) {
2918         case DRM_FORMAT_MOD_LINEAR:
2919         case I915_FORMAT_MOD_X_TILED:
2920                 switch (cpp) {
2921                 case 8:
2922                         return 4096;
2923                 case 4:
2924                 case 2:
2925                 case 1:
2926                         return 8192;
2927                 default:
2928                         MISSING_CASE(cpp);
2929                         break;
2930                 }
2931                 break;
2932         case I915_FORMAT_MOD_Y_TILED_CCS:
2933         case I915_FORMAT_MOD_Yf_TILED_CCS:
2934                 /* FIXME AUX plane? */
2935         case I915_FORMAT_MOD_Y_TILED:
2936         case I915_FORMAT_MOD_Yf_TILED:
2937                 switch (cpp) {
2938                 case 8:
2939                         return 2048;
2940                 case 4:
2941                         return 4096;
2942                 case 2:
2943                 case 1:
2944                         return 8192;
2945                 default:
2946                         MISSING_CASE(cpp);
2947                         break;
2948                 }
2949                 break;
2950         default:
2951                 MISSING_CASE(fb->modifier);
2952         }
2953
2954         return 2048;
2955 }
2956
/*
 * Try to bring the CCS (AUX) surface's intra-tile x/y offsets into
 * agreement with the main surface's, by walking the AUX offset down
 * one alignment step at a time (which pushes the difference into the
 * AUX x/y). Returns true and commits the new AUX offset/x/y on
 * success, false if no matching offset could be found.
 */
static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
					   int main_x, int main_y, u32 main_offset)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int hsub = fb->format->hsub;
	int vsub = fb->format->vsub;
	int aux_x = plane_state->color_plane[1].x;
	int aux_y = plane_state->color_plane[1].y;
	u32 aux_offset = plane_state->color_plane[1].offset;
	u32 alignment = intel_surf_alignment(fb, 1);

	/*
	 * The AUX offset must stay >= the main offset, and lowering the
	 * offset only ever grows aux_y, so stop once it passes main_y.
	 */
	while (aux_offset >= main_offset && aux_y <= main_y) {
		int x, y;

		if (aux_x == main_x && aux_y == main_y)
			break;

		if (aux_offset == 0)
			break;

		/* Adjust in main-surface units, preserving the subsample remainder. */
		x = aux_x / hsub;
		y = aux_y / vsub;
		aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
							       aux_offset, aux_offset - alignment);
		aux_x = x * hsub + aux_x % hsub;
		aux_y = y * vsub + aux_y % vsub;
	}

	if (aux_x != main_x || aux_y != main_y)
		return false;

	plane_state->color_plane[1].offset = aux_offset;
	plane_state->color_plane[1].x = aux_x;
	plane_state->color_plane[1].y = aux_y;

	return true;
}
2994
/*
 * Validate and compute the main-surface offset and x/y for a SKL+
 * plane: enforce the max source size, keep the offset below the AUX
 * surface offset, work around the X-tiled x+width-vs-stride limit,
 * and make the CCS AUX coordinates agree with the main surface.
 * Returns 0 or a negative error code.
 */
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int x = plane_state->base.src.x1 >> 16;
	int y = plane_state->base.src.y1 >> 16;
	int w = drm_rect_width(&plane_state->base.src) >> 16;
	int h = drm_rect_height(&plane_state->base.src) >> 16;
	int max_width = skl_max_plane_width(fb, 0, rotation);
	int max_height = 4096;
	u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;

	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	intel_add_fb_offsets(&x, &y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
	alignment = intel_surf_alignment(fb, 0);

	/*
	 * AUX surface offset is specified as the distance from the
	 * main surface offset, and it must be non-negative. Make
	 * sure that is what we will get.
	 */
	if (offset > aux_offset)
		offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
							   offset, aux_offset & ~(alignment - 1));

	/*
	 * When using an X-tiled surface, the plane blows up
	 * if the x offset + width exceed the stride.
	 *
	 * TODO: linear and Y-tiled seem fine, Yf untested,
	 */
	if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
		int cpp = fb->format->cpp[0];

		/* Walk the offset down (growing x) until x+w fits the stride. */
		while ((x + w) * cpp > plane_state->color_plane[0].stride) {
			if (offset == 0) {
				DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
				return -EINVAL;
			}

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}
	}

	/*
	 * CCS AUX surface doesn't have its own x/y offsets, we must make sure
	 * they match with the main surface x/y offsets.
	 */
	if (is_ccs_modifier(fb->modifier)) {
		while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
			if (offset == 0)
				break;

			/* Retry with a lower main offset (larger x/y). */
			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
			DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	/* Commit the final main-surface placement. */
	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = x;
	plane_state->color_plane[0].y = y;

	return 0;
}
3071
3072 static int
3073 skl_check_nv12_surface(struct intel_plane_state *plane_state)
3074 {
3075         /* Display WA #1106 */
3076         if (plane_state->base.rotation !=
3077             (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90) &&
3078             plane_state->base.rotation != DRM_MODE_ROTATE_270)
3079                 return 0;
3080
3081         /*
3082          * src coordinates are rotated here.
3083          * We check height but report it as width
3084          */
3085         if (((drm_rect_height(&plane_state->base.src) >> 16) % 4) != 0) {
3086                 DRM_DEBUG_KMS("src width must be multiple "
3087                               "of 4 for rotated NV12\n");
3088                 return -EINVAL;
3089         }
3090
3091         return 0;
3092 }
3093
3094 static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
3095 {
3096         const struct drm_framebuffer *fb = plane_state->base.fb;
3097         unsigned int rotation = plane_state->base.rotation;
3098         int max_width = skl_max_plane_width(fb, 1, rotation);
3099         int max_height = 4096;
3100         int x = plane_state->base.src.x1 >> 17;
3101         int y = plane_state->base.src.y1 >> 17;
3102         int w = drm_rect_width(&plane_state->base.src) >> 17;
3103         int h = drm_rect_height(&plane_state->base.src) >> 17;
3104         u32 offset;
3105
3106         intel_add_fb_offsets(&x, &y, plane_state, 1);
3107         offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
3108
3109         /* FIXME not quite sure how/if these apply to the chroma plane */
3110         if (w > max_width || h > max_height) {
3111                 DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
3112                               w, h, max_width, max_height);
3113                 return -EINVAL;
3114         }
3115
3116         plane_state->color_plane[1].offset = offset;
3117         plane_state->color_plane[1].x = x;
3118         plane_state->color_plane[1].y = y;
3119
3120         return 0;
3121 }
3122
3123 static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
3124 {
3125         const struct drm_framebuffer *fb = plane_state->base.fb;
3126         int src_x = plane_state->base.src.x1 >> 16;
3127         int src_y = plane_state->base.src.y1 >> 16;
3128         int hsub = fb->format->hsub;
3129         int vsub = fb->format->vsub;
3130         int x = src_x / hsub;
3131         int y = src_y / vsub;
3132         u32 offset;
3133
3134         intel_add_fb_offsets(&x, &y, plane_state, 1);
3135         offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
3136
3137         plane_state->color_plane[1].offset = offset;
3138         plane_state->color_plane[1].x = x * hsub + src_x % hsub;
3139         plane_state->color_plane[1].y = y * vsub + src_y % vsub;
3140
3141         return 0;
3142 }
3143
/*
 * Compute the final surface layout (strides, offsets, x/y coordinates)
 * for all color planes of a skl+ plane, rotating the src rectangle
 * into the GTT view as needed.
 *
 * Returns 0 on success or a negative error code if no valid surface
 * layout can be found for the requested state.
 */
int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int ret;

	intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
	plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);
	plane_state->color_plane[1].stride = intel_fb_pitch(fb, 1, rotation);

	ret = intel_plane_check_stride(plane_state);
	if (ret)
		return ret;

	/* Nothing more to compute for invisible planes. */
	if (!plane_state->base.visible)
		return 0;

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->base.src,
				fb->width << 16, fb->height << 16,
				DRM_MODE_ROTATE_270);

	/*
	 * Handle the AUX surface first since
	 * the main surface setup depends on it.
	 */
	if (fb->format->format == DRM_FORMAT_NV12) {
		ret = skl_check_nv12_surface(plane_state);
		if (ret)
			return ret;
		ret = skl_check_nv12_aux_surface(plane_state);
		if (ret)
			return ret;
	} else if (is_ccs_modifier(fb->modifier)) {
		ret = skl_check_ccs_aux_surface(plane_state);
		if (ret)
			return ret;
	} else {
		/*
		 * No AUX surface: zero the coordinates and set an
		 * out-of-band offset (~0xfff presumably serves as a
		 * "never valid" sentinel -- confirm against users of
		 * color_plane[1].offset).
		 */
		plane_state->color_plane[1].offset = ~0xfff;
		plane_state->color_plane[1].x = 0;
		plane_state->color_plane[1].y = 0;
	}

	ret = skl_check_main_surface(plane_state);
	if (ret)
		return ret;

	return 0;
}
3194
3195 unsigned int
3196 i9xx_plane_max_stride(struct intel_plane *plane,
3197                       u32 pixel_format, u64 modifier,
3198                       unsigned int rotation)
3199 {
3200         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3201
3202         if (!HAS_GMCH_DISPLAY(dev_priv)) {
3203                 return 32*1024;
3204         } else if (INTEL_GEN(dev_priv) >= 4) {
3205                 if (modifier == I915_FORMAT_MOD_X_TILED)
3206                         return 16*1024;
3207                 else
3208                         return 32*1024;
3209         } else if (INTEL_GEN(dev_priv) >= 3) {
3210                 if (modifier == I915_FORMAT_MOD_X_TILED)
3211                         return 8*1024;
3212                 else
3213                         return 16*1024;
3214         } else {
3215                 if (plane->i9xx_plane == PLANE_C)
3216                         return 4*1024;
3217                 else
3218                         return 8*1024;
3219         }
3220 }
3221
/*
 * Compute the DSPCNTR register value for a pre-skl primary plane
 * from the requested pixel format, tiling and rotation.
 *
 * Returns 0 for an unhandled pixel format (logged via MISSING_CASE()).
 */
static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	u32 dspcntr;

	dspcntr = DISPLAY_PLANE_ENABLE | DISPPLANE_GAMMA_ENABLE;

	if (IS_G4X(dev_priv) || IS_GEN5(dev_priv) ||
	    IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

	/*
	 * Pre-gen5 planes can be assigned to either pipe; select the
	 * one this state targets. Gen5+ planes are fixed to a pipe.
	 */
	if (INTEL_GEN(dev_priv) < 5)
		dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);

	/* Translate the DRM fourcc into the hw pixel format field. */
	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		MISSING_CASE(fb->format->format);
		return 0;
	}

	/* X-tiling has a dedicated bit from gen4 onwards. */
	if (INTEL_GEN(dev_priv) >= 4 &&
	    fb->modifier == I915_FORMAT_MOD_X_TILED)
		dspcntr |= DISPPLANE_TILED;

	if (rotation & DRM_MODE_ROTATE_180)
		dspcntr |= DISPPLANE_ROTATE_180;

	if (rotation & DRM_MODE_REFLECT_X)
		dspcntr |= DISPPLANE_MIRROR;

	return dspcntr;
}
3283
/*
 * Compute the final surface offset and x/y coordinates for a pre-skl
 * primary plane and store them in color_plane[0].
 */
int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int src_x = plane_state->base.src.x1 >> 16;
	int src_y = plane_state->base.src.y1 >> 16;
	u32 offset;
	int ret;

	intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
	plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);

	ret = intel_plane_check_stride(plane_state);
	if (ret)
		return ret;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);

	/*
	 * Only gen4+ has a separate surface offset; older hardware
	 * uses the linear offset directly (see i9xx_update_plane()).
	 */
	if (INTEL_GEN(dev_priv) >= 4)
		offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
							    plane_state, 0);
	else
		offset = 0;

	/* HSW/BDW do this automagically in hardware */
	if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
		int src_w = drm_rect_width(&plane_state->base.src) >> 16;
		int src_h = drm_rect_height(&plane_state->base.src) >> 16;

		/*
		 * For 180 degree rotation / X reflection point at the
		 * last pixel of the (reflected) range, matching the
		 * direction the hardware will scan out in.
		 */
		if (rotation & DRM_MODE_ROTATE_180) {
			src_x += src_w - 1;
			src_y += src_h - 1;
		} else if (rotation & DRM_MODE_REFLECT_X) {
			src_x += src_w - 1;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = src_x;
	plane_state->color_plane[0].y = src_y;

	return 0;
}
3329
/*
 * Validate an atomic state update for a pre-skl primary plane:
 * rotation, clipping, src coordinates and surface layout. On success
 * the precomputed DSPCNTR value is stored in plane_state->ctl.
 */
static int
i9xx_plane_check(struct intel_crtc_state *crtc_state,
		 struct intel_plane_state *plane_state)
{
	int ret;

	ret = chv_plane_check_rotation(plane_state);
	if (ret)
		return ret;

	/* Primary planes cannot scale: min == max == NO_SCALING. */
	ret = drm_atomic_helper_check_plane_state(&plane_state->base,
						  &crtc_state->base,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  false, true);
	if (ret)
		return ret;

	/* Nothing else to compute for invisible planes. */
	if (!plane_state->base.visible)
		return 0;

	ret = intel_plane_check_src_coordinates(plane_state);
	if (ret)
		return ret;

	ret = i9xx_check_plane_surface(plane_state);
	if (ret)
		return ret;

	plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);

	return 0;
}
3363
/*
 * Program the hardware registers of a pre-skl primary plane from the
 * precomputed plane state (ctl, stride, offset, x/y).
 *
 * All register writes use the _FW variants under the uncore lock so
 * the whole plane update is done as one uninterrupted sequence.
 */
static void i9xx_update_plane(struct intel_plane *plane,
			      const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 linear_offset;
	u32 dspcntr = plane_state->ctl;
	i915_reg_t reg = DSPCNTR(i9xx_plane);
	int x = plane_state->color_plane[0].x;
	int y = plane_state->color_plane[0].y;
	unsigned long irqflags;
	u32 dspaddr_offset;

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	/*
	 * Gen4+ uses the precomputed aligned surface offset; older
	 * generations take the linear offset directly.
	 */
	if (INTEL_GEN(dev_priv) >= 4)
		dspaddr_offset = plane_state->color_plane[0].offset;
	else
		dspaddr_offset = linear_offset;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (INTEL_GEN(dev_priv) < 4) {
		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE_FW(DSPSIZE(i9xx_plane),
			      ((crtc_state->pipe_src_h - 1) << 16) |
			      (crtc_state->pipe_src_w - 1));
		I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
		/* CHV pipe B primary has its own size/pos/alpha registers. */
		I915_WRITE_FW(PRIMSIZE(i9xx_plane),
			      ((crtc_state->pipe_src_h - 1) << 16) |
			      (crtc_state->pipe_src_w - 1));
		I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
		I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
	}

	I915_WRITE_FW(reg, dspcntr);

	I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		I915_WRITE_FW(DSPSURF(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
		I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE_FW(DSPSURF(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
		I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
		I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
	} else {
		/* Pre-gen4: single base address register only. */
		I915_WRITE_FW(DSPADDR(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
	}
	/* Flush the writes by reading back the control register. */
	POSTING_READ_FW(reg);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3426
/*
 * Disable a pre-skl primary plane: clear DSPCNTR and the surface/base
 * address register, then flush with a posting read.
 */
static void i9xx_disable_plane(struct intel_plane *plane,
			       struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(DSPCNTR(i9xx_plane), 0);
	/* Gen4+ uses DSPSURF, older generations DSPADDR (cf. i9xx_update_plane). */
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
	POSTING_READ_FW(DSPCNTR(i9xx_plane));

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3445
/*
 * Read back the current hardware state of a pre-skl primary plane.
 *
 * Returns true if the plane is enabled and stores the pipe the plane
 * is attached to in @pipe. Returns false (without touching @pipe) if
 * the required power domain could not be acquired.
 */
static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
				    enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-4 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	val = I915_READ(DSPCNTR(i9xx_plane));

	ret = val & DISPLAY_PLANE_ENABLE;

	/* Gen5+ planes are fixed to a pipe; older ones report it in DSPCNTR. */
	if (INTEL_GEN(dev_priv) >= 5)
		*pipe = plane->pipe;
	else
		*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;

	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
3478
3479 static u32
3480 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
3481 {
3482         if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
3483                 return 64;
3484         else
3485                 return intel_tile_width_bytes(fb, color_plane);
3486 }
3487
3488 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
3489 {
3490         struct drm_device *dev = intel_crtc->base.dev;
3491         struct drm_i915_private *dev_priv = to_i915(dev);
3492
3493         I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
3494         I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
3495         I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
3496 }
3497
3498 /*
3499  * This function detaches (aka. unbinds) unused scalers in hardware
3500  */
3501 static void skl_detach_scalers(struct intel_crtc *intel_crtc)
3502 {
3503         struct intel_crtc_scaler_state *scaler_state;
3504         int i;
3505
3506         scaler_state = &intel_crtc->config->scaler_state;
3507
3508         /* loop through and disable scalers that aren't in use */
3509         for (i = 0; i < intel_crtc->num_scalers; i++) {
3510                 if (!scaler_state->scalers[i].in_use)
3511                         skl_detach_scaler(intel_crtc, i);
3512         }
3513 }
3514
3515 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
3516                      int color_plane)
3517 {
3518         const struct drm_framebuffer *fb = plane_state->base.fb;
3519         unsigned int rotation = plane_state->base.rotation;
3520         u32 stride = plane_state->color_plane[color_plane].stride;
3521
3522         if (color_plane >= fb->format->num_planes)
3523                 return 0;
3524
3525         /*
3526          * The stride is either expressed as a multiple of 64 bytes chunks for
3527          * linear buffers or in number of tiles for tiled buffers.
3528          */
3529         if (drm_rotation_90_or_270(rotation))
3530                 stride /= intel_tile_height(fb, color_plane);
3531         else
3532                 stride /= intel_fb_stride_alignment(fb, color_plane);
3533
3534         return stride;
3535 }
3536
/*
 * Translate a DRM fourcc into the skl+ PLANE_CTL source pixel format
 * bits. Returns 0 (logged via MISSING_CASE()) for unhandled formats.
 */
static u32 skl_plane_ctl_format(uint32_t pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	case DRM_FORMAT_XRGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR2101010:
		return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	case DRM_FORMAT_NV12:
		return PLANE_CTL_FORMAT_NV12;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}
3570
3571 /*
3572  * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
3573  * to be already pre-multiplied. We need to add a knob (or a different
3574  * DRM_FORMAT) for user-space to configure that.
3575  */
3576 static u32 skl_plane_ctl_alpha(uint32_t pixel_format)
3577 {
3578         switch (pixel_format) {
3579         case DRM_FORMAT_ABGR8888:
3580         case DRM_FORMAT_ARGB8888:
3581                 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
3582         default:
3583                 return PLANE_CTL_ALPHA_DISABLE;
3584         }
3585 }
3586
3587 static u32 glk_plane_color_ctl_alpha(uint32_t pixel_format)
3588 {
3589         switch (pixel_format) {
3590         case DRM_FORMAT_ABGR8888:
3591         case DRM_FORMAT_ARGB8888:
3592                 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
3593         default:
3594                 return PLANE_COLOR_ALPHA_DISABLE;
3595         }
3596 }
3597
/*
 * Translate a framebuffer modifier into the skl+ PLANE_CTL tiling
 * bits. Returns 0 for linear; unhandled modifiers are logged via
 * MISSING_CASE() and also return 0.
 */
static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		break;
	case I915_FORMAT_MOD_X_TILED:
		return PLANE_CTL_TILED_X;
	case I915_FORMAT_MOD_Y_TILED:
		return PLANE_CTL_TILED_Y;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Yf_TILED:
		return PLANE_CTL_TILED_YF;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	default:
		MISSING_CASE(fb_modifier);
	}

	return 0;
}
3619
/*
 * Translate a DRM rotation into the skl+ PLANE_CTL rotation bits.
 * Note the deliberate 90<->270 swap below.
 */
static u32 skl_plane_ctl_rotate(unsigned int rotate)
{
	switch (rotate) {
	case DRM_MODE_ROTATE_0:
		break;
	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, thats why this swapping.
	 */
	case DRM_MODE_ROTATE_90:
		return PLANE_CTL_ROTATE_270;
	case DRM_MODE_ROTATE_180:
		return PLANE_CTL_ROTATE_180;
	case DRM_MODE_ROTATE_270:
		return PLANE_CTL_ROTATE_90;
	default:
		MISSING_CASE(rotate);
	}

	return 0;
}
3641
/*
 * Translate a DRM reflection into cnl+ PLANE_CTL flip bits.
 * DRM_MODE_REFLECT_Y is not supported and deliberately falls
 * through to MISSING_CASE().
 */
static u32 cnl_plane_ctl_flip(unsigned int reflect)
{
	switch (reflect) {
	case 0:
		break;
	case DRM_MODE_REFLECT_X:
		return PLANE_CTL_FLIP_HORIZONTAL;
	case DRM_MODE_REFLECT_Y:
	default:
		MISSING_CASE(reflect);
	}

	return 0;
}
3656
/*
 * Compute the PLANE_CTL register value for a skl+ universal plane.
 *
 * On pre-glk hardware the alpha/gamma/csc/YUV controls also live in
 * PLANE_CTL; on glk/gen10+ those bits moved to PLANE_COLOR_CTL
 * (see glk_plane_color_ctl()).
 */
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	u32 plane_ctl;

	plane_ctl = PLANE_CTL_ENABLE;

	if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
		plane_ctl |= skl_plane_ctl_alpha(fb->format->format);
		plane_ctl |=
			PLANE_CTL_PIPE_GAMMA_ENABLE |
			PLANE_CTL_PIPE_CSC_ENABLE |
			PLANE_CTL_PLANE_GAMMA_DISABLE;

		if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;

		if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
	}

	plane_ctl |= skl_plane_ctl_format(fb->format->format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
	plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);

	/* X flip is only available from gen10 (cnl) onwards. */
	if (INTEL_GEN(dev_priv) >= 10)
		plane_ctl |= cnl_plane_ctl_flip(rotation &
						DRM_MODE_REFLECT_MASK);

	/* Destination keying takes precedence over source keying. */
	if (key->flags & I915_SET_COLORKEY_DESTINATION)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
	else if (key->flags & I915_SET_COLORKEY_SOURCE)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;

	return plane_ctl;
}
3698
/*
 * Compute the PLANE_COLOR_CTL value for glk/gen10+ planes: gamma,
 * pipe CSC, alpha blending mode and YUV->RGB conversion setup.
 */
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
			const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	u32 plane_color_ctl = 0;

	/*
	 * NOTE(review): gen11+ skips the pipe gamma/csc enables here --
	 * presumably handled elsewhere on icl; confirm.
	 */
	if (INTEL_GEN(dev_priv) < 11) {
		plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
		plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
	}
	plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
	plane_color_ctl |= glk_plane_color_ctl_alpha(fb->format->format);

	if (fb->format->is_yuv) {
		if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
		else
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;

		if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
	}

	return plane_color_ctl;
}
3726
/*
 * Re-program display hw state and (optionally) re-commit a previously
 * duplicated atomic state, e.g. after a GPU reset.
 *
 * @state may be NULL, in which case only the hw state readout and VGA
 * redisable are performed. Returns 0 or a negative error code from
 * the commit.
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	i915_redisable_vga(to_i915(dev));

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH_DISPLAY(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	/* All needed locks are held by the caller, so -EDEADLK is a bug here. */
	WARN_ON(ret == -EDEADLK);
	return ret;
}
3765
3766 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
3767 {
3768         return intel_has_gpu_reset(dev_priv) &&
3769                 INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv);
3770 }
3771
/*
 * Called before a GPU reset. If the reset will clobber the display
 * (or force_reset_modeset_test is set), take all modeset locks,
 * duplicate the current atomic state for restoration in
 * intel_finish_reset() and disable all crtcs.
 *
 * The locks acquired here are released by intel_finish_reset().
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!i915_modparams.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
	wake_up_all(&dev_priv->gpu_error.wait_queue);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
		i915_gem_set_wedged(dev_priv);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	/* Retry the lock acquisition until it no longer deadlocks. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		DRM_ERROR("Duplicating state failed with %i\n", ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
		drm_atomic_state_put(state);
		return;
	}

	/* Stashed state is consumed and freed by intel_finish_reset(). */
	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
3827
/*
 * Called after a GPU reset. Restores the atomic state saved by
 * intel_prepare_reset() -- reinitializing display hw first if the
 * reset clobbered it -- and drops the modeset locks taken there.
 */
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
		return;

	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_runtime_pm_disable_interrupts(dev_priv);
		intel_runtime_pm_enable_interrupts(dev_priv);

		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev);
		intel_init_clock_gating(dev_priv);

		/* HPD setup must run under the irq lock. */
		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	/* Release the locks taken in intel_prepare_reset(). */
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
}
3881
/*
 * Apply the pipe source size and panel fitter configuration for a
 * fastset (flip without full modeset), so the scanout geometry matches
 * the new state even though the full modeset path was skipped.
 */
static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
				     const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
	crtc->base.mode = new_crtc_state->base.mode;

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */

	/* PIPESRC holds (width - 1) in the high half, (height - 1) in the low. */
	I915_WRITE(PIPESRC(crtc->pipe),
		   ((new_crtc_state->pipe_src_w - 1) << 16) |
		   (new_crtc_state->pipe_src_h - 1));

	/* on skylake this is done by detaching scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(crtc);

		if (new_crtc_state->pch_pfit.enabled)
			skylake_pfit_enable(crtc);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		if (new_crtc_state->pch_pfit.enabled)
			ironlake_pfit_enable(crtc);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(crtc, true);
	}
}
3917
/*
 * Switch the FDI TX/RX of @crtc from a link-training pattern to normal
 * (pixel-sending) operation, enabling enhanced framing on both ends.
 * Called once training has completed.
 */
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		/* IVB uses a dedicated "none" field for leaving training. */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
3958
/* The FDI link training functions for ILK/Ibexpeak. */
/*
 * Train the CPU FDI TX -> PCH FDI RX link: enable both ends with
 * training pattern 1, poll FDI_RX_IIR for bit lock, then switch to
 * pattern 2 and poll for symbol lock.  Failures are logged but not
 * propagated (the display will simply not light up correctly).
 */
static void ironlake_fdi_link_train(struct intel_crtc *crtc,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll for bit lock; the sticky IIR bit is cleared by writing it back. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll for symbol lock to complete training. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
4052
/*
 * FDI TX voltage-swing / pre-emphasis levels tried in order during
 * SNB link training (also reused by the IVB manual training fallback).
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
4059
/* The FDI link training functions for SNB/Cougarpoint. */
/*
 * Train the FDI link on SNB: for each of the two training patterns,
 * step through the snb_b_fdi_train_param[] vswing/pre-emphasis levels,
 * retrying each level up to 5 times, until FDI_RX_IIR reports bit lock
 * (train 1) and then symbol lock (train 2).  Failures are logged only.
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		/* CPT uses its own training-pattern field layout. */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Train 1: walk the vswing/pre-emphasis table until bit lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		/* retry < 5 means the inner loop hit bit lock. */
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Train 2: same table walk, this time waiting for symbol lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
4192
/* Manual link training for Ivy Bridge A0 parts */
/*
 * Fallback training loop for IVB parts where auto-training cannot be
 * used: try every vswing/pre-emphasis setting twice (j / 2 indexes
 * snb_b_fdi_train_param[]), disabling and re-enabling TX/RX for each
 * attempt.  Jumps straight to train_done once symbol lock is seen.
 */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		/* Poll (up to 4 reads) for bit lock on this setting. */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		/* Poll for symbol lock; success ends the whole routine. */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
4312
/*
 * Enable the FDI PLLs for @intel_crtc: PCH FDI RX PLL first (with the
 * configured lane count and the BPC taken from PIPECONF), then switch
 * the RX clock source to PCDclk, then make sure the CPU FDI TX PLL is
 * on.  The udelay()s are hardware-required warmup times.
 */
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	/* mirror the pipe's BPC into the FDI RX bpc field (bits 16-18) */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
4349
/*
 * Disable the FDI PLLs for @intel_crtc, undoing
 * ironlake_fdi_pll_enable(): switch RX back to Rawclk, turn off the
 * CPU FDI TX PLL, then the PCH FDI RX PLL, waiting for each to settle.
 */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
4379
/*
 * Disable the FDI link for @crtc: turn off CPU FDI TX and PCH FDI RX,
 * apply the IBX clock-pointer workaround, and leave both ends parked
 * in training pattern 1 (ready for the next link train).  The PLLs are
 * left alone; see ironlake_fdi_pll_disable() for those.
 */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
4432
4433 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
4434 {
4435         struct drm_crtc *crtc;
4436         bool cleanup_done;
4437
4438         drm_for_each_crtc(crtc, &dev_priv->drm) {
4439                 struct drm_crtc_commit *commit;
4440                 spin_lock(&crtc->commit_lock);
4441                 commit = list_first_entry_or_null(&crtc->commit_list,
4442                                                   struct drm_crtc_commit, commit_entry);
4443                 cleanup_done = commit ?
4444                         try_wait_for_completion(&commit->cleanup_done) : true;
4445                 spin_unlock(&crtc->commit_lock);
4446
4447                 if (cleanup_done)
4448                         continue;
4449
4450                 drm_crtc_wait_one_vblank(crtc);
4451
4452                 return true;
4453         }
4454
4455         return false;
4456 }
4457
/*
 * Gate the iCLKIP pixel clock and stop its SSC modulator: set the gate
 * bit in PIXCLK_GATE, then set SBI_SSCCTL_DISABLE in SSCCTL6 via the
 * sideband interface (serialized by sb_lock).
 */
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
4472
/* Program iCLKIP clock to the desired frequency */
/*
 * Disable iCLKIP, compute the divider settings (auxdiv, divsel,
 * phaseinc) for the crtc's adjusted pixel clock, program them through
 * the sideband interface, re-enable the modulator, and finally ungate
 * the pixel clock.  lpt_get_iclkip() is the readback inverse.
 */
static void lpt_program_iclkip(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc->config->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		/* total divisor = divsel+2 integer steps + phaseinc/64 fraction */
		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
4550
/*
 * Read back the currently programmed iCLKIP frequency in kHz — the
 * inverse of lpt_program_iclkip().  Returns 0 when the pixel clock is
 * gated or the SSC modulator is disabled.
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	/* Clock gated -> nothing is running. */
	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	/* Reassemble the total divisor from its integer and phase parts. */
	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
4587
/*
 * Copy the CPU transcoder timing registers (H/V total, blank, sync and
 * vsyncshift) of @crtc into the corresponding PCH transcoder registers,
 * so both sides of the FDI link run with identical timings.
 */
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
4611
/*
 * Set or clear the FDI B/C lane-bifurcation chicken bit in
 * SOUTH_CHICKEN1.  May only be changed while FDI RX on pipes B and C
 * is disabled (hence the WARN_ONs).  No-op if already in the requested
 * state.
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}
4632
4633 static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
4634 {
4635         struct drm_device *dev = intel_crtc->base.dev;
4636
4637         switch (intel_crtc->pipe) {
4638         case PIPE_A:
4639                 break;
4640         case PIPE_B:
4641                 if (intel_crtc->config->fdi_lanes > 2)
4642                         cpt_set_fdi_bc_bifurcation(dev, false);
4643                 else
4644                         cpt_set_fdi_bc_bifurcation(dev, true);
4645
4646                 break;
4647         case PIPE_C:
4648                 cpt_set_fdi_bc_bifurcation(dev, true);
4649
4650                 break;
4651         default:
4652                 BUG();
4653         }
4654 }
4655
4656 /*
4657  * Finds the encoder associated with the given CRTC. This can only be
4658  * used when we know that the CRTC isn't feeding multiple encoders!
4659  */
4660 static struct intel_encoder *
4661 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
4662                            const struct intel_crtc_state *crtc_state)
4663 {
4664         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4665         const struct drm_connector_state *connector_state;
4666         const struct drm_connector *connector;
4667         struct intel_encoder *encoder = NULL;
4668         int num_encoders = 0;
4669         int i;
4670
4671         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
4672                 if (connector_state->crtc != &crtc->base)
4673                         continue;
4674
4675                 encoder = to_intel_encoder(connector_state->best_encoder);
4676                 num_encoders++;
4677         }
4678
4679         WARN(num_encoders != 1, "%d encoders for pipe %c\n",
4680              num_encoders, pipe_name(crtc->pipe));
4681
4682         return encoder;
4683 }
4684
4685 /*
4686  * Enable PCH resources required for PCH ports:
4687  *   - PCH PLLs
4688  *   - FDI training & RX/TX
4689  *   - update transcoder timings
4690  *   - DP transcoding bits
4691  *   - transcoder
4692  */
static void ironlake_pch_enable(const struct intel_atomic_state *state,
                                const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = crtc->pipe;
        u32 temp;

        assert_pch_transcoder_disabled(dev_priv, pipe);

        /* IVB splits/unsplits FDI B/C lanes depending on the lane count. */
        if (IS_IVYBRIDGE(dev_priv))
                ivybridge_update_fdi_bc_bifurcation(crtc);

        /* Write the TU size bits before fdi link training, so that error
         * detection works. */
        I915_WRITE(FDI_RX_TUSIZE1(pipe),
                   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

        /* For PCH output, training FDI link */
        dev_priv->display.fdi_link_train(crtc, crtc_state);

        /* We need to program the right clock selection before writing the pixel
         * multiplier into the DPLL. */
        if (HAS_PCH_CPT(dev_priv)) {
                u32 sel;

                temp = I915_READ(PCH_DPLL_SEL);
                temp |= TRANS_DPLL_ENABLE(pipe);
                sel = TRANS_DPLLB_SEL(pipe);
                /* Route DPLL B to this transcoder iff the crtc uses it. */
                if (crtc_state->shared_dpll ==
                    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
                        temp |= sel;
                else
                        temp &= ~sel;
                I915_WRITE(PCH_DPLL_SEL, temp);
        }

        /* XXX: pch pll's can be enabled any time before we enable the PCH
         * transcoder, and we actually should do this to not upset any PCH
         * transcoder that already use the clock when we share it.
         *
         * Note that enable_shared_dpll tries to do the right thing, but
         * get_shared_dpll unconditionally resets the pll - we need that to have
         * the right LVDS enable sequence. */
        intel_enable_shared_dpll(crtc);

        /* set transcoder timing, panel must allow it */
        assert_panel_unlocked(dev_priv, pipe);
        ironlake_pch_transcoder_set_timings(crtc, pipe);

        intel_fdi_normal_train(crtc);

        /* For PCH DP, enable TRANS_DP_CTL */
        if (HAS_PCH_CPT(dev_priv) &&
            intel_crtc_has_dp_encoder(crtc_state)) {
                const struct drm_display_mode *adjusted_mode =
                        &crtc_state->base.adjusted_mode;
                /* Propagate the pipe's BPC selection into the transcoder. */
                u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
                i915_reg_t reg = TRANS_DP_CTL(pipe);
                enum port port;

                temp = I915_READ(reg);
                temp &= ~(TRANS_DP_PORT_SEL_MASK |
                          TRANS_DP_SYNC_MASK |
                          TRANS_DP_BPC_MASK);
                temp |= TRANS_DP_OUTPUT_ENABLE;
                temp |= bpc << 9; /* same format but at 11:9 */

                /* Mirror the adjusted mode's sync polarities. */
                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

                /* Select the DP port driven by this crtc (must be B..D). */
                port = intel_get_crtc_new_encoder(state, crtc_state)->port;
                WARN_ON(port < PORT_B || port > PORT_D);
                temp |= TRANS_DP_PORT_SEL(port);

                I915_WRITE(reg, temp);
        }

        ironlake_enable_pch_transcoder(dev_priv, pipe);
}
4776
/*
 * Enable the (single, pipe A) PCH transcoder on LPT: program iCLKIP,
 * copy the CPU transcoder timings over, then enable the transcoder.
 * @state is unused here but keeps the signature parallel to
 * ironlake_pch_enable().
 */
static void lpt_pch_enable(const struct intel_atomic_state *state,
                           const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

        assert_pch_transcoder_disabled(dev_priv, PIPE_A);

        lpt_program_iclkip(crtc);

        /* Set transcoder timing. */
        ironlake_pch_transcoder_set_timings(crtc, PIPE_A);

        lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
4793
/*
 * Sanity-check a mode set on CPT by verifying that the pipe's scanline
 * counter (PIPEDSL) is advancing; the wait is retried once before the
 * pipe is declared stuck.
 */
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t dslreg = PIPEDSL(pipe);
        u32 temp;

        temp = I915_READ(dslreg);
        udelay(500);
        if (wait_for(I915_READ(dslreg) != temp, 5)) {
                if (wait_for(I915_READ(dslreg) != temp, 5))
                        DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
        }
}
4807
4808 /*
4809  * The hardware phase 0.0 refers to the center of the pixel.
4810  * We want to start from the top/left edge which is phase
4811  * -0.5. That matches how the hardware calculates the scaling
4812  * factors (from top-left of the first pixel to bottom-right
4813  * of the last pixel, as opposed to the pixel centers).
4814  *
4815  * For 4:2:0 subsampled chroma planes we obviously have to
4816  * adjust that so that the chroma sample position lands in
4817  * the right spot.
4818  *
4819  * Note that for packed YCbCr 4:2:2 formats there is no way to
4820  * control chroma siting. The hardware simply replicates the
4821  * chroma samples for both of the luma samples, and thus we don't
4822  * actually get the expected MPEG2 chroma siting convention :(
4823  * The same behaviour is observed on pre-SKL platforms as well.
4824  */
4825 u16 skl_scaler_calc_phase(int sub, bool chroma_cosited)
4826 {
4827         int phase = -0x8000;
4828         u16 trip = 0;
4829
4830         if (chroma_cosited)
4831                 phase += (sub - 1) * 0x8000 / sub;
4832
4833         if (phase < 0)
4834                 phase = 0x10000 + phase;
4835         else
4836                 trip = PS_PHASE_TRIP;
4837
4838         return ((phase >> 2) & PS_PHASE_MASK) | trip;
4839 }
4840
/*
 * skl_update_scaler - stage a scaler request (or release) in the crtc state
 * @crtc_state: crtc state that tracks scaler usage
 * @force_detach: free any scaler bound to this user regardless of need
 * @scaler_user: index of the requesting user (plane index or SKL_CRTC_INDEX)
 * @scaler_id: in/out, currently assigned scaler id (-1 when none)
 * @src_w: source width
 * @src_h: source height
 * @dst_w: destination width
 * @dst_h: destination height
 * @plane_scaler_check: true when checking a plane scaler (vs. pipe scaler)
 * @pixel_format: plane pixel format (only meaningful with @plane_scaler_check)
 *
 * Only bookkeeping in @crtc_state is updated here; the register updates
 * happen later during plane/panel-fitter programming.
 *
 * Returns 0 on success, -EINVAL when the requested scaling is unsupported.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
                  unsigned int scaler_user, int *scaler_id,
                  int src_w, int src_h, int dst_w, int dst_h,
                  bool plane_scaler_check,
                  uint32_t pixel_format)
{
        struct intel_crtc_scaler_state *scaler_state =
                &crtc_state->scaler_state;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
        const struct drm_display_mode *adjusted_mode =
                &crtc_state->base.adjusted_mode;
        int need_scaling;

        /*
         * Src coordinates are already rotated by 270 degrees for
         * the 90/270 degree plane rotation cases (to match the
         * GTT mapping), hence no need to account for rotation here.
         */
        need_scaling = src_w != dst_w || src_h != dst_h;

        /* NV12 planes always take a scaler. */
        if (plane_scaler_check)
                if (pixel_format == DRM_FORMAT_NV12)
                        need_scaling = true;

        /* A ycbcr420 pipe always needs the pipe scaler. */
        if (crtc_state->ycbcr420 && scaler_user == SKL_CRTC_INDEX)
                need_scaling = true;

        /*
         * Scaling/fitting not supported in IF-ID mode in GEN9+
         * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
         * Once NV12 is enabled, handle it here while allocating scaler
         * for NV12.
         */
        if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
            need_scaling && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
                DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
                return -EINVAL;
        }

        /*
         * if plane is being disabled or scaler is no more required or force detach
         *  - free scaler binded to this plane/crtc
         *  - in order to do this, update crtc->scaler_usage
         *
         * Here scaler state in crtc_state is set free so that
         * scaler can be assigned to other user. Actual register
         * update to free the scaler is done in plane/panel-fit programming.
         * For this purpose crtc/plane_state->scaler_id isn't reset here.
         */
        if (force_detach || !need_scaling) {
                if (*scaler_id >= 0) {
                        scaler_state->scaler_users &= ~(1 << scaler_user);
                        scaler_state->scalers[*scaler_id].in_use = 0;

                        DRM_DEBUG_KMS("scaler_user index %u.%u: "
                                "Staged freeing scaler id %d scaler_users = 0x%x\n",
                                intel_crtc->pipe, scaler_user, *scaler_id,
                                scaler_state->scaler_users);
                        *scaler_id = -1;
                }
                return 0;
        }

        /* NV12 sources have additional minimum size requirements. */
        if (plane_scaler_check && pixel_format == DRM_FORMAT_NV12 &&
            (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
                DRM_DEBUG_KMS("NV12: src dimensions not met\n");
                return -EINVAL;
        }

        /* range checks */
        if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
            dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
            (IS_GEN11(dev_priv) &&
             (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
              dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
            (!IS_GEN11(dev_priv) &&
             (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
              dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
                DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
                        "size is out of scaler range\n",
                        intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
                return -EINVAL;
        }

        /* mark this plane as a scaler user in crtc_state */
        scaler_state->scaler_users |= (1 << scaler_user);
        DRM_DEBUG_KMS("scaler_user index %u.%u: "
                "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
                intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
                scaler_state->scaler_users);

        return 0;
}
4937
4938 /**
4939  * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
4940  *
4941  * @state: crtc's scaler state
4942  *
4943  * Return
4944  *     0 - scaler_usage updated successfully
4945  *    error - requested scaling cannot be supported or other error condition
4946  */
4947 int skl_update_scaler_crtc(struct intel_crtc_state *state)
4948 {
4949         const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
4950
4951         return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
4952                                  &state->scaler_state.scaler_id,
4953                                  state->pipe_src_w, state->pipe_src_h,
4954                                  adjusted_mode->crtc_hdisplay,
4955                                  adjusted_mode->crtc_vdisplay, false, 0);
4956 }
4957
4958 /**
4959  * skl_update_scaler_plane - Stages update to scaler state for a given plane.
4960  * @crtc_state: crtc's scaler state
4961  * @plane_state: atomic plane state to update
4962  *
4963  * Return
4964  *     0 - scaler_usage updated successfully
4965  *    error - requested scaling cannot be supported or other error condition
4966  */
4967 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
4968                                    struct intel_plane_state *plane_state)
4969 {
4970
4971         struct intel_plane *intel_plane =
4972                 to_intel_plane(plane_state->base.plane);
4973         struct drm_framebuffer *fb = plane_state->base.fb;
4974         int ret;
4975
4976         bool force_detach = !fb || !plane_state->base.visible;
4977
4978         ret = skl_update_scaler(crtc_state, force_detach,
4979                                 drm_plane_index(&intel_plane->base),
4980                                 &plane_state->scaler_id,
4981                                 drm_rect_width(&plane_state->base.src) >> 16,
4982                                 drm_rect_height(&plane_state->base.src) >> 16,
4983                                 drm_rect_width(&plane_state->base.dst),
4984                                 drm_rect_height(&plane_state->base.dst),
4985                                 fb ? true : false, fb ? fb->format->format : 0);
4986
4987         if (ret || plane_state->scaler_id < 0)
4988                 return ret;
4989
4990         /* check colorkey */
4991         if (plane_state->ckey.flags) {
4992                 DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
4993                               intel_plane->base.base.id,
4994                               intel_plane->base.name);
4995                 return -EINVAL;
4996         }
4997
4998         /* Check src format */
4999         switch (fb->format->format) {
5000         case DRM_FORMAT_RGB565:
5001         case DRM_FORMAT_XBGR8888:
5002         case DRM_FORMAT_XRGB8888:
5003         case DRM_FORMAT_ABGR8888:
5004         case DRM_FORMAT_ARGB8888:
5005         case DRM_FORMAT_XRGB2101010:
5006         case DRM_FORMAT_XBGR2101010:
5007         case DRM_FORMAT_YUYV:
5008         case DRM_FORMAT_YVYU:
5009         case DRM_FORMAT_UYVY:
5010         case DRM_FORMAT_VYUY:
5011         case DRM_FORMAT_NV12:
5012                 break;
5013         default:
5014                 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
5015                               intel_plane->base.base.id, intel_plane->base.name,
5016                               fb->base.id, fb->format->format);
5017                 return -EINVAL;
5018         }
5019
5020         return 0;
5021 }
5022
5023 static void skylake_scaler_disable(struct intel_crtc *crtc)
5024 {
5025         int i;
5026
5027         for (i = 0; i < crtc->num_scalers; i++)
5028                 skl_detach_scaler(crtc, i);
5029 }
5030
/* Program the SKL+ pipe scaler when the state uses the panel fitter. */
static void skylake_pfit_enable(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = crtc->pipe;
        struct intel_crtc_scaler_state *scaler_state =
                &crtc->config->scaler_state;

        if (crtc->config->pch_pfit.enabled) {
                u16 uv_rgb_hphase, uv_rgb_vphase;
                int id;

                /* A scaler must have been assigned during atomic check. */
                if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
                        return;

                /* sub == 1 (no subsampling), no chroma co-siting. */
                uv_rgb_hphase = skl_scaler_calc_phase(1, false);
                uv_rgb_vphase = skl_scaler_calc_phase(1, false);

                id = scaler_state->scaler_id;
                I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
                        PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
                I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
                              PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
                I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
                              PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
                I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
                I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
        }
}
5060
/* Program the ILK-style panel fitter when the state uses it. */
static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = crtc->pipe;

        if (crtc->config->pch_pfit.enabled) {
                /* Force use of hard-coded filter coefficients
                 * as some pre-programmed values are broken,
                 * e.g. x201.
                 */
                if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
                        I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
                                                 PF_PIPE_SEL_IVB(pipe));
                else
                        I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
                I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
                I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
        }
}
5081
/*
 * Enable IPS for @crtc_state. On Broadwell the enable goes through the
 * pcode mailbox; on other platforms IPS_CTL is written directly and we
 * wait for the bit to latch at the next vblank.
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!crtc_state->ips_enabled)
                return;

        /*
         * We can only enable IPS after we enable a plane and wait for a vblank
         * This function is called from post_plane_update, which is run after
         * a vblank wait.
         */
        WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

        if (IS_BROADWELL(dev_priv)) {
                mutex_lock(&dev_priv->pcu_lock);
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
                                                IPS_ENABLE | IPS_PCODE_CONTROL));
                mutex_unlock(&dev_priv->pcu_lock);
                /* Quoting Art Runyan: "its not safe to expect any particular
                 * value in IPS_CTL bit 31 after enabling IPS through the
                 * mailbox." Moreover, the mailbox may return a bogus state,
                 * so we need to just enable it and continue on.
                 */
        } else {
                I915_WRITE(IPS_CTL, IPS_ENABLE);
                /* The bit only becomes 1 in the next vblank, so this wait here
                 * is essentially intel_wait_for_vblank. If we don't have this
                 * and don't wait for vblanks until the end of crtc_enable, then
                 * the HW state readout code will complain that the expected
                 * IPS_CTL value is not the one we read. */
                if (intel_wait_for_register(dev_priv,
                                            IPS_CTL, IPS_ENABLE, IPS_ENABLE,
                                            50))
                        DRM_ERROR("Timed out waiting for IPS enable\n");
        }
}
5121
/*
 * Disable IPS for @crtc_state. Mirrors hsw_enable_ips(): Broadwell goes
 * through the pcode mailbox, other platforms write IPS_CTL directly.
 * Always waits for a vblank afterwards so planes can be safely disabled.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!crtc_state->ips_enabled)
                return;

        if (IS_BROADWELL(dev_priv)) {
                mutex_lock(&dev_priv->pcu_lock);
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
                mutex_unlock(&dev_priv->pcu_lock);
                /*
                 * Wait for PCODE to finish disabling IPS. The BSpec specified
                 * 42ms timeout value leads to occasional timeouts so use 100ms
                 * instead.
                 */
                if (intel_wait_for_register(dev_priv,
                                            IPS_CTL, IPS_ENABLE, 0,
                                            100))
                        DRM_ERROR("Timed out waiting for IPS disable\n");
        } else {
                I915_WRITE(IPS_CTL, 0);
                POSTING_READ(IPS_CTL);
        }

        /* We need to wait for a vblank before we can disable the plane. */
        intel_wait_for_vblank(dev_priv, crtc->pipe);
}
5152
5153 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
5154 {
5155         if (intel_crtc->overlay) {
5156                 struct drm_device *dev = intel_crtc->base.dev;
5157
5158                 mutex_lock(&dev->struct_mutex);
5159                 (void) intel_overlay_switch_off(intel_crtc->overlay);
5160                 mutex_unlock(&dev->struct_mutex);
5161         }
5162
5163         /* Let userspace switch the overlay on again. In most cases userspace
5164          * has to recompute where to put it anyway.
5165          */
5166 }
5167
5168 /**
5169  * intel_post_enable_primary - Perform operations after enabling primary plane
5170  * @crtc: the CRTC whose primary plane was just enabled
5171  * @new_crtc_state: the enabling state
5172  *
5173  * Performs potentially sleeping operations that must be done after the primary
5174  * plane is enabled, such as updating FBC and IPS.  Note that this may be
5175  * called due to an explicit primary plane update, or due to an implicit
5176  * re-enable that is caused when a sprite plane is updated to no longer
5177  * completely hide the primary plane.
5178  */
5179 static void
5180 intel_post_enable_primary(struct drm_crtc *crtc,
5181                           const struct intel_crtc_state *new_crtc_state)
5182 {
5183         struct drm_device *dev = crtc->dev;
5184         struct drm_i915_private *dev_priv = to_i915(dev);
5185         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5186         int pipe = intel_crtc->pipe;
5187
5188         /*
5189          * Gen2 reports pipe underruns whenever all planes are disabled.
5190          * So don't enable underrun reporting before at least some planes
5191          * are enabled.
5192          * FIXME: Need to fix the logic to work when we turn off all planes
5193          * but leave the pipe running.
5194          */
5195         if (IS_GEN2(dev_priv))
5196                 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5197
5198         /* Underruns don't always raise interrupts, so check manually. */
5199         intel_check_cpu_fifo_underruns(dev_priv);
5200         intel_check_pch_fifo_underruns(dev_priv);
5201 }
5202
5203 /* FIXME get rid of this and use pre_plane_update */
/*
 * Prepare for disabling the primary plane outside of a full atomic
 * commit: turn off gen2 underrun reporting, disable IPS, and disable
 * memory self-refresh (with a vblank wait) where required.
 */
static void
intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;

        /*
         * Gen2 reports pipe underruns whenever all planes are disabled.
         * So disable underrun reporting before all the planes get disabled.
         */
        if (IS_GEN2(dev_priv))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

        hsw_disable_ips(to_intel_crtc_state(crtc->state));

        /*
         * Vblank time updates from the shadow to live plane control register
         * are blocked if the memory self-refresh mode is active at that
         * moment. So to make sure the plane gets truly disabled, disable
         * first the self-refresh mode. The self-refresh enable bit in turn
         * will be checked/applied by the HW only at the next frame start
         * event which is after the vblank start event, so we need to have a
         * wait-for-vblank between disabling the plane and the pipe.
         */
        if (HAS_GMCH_DISPLAY(dev_priv) &&
            intel_set_memory_cxsr(dev_priv, false))
                intel_wait_for_vblank(dev_priv, pipe);
}
5234
5235 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
5236                                        const struct intel_crtc_state *new_crtc_state)
5237 {
5238         if (!old_crtc_state->ips_enabled)
5239                 return false;
5240
5241         if (needs_modeset(&new_crtc_state->base))
5242                 return true;
5243
5244         return !new_crtc_state->ips_enabled;
5245 }
5246
5247 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
5248                                        const struct intel_crtc_state *new_crtc_state)
5249 {
5250         if (!new_crtc_state->ips_enabled)
5251                 return false;
5252
5253         if (needs_modeset(&new_crtc_state->base))
5254                 return true;
5255
5256         /*
5257          * We can't read out IPS on broadwell, assume the worst and
5258          * forcibly enable IPS on the first fastset.
5259          */
5260         if (new_crtc_state->update_pipe &&
5261             old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
5262                 return true;
5263
5264         return !old_crtc_state->ips_enabled;
5265 }
5266
5267 static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
5268                           const struct intel_crtc_state *crtc_state)
5269 {
5270         if (!crtc_state->nv12_planes)
5271                 return false;
5272
5273         if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
5274                 return false;
5275
5276         if ((INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv)) ||
5277             IS_CANNONLAKE(dev_priv))
5278                 return true;
5279
5280         return false;
5281 }
5282
/*
 * Run after the planes of a crtc have been updated: flush frontbuffer
 * tracking, update watermarks, re-enable IPS if needed, do FBC and
 * primary-plane post-enable work, and undo display WA #827 when it is
 * no longer required.
 */
static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_atomic_state *old_state = old_crtc_state->base.state;
        struct intel_crtc_state *pipe_config =
                intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state),
                                                crtc);
        struct drm_plane *primary = crtc->base.primary;
        struct drm_plane_state *old_primary_state =
                drm_atomic_get_old_plane_state(old_state, primary);

        intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);

        if (pipe_config->update_wm_post && pipe_config->base.active)
                intel_update_watermarks(crtc);

        if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
                hsw_enable_ips(pipe_config);

        /* Only act on the primary plane if it was part of this commit. */
        if (old_primary_state) {
                struct drm_plane_state *new_primary_state =
                        drm_atomic_get_new_plane_state(old_state, primary);

                intel_fbc_post_update(crtc);

                /* Primary went from hidden to visible (or a modeset made
                 * it visible): run the post-enable work. */
                if (new_primary_state->visible &&
                    (needs_modeset(&pipe_config->base) ||
                     !old_primary_state->visible))
                        intel_post_enable_primary(&crtc->base, pipe_config);
        }

        /* Display WA 827 */
        if (needs_nv12_wa(dev_priv, old_crtc_state) &&
            !needs_nv12_wa(dev_priv, pipe_config)) {
                skl_wa_clkgate(dev_priv, crtc->pipe, false);
                skl_wa_528(dev_priv, crtc->pipe, false);
        }
}
5323
5324 static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
5325                                    struct intel_crtc_state *pipe_config)
5326 {
5327         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
5328         struct drm_device *dev = crtc->base.dev;
5329         struct drm_i915_private *dev_priv = to_i915(dev);
5330         struct drm_atomic_state *old_state = old_crtc_state->base.state;
5331         struct drm_plane *primary = crtc->base.primary;
5332         struct drm_plane_state *old_primary_state =
5333                 drm_atomic_get_old_plane_state(old_state, primary);
5334         bool modeset = needs_modeset(&pipe_config->base);
5335         struct intel_atomic_state *old_intel_state =
5336                 to_intel_atomic_state(old_state);
5337
5338         if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
5339                 hsw_disable_ips(old_crtc_state);
5340
5341         if (old_primary_state) {
5342                 struct intel_plane_state *new_primary_state =
5343                         intel_atomic_get_new_plane_state(old_intel_state,
5344                                                          to_intel_plane(primary));
5345
5346                 intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
5347                 /*
5348                  * Gen2 reports pipe underruns whenever all planes are disabled.
5349                  * So disable underrun reporting before all the planes get disabled.
5350                  */
5351                 if (IS_GEN2(dev_priv) && old_primary_state->visible &&
5352                     (modeset || !new_primary_state->base.visible))
5353                         intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
5354         }
5355
5356         /* Display WA 827 */
5357         if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
5358             needs_nv12_wa(dev_priv, pipe_config)) {
5359                 skl_wa_clkgate(dev_priv, crtc->pipe, true);
5360                 skl_wa_528(dev_priv, crtc->pipe, true);
5361         }
5362
5363         /*
5364          * Vblank time updates from the shadow to live plane control register
5365          * are blocked if the memory self-refresh mode is active at that
5366          * moment. So to make sure the plane gets truly disabled, disable
5367          * first the self-refresh mode. The self-refresh enable bit in turn
5368          * will be checked/applied by the HW only at the next frame start
5369          * event which is after the vblank start event, so we need to have a
5370          * wait-for-vblank between disabling the plane and the pipe.
5371          */
5372         if (HAS_GMCH_DISPLAY(dev_priv) && old_crtc_state->base.active &&
5373             pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
5374                 intel_wait_for_vblank(dev_priv, crtc->pipe);
5375
5376         /*
5377          * IVB workaround: must disable low power watermarks for at least
5378          * one frame before enabling scaling.  LP watermarks can be re-enabled
5379          * when scaling is disabled.
5380          *
5381          * WaCxSRDisabledForSpriteScaling:ivb
5382          */
5383         if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev))
5384                 intel_wait_for_vblank(dev_priv, crtc->pipe);
5385
5386         /*
5387          * If we're doing a modeset, we're done.  No need to do any pre-vblank
5388          * watermark programming here.
5389          */
5390         if (needs_modeset(&pipe_config->base))
5391                 return;
5392
5393         /*
5394          * For platforms that support atomic watermarks, program the
5395          * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
5396          * will be the intermediate values that are safe for both pre- and
5397          * post- vblank; when vblank happens, the 'active' values will be set
5398          * to the final 'target' values and we'll do this again to get the
5399          * optimal watermarks.  For gen9+ platforms, the values we program here
5400          * will be the final target values which will get automatically latched
5401          * at vblank time; no further programming will be necessary.
5402          *
5403          * If a platform hasn't been transitioned to atomic watermarks yet,
5404          * we'll continue to update watermarks the old way, if flags tell
5405          * us to.
5406          */
5407         if (dev_priv->display.initial_watermarks != NULL)
5408                 dev_priv->display.initial_watermarks(old_intel_state,
5409                                                      pipe_config);
5410         else if (pipe_config->update_wm_pre)
5411                 intel_update_watermarks(crtc);
5412 }
5413
5414 static void intel_crtc_disable_planes(struct intel_crtc *crtc, unsigned plane_mask)
5415 {
5416         struct drm_device *dev = crtc->base.dev;
5417         struct intel_plane *plane;
5418         unsigned fb_bits = 0;
5419
5420         intel_crtc_dpms_overlay_disable(crtc);
5421
5422         for_each_intel_plane_on_crtc(dev, crtc, plane) {
5423                 if (plane_mask & BIT(plane->id)) {
5424                         plane->disable_plane(plane, crtc);
5425
5426                         fb_bits |= plane->frontbuffer_bit;
5427                 }
5428         }
5429
5430         intel_frontbuffer_flip(to_i915(dev), fb_bits);
5431 }
5432
5433 static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
5434                                           struct intel_crtc_state *crtc_state,
5435                                           struct drm_atomic_state *old_state)
5436 {
5437         struct drm_connector_state *conn_state;
5438         struct drm_connector *conn;
5439         int i;
5440
5441         for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5442                 struct intel_encoder *encoder =
5443                         to_intel_encoder(conn_state->best_encoder);
5444
5445                 if (conn_state->crtc != crtc)
5446                         continue;
5447
5448                 if (encoder->pre_pll_enable)
5449                         encoder->pre_pll_enable(encoder, crtc_state, conn_state);
5450         }
5451 }
5452
5453 static void intel_encoders_pre_enable(struct drm_crtc *crtc,
5454                                       struct intel_crtc_state *crtc_state,
5455                                       struct drm_atomic_state *old_state)
5456 {
5457         struct drm_connector_state *conn_state;
5458         struct drm_connector *conn;
5459         int i;
5460
5461         for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5462                 struct intel_encoder *encoder =
5463                         to_intel_encoder(conn_state->best_encoder);
5464
5465                 if (conn_state->crtc != crtc)
5466                         continue;
5467
5468                 if (encoder->pre_enable)
5469                         encoder->pre_enable(encoder, crtc_state, conn_state);
5470         }
5471 }
5472
5473 static void intel_encoders_enable(struct drm_crtc *crtc,
5474                                   struct intel_crtc_state *crtc_state,
5475                                   struct drm_atomic_state *old_state)
5476 {
5477         struct drm_connector_state *conn_state;
5478         struct drm_connector *conn;
5479         int i;
5480
5481         for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5482                 struct intel_encoder *encoder =
5483                         to_intel_encoder(conn_state->best_encoder);
5484
5485                 if (conn_state->crtc != crtc)
5486                         continue;
5487
5488                 encoder->enable(encoder, crtc_state, conn_state);
5489                 intel_opregion_notify_encoder(encoder, true);
5490         }
5491 }
5492
5493 static void intel_encoders_disable(struct drm_crtc *crtc,
5494                                    struct intel_crtc_state *old_crtc_state,
5495                                    struct drm_atomic_state *old_state)
5496 {
5497         struct drm_connector_state *old_conn_state;
5498         struct drm_connector *conn;
5499         int i;
5500
5501         for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5502                 struct intel_encoder *encoder =
5503                         to_intel_encoder(old_conn_state->best_encoder);
5504
5505                 if (old_conn_state->crtc != crtc)
5506                         continue;
5507
5508                 intel_opregion_notify_encoder(encoder, false);
5509                 encoder->disable(encoder, old_crtc_state, old_conn_state);
5510         }
5511 }
5512
5513 static void intel_encoders_post_disable(struct drm_crtc *crtc,
5514                                         struct intel_crtc_state *old_crtc_state,
5515                                         struct drm_atomic_state *old_state)
5516 {
5517         struct drm_connector_state *old_conn_state;
5518         struct drm_connector *conn;
5519         int i;
5520
5521         for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5522                 struct intel_encoder *encoder =
5523                         to_intel_encoder(old_conn_state->best_encoder);
5524
5525                 if (old_conn_state->crtc != crtc)
5526                         continue;
5527
5528                 if (encoder->post_disable)
5529                         encoder->post_disable(encoder, old_crtc_state, old_conn_state);
5530         }
5531 }
5532
5533 static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
5534                                             struct intel_crtc_state *old_crtc_state,
5535                                             struct drm_atomic_state *old_state)
5536 {
5537         struct drm_connector_state *old_conn_state;
5538         struct drm_connector *conn;
5539         int i;
5540
5541         for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5542                 struct intel_encoder *encoder =
5543                         to_intel_encoder(old_conn_state->best_encoder);
5544
5545                 if (old_conn_state->crtc != crtc)
5546                         continue;
5547
5548                 if (encoder->post_pll_disable)
5549                         encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
5550         }
5551 }
5552
/*
 * Full enable sequence for an ILK-style (PCH split) pipe: underrun
 * suppression, DPLL/FDI preparation, pipe timings, pipeconf, encoder
 * pre-enable, pfit, LUT, watermarks, pipe enable, PCH, then encoders.
 * The statement order below follows the required hardware sequence;
 * do not reorder casually.
 */
static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
				 struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);

	/* Enabling an already-active pipe is a driver bug. */
	if (WARN_ON(intel_crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	/* PCH outputs need the shared DPLL prepared before use. */
	if (intel_crtc->config->has_pch_encoder)
		intel_prepare_shared_dpll(intel_crtc);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);
	intel_set_pipe_src_size(intel_crtc);

	/* FDI link M/N values only apply when a PCH encoder is present. */
	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	ironlake_set_pipeconf(crtc);

	intel_crtc->active = true;

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	if (intel_crtc->config->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ironlake_fdi_pll_enable(intel_crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(&pipe_config->base);

	/* Program initial watermarks before the pipe starts running. */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state, intel_crtc->config);
	intel_enable_pipe(pipe_config);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_pch_enable(old_intel_state, pipe_config);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev, intel_crtc->pipe);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (intel_crtc->config->has_pch_encoder) {
		intel_wait_for_vblank(dev_priv, pipe);
		intel_wait_for_vblank(dev_priv, pipe);
	}
	/* Re-arm the underrun reporting suppressed at the top. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
5646
5647 /* IPS only exists on ULT machines and is tied to pipe A. */
5648 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
5649 {
5650         return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
5651 }
5652
5653 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
5654                                             enum pipe pipe, bool apply)
5655 {
5656         u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
5657         u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
5658
5659         if (apply)
5660                 val |= mask;
5661         else
5662                 val &= ~mask;
5663
5664         I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
5665 }
5666
5667 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
5668 {
5669         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5670         enum pipe pipe = crtc->pipe;
5671         uint32_t val;
5672
5673         val = MBUS_DBOX_BW_CREDIT(1) | MBUS_DBOX_A_CREDIT(2);
5674
5675         /* Program B credit equally to all pipes */
5676         val |= MBUS_DBOX_B_CREDIT(24 / INTEL_INFO(dev_priv)->num_pipes);
5677
5678         I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
5679 }
5680
/*
 * Full enable sequence for HSW+ (DDI) pipes, including the gen9+/gen11
 * additions: shared DPLL, PLL/port mapping (ICL), pipe timings, pipemisc,
 * CSC, pfit/scaler, LUT, DDI transcoder, watermarks, MBUS (ICL), pipe,
 * PCH (LPT), MST payload, then encoders. The ordering follows the
 * required hardware sequence; do not reorder casually.
 */
static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
				struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	bool psl_clkgate_wa;
	u32 pipe_chicken;

	/* Enabling an already-active pipe is a driver bug. */
	if (WARN_ON(intel_crtc->active))
		return;

	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);

	if (intel_crtc->config->shared_dpll)
		intel_enable_shared_dpll(intel_crtc);

	/* Gen11 needs the DPLL explicitly mapped to its DDI port. */
	if (INTEL_GEN(dev_priv) >= 11)
		icl_map_plls_to_ports(crtc, pipe_config, old_state);

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	/* DSI transcoders program their own timings. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_set_pipe_timings(intel_crtc);

	intel_set_pipe_src_size(intel_crtc);

	/* PIPE_MULT does not exist for the eDP or DSI transcoders. */
	if (cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(cpu_transcoder)) {
		I915_WRITE(PIPE_MULT(cpu_transcoder),
			   intel_crtc->config->pixel_multiplier - 1);
	}

	/* FDI link M/N values only apply when a PCH encoder is present. */
	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	if (!transcoder_is_dsi(cpu_transcoder))
		haswell_set_pipeconf(crtc);

	haswell_set_pipemisc(crtc);

	intel_color_set_csc(&pipe_config->base);

	intel_crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
	psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
			 intel_crtc->config->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	/* Gen9+ uses the pipe scalers for panel fitting. */
	if (INTEL_GEN(dev_priv) >= 9)
		skylake_pfit_enable(intel_crtc);
	else
		ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(&pipe_config->base);

	/*
	 * Display WA #1153: enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	if (INTEL_GEN(dev_priv) >= 11) {
		pipe_chicken = I915_READ(PIPE_CHICKEN(pipe));
		if (!(pipe_chicken & PER_PIXEL_ALPHA_BYPASS_EN))
			I915_WRITE_FW(PIPE_CHICKEN(pipe),
				      pipe_chicken | PER_PIXEL_ALPHA_BYPASS_EN);
	}

	intel_ddi_set_pipe_settings(pipe_config);
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_enable_transcoder_func(pipe_config);

	/* Program initial watermarks before the pipe starts running. */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state, pipe_config);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_pipe_mbus_enable(intel_crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_enable_pipe(pipe_config);

	if (intel_crtc->config->has_pch_encoder)
		lpt_pch_enable(old_intel_state, pipe_config);

	if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
		intel_ddi_set_vc_payload_alloc(pipe_config, true);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);

	/* WA #1180 cleanup: wait a frame, then re-enable clock gating. */
	if (psl_clkgate_wa) {
		intel_wait_for_vblank(dev_priv, pipe);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
	}
}
5801
5802 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
5803 {
5804         struct drm_device *dev = crtc->base.dev;
5805         struct drm_i915_private *dev_priv = to_i915(dev);
5806         int pipe = crtc->pipe;
5807
5808         /* To avoid upsetting the power well on haswell only disable the pfit if
5809          * it's in use. The hw state code will make sure we get this right. */
5810         if (force || crtc->config->pch_pfit.enabled) {
5811                 I915_WRITE(PF_CTL(pipe), 0);
5812                 I915_WRITE(PF_WIN_POS(pipe), 0);
5813                 I915_WRITE(PF_WIN_SZ(pipe), 0);
5814         }
5815 }
5816
/*
 * Full disable sequence for an ILK-style (PCH split) pipe: encoders,
 * vblank, pipe, pfit, FDI, encoder post-disable, PCH transcoder and
 * DPLL select cleanup, FDI PLL. Teardown order mirrors the enable
 * sequence in reverse; do not reorder casually.
 */
static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
				  struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(old_crtc_state);

	ironlake_pfit_disable(intel_crtc, false);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_fdi_disable(crtc);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	if (intel_crtc->config->has_pch_encoder) {
		ironlake_disable_pch_transcoder(dev_priv, pipe);

		/* CPT needs extra cleanup of the per-transcoder DP routing. */
		if (HAS_PCH_CPT(dev_priv)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		ironlake_fdi_pll_disable(intel_crtc);
	}

	/* Re-arm the underrun reporting suppressed at the top. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
5875
/*
 * Full disable sequence for HSW+ (DDI) pipes: encoders, vblank, pipe,
 * MST payload, DDI transcoder, scaler/pfit, encoder post-disable, and
 * (gen11) PLL/port unmapping. Teardown mirrors haswell_crtc_enable()
 * in reverse.
 */
static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
				 struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_disable_pipe(old_crtc_state);

	if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
		intel_ddi_set_vc_payload_alloc(old_crtc_state, false);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_disable_transcoder_func(old_crtc_state);

	/* Gen9+ uses the pipe scalers for panel fitting. */
	if (INTEL_GEN(dev_priv) >= 9)
		skylake_scaler_disable(intel_crtc);
	else
		ironlake_pfit_disable(intel_crtc, false);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	/* Undo the DPLL-to-port mapping done at enable time on gen11. */
	if (INTEL_GEN(dev_priv) >= 11)
		icl_unmap_plls_to_ports(crtc, old_crtc_state, old_state);
}
5909
5910 static void i9xx_pfit_enable(struct intel_crtc *crtc)
5911 {
5912         struct drm_device *dev = crtc->base.dev;
5913         struct drm_i915_private *dev_priv = to_i915(dev);
5914         struct intel_crtc_state *pipe_config = crtc->config;
5915
5916         if (!pipe_config->gmch_pfit.control)
5917                 return;
5918
5919         /*
5920          * The panel fitter should only be adjusted whilst the pipe is disabled,
5921          * according to register description and PRM.
5922          */
5923         WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
5924         assert_pipe_disabled(dev_priv, crtc->pipe);
5925
5926         I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
5927         I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
5928
5929         /* Border color in case we don't scale up to the full screen. Black by
5930          * default, change to something else for debugging. */
5931         I915_WRITE(BCLRPAT(crtc->pipe), 0);
5932 }
5933
5934 bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
5935 {
5936         if (IS_ICELAKE(dev_priv))
5937                 return port >= PORT_C && port <= PORT_F;
5938
5939         return false;
5940 }
5941
5942 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
5943 {
5944         if (!intel_port_is_tc(dev_priv, port))
5945                 return PORT_TC_NONE;
5946
5947         return port - PORT_C;
5948 }
5949
/*
 * Map a DDI port to the power domain covering its lanes. Unknown ports
 * are flagged via MISSING_CASE and fall back to POWER_DOMAIN_PORT_OTHER.
 */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	case PORT_F:
		return POWER_DOMAIN_PORT_DDI_F_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}
5970
5971 static u64 get_crtc_power_domains(struct drm_crtc *crtc,
5972                                   struct intel_crtc_state *crtc_state)
5973 {
5974         struct drm_device *dev = crtc->dev;
5975         struct drm_i915_private *dev_priv = to_i915(dev);
5976         struct drm_encoder *encoder;
5977         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5978         enum pipe pipe = intel_crtc->pipe;
5979         u64 mask;
5980         enum transcoder transcoder = crtc_state->cpu_transcoder;
5981
5982         if (!crtc_state->base.active)
5983                 return 0;
5984
5985         mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
5986         mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
5987         if (crtc_state->pch_pfit.enabled ||
5988             crtc_state->pch_pfit.force_thru)
5989                 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
5990
5991         drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
5992                 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
5993
5994                 mask |= BIT_ULL(intel_encoder->power_domain);
5995         }
5996
5997         if (HAS_DDI(dev_priv) && crtc_state->has_audio)
5998                 mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
5999
6000         if (crtc_state->shared_dpll)
6001                 mask |= BIT_ULL(POWER_DOMAIN_PLLS);
6002
6003         return mask;
6004 }
6005
6006 static u64
6007 modeset_get_crtc_power_domains(struct drm_crtc *crtc,
6008                                struct intel_crtc_state *crtc_state)
6009 {
6010         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6011         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6012         enum intel_display_power_domain domain;
6013         u64 domains, new_domains, old_domains;
6014
6015         old_domains = intel_crtc->enabled_power_domains;
6016         intel_crtc->enabled_power_domains = new_domains =
6017                 get_crtc_power_domains(crtc, crtc_state);
6018
6019         domains = new_domains & ~old_domains;
6020
6021         for_each_power_domain(domain, domains)
6022                 intel_display_power_get(dev_priv, domain);
6023
6024         return old_domains & ~new_domains;
6025 }
6026
/*
 * Drop one power-domain reference for each domain bit set in @domains
 * (the mask returned by modeset_get_crtc_power_domains()).
 */
static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
				      u64 domains)
{
	enum intel_display_power_domain domain;

	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
}
6035
6036 static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
6037                                    struct drm_atomic_state *old_state)
6038 {
6039         struct intel_atomic_state *old_intel_state =
6040                 to_intel_atomic_state(old_state);
6041         struct drm_crtc *crtc = pipe_config->base.crtc;
6042         struct drm_device *dev = crtc->dev;
6043         struct drm_i915_private *dev_priv = to_i915(dev);
6044         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6045         int pipe = intel_crtc->pipe;
6046
6047         if (WARN_ON(intel_crtc->active))
6048                 return;
6049
6050         if (intel_crtc_has_dp_encoder(intel_crtc->config))
6051                 intel_dp_set_m_n(intel_crtc, M1_N1);
6052
6053         intel_set_pipe_timings(intel_crtc);
6054         intel_set_pipe_src_size(intel_crtc);
6055
6056         if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
6057                 struct drm_i915_private *dev_priv = to_i915(dev);
6058
6059                 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
6060                 I915_WRITE(CHV_CANVAS(pipe), 0);
6061         }
6062
6063         i9xx_set_pipeconf(intel_crtc);
6064
6065         intel_color_set_csc(&pipe_config->base);
6066
6067         intel_crtc->active = true;
6068
6069         intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6070
6071         intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
6072
6073         if (IS_CHERRYVIEW(dev_priv)) {
6074                 chv_prepare_pll(intel_crtc, intel_crtc->config);
6075                 chv_enable_pll(intel_crtc, intel_crtc->config);
6076         } else {
6077                 vlv_prepare_pll(intel_crtc, intel_crtc->config);
6078                 vlv_enable_pll(intel_crtc, intel_crtc->config);
6079         }
6080
6081         intel_encoders_pre_enable(crtc, pipe_config, old_state);
6082
6083         i9xx_pfit_enable(intel_crtc);
6084
6085         intel_color_load_luts(&pipe_config->base);
6086
6087         dev_priv->display.initial_watermarks(old_intel_state,
6088                                              pipe_config);
6089         intel_enable_pipe(pipe_config);
6090
6091         assert_vblank_disabled(crtc);
6092         drm_crtc_vblank_on(crtc);
6093
6094         intel_encoders_enable(crtc, pipe_config, old_state);
6095 }
6096
6097 static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
6098 {
6099         struct drm_device *dev = crtc->base.dev;
6100         struct drm_i915_private *dev_priv = to_i915(dev);
6101
6102         I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
6103         I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
6104 }
6105
/*
 * Full enable sequence for gen2-4 pipes: PLL dividers, timings,
 * pipeconf, underrun reporting (gen3+), PLL, encoder pre-enable,
 * pfit, LUT, watermarks, pipe, then encoders. The ordering follows
 * the required hardware sequence; do not reorder casually.
 */
static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
			     struct drm_atomic_state *old_state)
{
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;

	/* Enabling an already-active pipe is a driver bug. */
	if (WARN_ON(intel_crtc->active))
		return;

	i9xx_set_pll_dividers(intel_crtc);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);
	intel_set_pipe_src_size(intel_crtc);

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	/* Gen2 underrun handling is done elsewhere (see pre_plane_update). */
	if (!IS_GEN2(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	i9xx_enable_pll(intel_crtc, pipe_config);

	i9xx_pfit_enable(intel_crtc);

	intel_color_load_luts(&pipe_config->base);

	/* Program initial watermarks before the pipe starts running. */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state,
						     intel_crtc->config);
	else
		intel_update_watermarks(intel_crtc);
	intel_enable_pipe(pipe_config);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);
}
6155
6156 static void i9xx_pfit_disable(struct intel_crtc *crtc)
6157 {
6158         struct drm_device *dev = crtc->base.dev;
6159         struct drm_i915_private *dev_priv = to_i915(dev);
6160
6161         if (!crtc->config->gmch_pfit.control)
6162                 return;
6163
6164         assert_pipe_disabled(dev_priv, crtc->pipe);
6165
6166         DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
6167                       I915_READ(PFIT_CONTROL));
6168         I915_WRITE(PFIT_CONTROL, 0);
6169 }
6170
/*
 * Full crtc disable sequence for GMCH (pre-PCH) platforms: quiesce planes,
 * shut down encoders, pipe, pfit and finally the PLL. The ordering of the
 * steps below is significant; do not reorder.
 */
static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
                              struct drm_atomic_state *old_state)
{
        struct drm_crtc *crtc = old_crtc_state->base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;

        /*
         * On gen2 planes are double buffered but the pipe isn't, so we must
         * wait for planes to fully turn off before disabling the pipe.
         */
        if (IS_GEN2(dev_priv))
                intel_wait_for_vblank(dev_priv, pipe);

        intel_encoders_disable(crtc, old_crtc_state, old_state);

        drm_crtc_vblank_off(crtc);
        assert_vblank_disabled(crtc);

        intel_disable_pipe(old_crtc_state);

        i9xx_pfit_disable(intel_crtc);

        intel_encoders_post_disable(crtc, old_crtc_state, old_state);

        /* With DSI the DPLL was never used, so there is nothing to disable. */
        if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
                if (IS_CHERRYVIEW(dev_priv))
                        chv_disable_pll(dev_priv, pipe);
                else if (IS_VALLEYVIEW(dev_priv))
                        vlv_disable_pll(dev_priv, pipe);
                else
                        i9xx_disable_pll(intel_crtc);
        }

        intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);

        /* Underrun reporting was never enabled on gen2 (see enable path). */
        if (!IS_GEN2(dev_priv))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

        if (!dev_priv->display.initial_watermarks)
                intel_update_watermarks(intel_crtc);

        /* clock the pipe down to 640x480@60 to potentially save power */
        if (IS_I830(dev_priv))
                i830_enable_pipe(dev_priv, pipe);
}
6219
/*
 * Disable a crtc outside of a full atomic commit (e.g. during initial hw
 * state sanitization), then scrub the related software state: crtc, encoder
 * links, watermarks, shared DPLLs and power domain references.
 * The caller must hold all relevant modeset locks via @ctx.
 */
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
                                        struct drm_modeset_acquire_ctx *ctx)
{
        struct intel_encoder *encoder;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        enum intel_display_power_domain domain;
        struct intel_plane *plane;
        u64 domains;
        struct drm_atomic_state *state;
        struct intel_crtc_state *crtc_state;
        int ret;

        if (!intel_crtc->active)
                return;

        /* Turn off any planes still visible before touching the pipe. */
        for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
                const struct intel_plane_state *plane_state =
                        to_intel_plane_state(plane->base.state);

                if (plane_state->base.visible)
                        intel_plane_disable_noatomic(intel_crtc, plane);
        }

        /* A transient atomic state is needed just to drive crtc_disable(). */
        state = drm_atomic_state_alloc(crtc->dev);
        if (!state) {
                DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
                              crtc->base.id, crtc->name);
                return;
        }

        state->acquire_ctx = ctx;

        /* Everything's already locked, -EDEADLK can't happen. */
        crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
        ret = drm_atomic_add_affected_connectors(state, crtc);

        WARN_ON(IS_ERR(crtc_state) || ret);

        dev_priv->display.crtc_disable(crtc_state, state);

        drm_atomic_state_put(state);

        DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
                      crtc->base.id, crtc->name);

        /* Bring the software state in line with the now-disabled hardware. */
        WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
        crtc->state->active = false;
        intel_crtc->active = false;
        crtc->enabled = false;
        crtc->state->connector_mask = 0;
        crtc->state->encoder_mask = 0;

        for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
                encoder->base.crtc = NULL;

        intel_fbc_disable(intel_crtc);
        intel_update_watermarks(intel_crtc);
        intel_disable_shared_dpll(intel_crtc);

        /* Release the power domain references taken when enabling the crtc. */
        domains = intel_crtc->enabled_power_domains;
        for_each_power_domain(domain, domains)
                intel_display_power_put(dev_priv, domain);
        intel_crtc->enabled_power_domains = 0;

        dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
        dev_priv->min_cdclk[intel_crtc->pipe] = 0;
        dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
}
6289
6290 /*
6291  * turn all crtc's off, but do not adjust state
6292  * This has to be paired with a call to intel_modeset_setup_hw_state.
6293  */
6294 int intel_display_suspend(struct drm_device *dev)
6295 {
6296         struct drm_i915_private *dev_priv = to_i915(dev);
6297         struct drm_atomic_state *state;
6298         int ret;
6299
6300         state = drm_atomic_helper_suspend(dev);
6301         ret = PTR_ERR_OR_ZERO(state);
6302         if (ret)
6303                 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
6304         else
6305                 dev_priv->modeset_restore_state = state;
6306         return ret;
6307 }
6308
/* Generic destroy hook for encoders embedded in a struct intel_encoder. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
        drm_encoder_cleanup(encoder);
        kfree(to_intel_encoder(encoder));
}
6316
/* Cross check the actual hw state with our own modeset state tracking (and its
 * internal consistency): a connector that reads back as enabled must have an
 * active crtc and a matching encoder, and vice versa. */
static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
                                         struct drm_connector_state *conn_state)
{
        struct intel_connector *connector = to_intel_connector(conn_state->connector);

        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
                      connector->base.base.id,
                      connector->base.name);

        if (connector->get_hw_state(connector)) {
                struct intel_encoder *encoder = connector->encoder;

                I915_STATE_WARN(!crtc_state,
                         "connector enabled without attached crtc\n");

                if (!crtc_state)
                        return;

                I915_STATE_WARN(!crtc_state->active,
                      "connector is active, but attached crtc isn't\n");

                /* MST connectors share encoders; the checks below don't apply. */
                if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
                        return;

                I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
                        "atomic encoder doesn't match attached encoder\n");

                I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
                        "attached encoder crtc differs from connector crtc\n");
        } else {
                I915_STATE_WARN(crtc_state && crtc_state->active,
                        "attached crtc is active, but connector isn't\n");
                I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
                        "best encoder set without crtc!\n");
        }
}
6355
/*
 * Reset the connector's atomic state to a freshly allocated
 * intel_digital_connector_state. Returns 0 on success, -ENOMEM otherwise.
 */
int intel_connector_init(struct intel_connector *connector)
{
        struct intel_digital_connector_state *conn_state;

        /*
         * Allocate enough memory to hold intel_digital_connector_state,
         * This might be a few bytes too many, but for connectors that don't
         * need it we'll free the state and allocate a smaller one on the first
         * successful commit anyway.
         */
        conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL);
        if (!conn_state)
                return -ENOMEM;

        __drm_atomic_helper_connector_reset(&connector->base,
                                            &conn_state->base);

        return 0;
}
6375
6376 struct intel_connector *intel_connector_alloc(void)
6377 {
6378         struct intel_connector *connector;
6379
6380         connector = kzalloc(sizeof *connector, GFP_KERNEL);
6381         if (!connector)
6382                 return NULL;
6383
6384         if (intel_connector_init(connector) < 0) {
6385                 kfree(connector);
6386                 return NULL;
6387         }
6388
6389         return connector;
6390 }
6391
6392 /*
6393  * Free the bits allocated by intel_connector_alloc.
6394  * This should only be used after intel_connector_alloc has returned
6395  * successfully, and before drm_connector_init returns successfully.
6396  * Otherwise the destroy callbacks for the connector and the state should
6397  * take care of proper cleanup/free
6398  */
6399 void intel_connector_free(struct intel_connector *connector)
6400 {
6401         kfree(to_intel_digital_connector_state(connector->base.state));
6402         kfree(connector);
6403 }
6404
6405 /* Simple connector->get_hw_state implementation for encoders that support only
6406  * one connector and no cloning and hence the encoder state determines the state
6407  * of the connector. */
6408 bool intel_connector_get_hw_state(struct intel_connector *connector)
6409 {
6410         enum pipe pipe = 0;
6411         struct intel_encoder *encoder = connector->encoder;
6412
6413         return encoder->get_hw_state(encoder, &pipe);
6414 }
6415
6416 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
6417 {
6418         if (crtc_state->base.enable && crtc_state->has_pch_encoder)
6419                 return crtc_state->fdi_lanes;
6420
6421         return 0;
6422 }
6423
/*
 * Validate the FDI lane count requested for @pipe against the platform's
 * limits and against the lanes consumed by the other pipes (on Ivybridge,
 * pipes B and C share the FDI link B/C lanes).
 * Returns 0 when the configuration fits, -EINVAL otherwise.
 */
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
                                     struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_atomic_state *state = pipe_config->base.state;
        struct intel_crtc *other_crtc;
        struct intel_crtc_state *other_crtc_state;

        DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
                      pipe_name(pipe), pipe_config->fdi_lanes);
        if (pipe_config->fdi_lanes > 4) {
                DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
                              pipe_name(pipe), pipe_config->fdi_lanes);
                return -EINVAL;
        }

        /* HSW/BDW are capped at 2 lanes regardless of pipe. */
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                if (pipe_config->fdi_lanes > 2) {
                        DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
                                      pipe_config->fdi_lanes);
                        return -EINVAL;
                } else {
                        return 0;
                }
        }

        /* With only two pipes there is no lane sharing to worry about. */
        if (INTEL_INFO(dev_priv)->num_pipes == 2)
                return 0;

        /* Ivybridge 3 pipe is really complicated */
        switch (pipe) {
        case PIPE_A:
                return 0;
        case PIPE_B:
                if (pipe_config->fdi_lanes <= 2)
                        return 0;

                /* B wants >2 lanes: that only works if C uses none at all. */
                other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
                other_crtc_state =
                        intel_atomic_get_crtc_state(state, other_crtc);
                if (IS_ERR(other_crtc_state))
                        return PTR_ERR(other_crtc_state);

                if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
                        DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
                                      pipe_name(pipe), pipe_config->fdi_lanes);
                        return -EINVAL;
                }
                return 0;
        case PIPE_C:
                if (pipe_config->fdi_lanes > 2) {
                        DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
                                      pipe_name(pipe), pipe_config->fdi_lanes);
                        return -EINVAL;
                }

                /* C can only run if B leaves it some lanes (<= 2 used). */
                other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
                other_crtc_state =
                        intel_atomic_get_crtc_state(state, other_crtc);
                if (IS_ERR(other_crtc_state))
                        return PTR_ERR(other_crtc_state);

                if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
                        DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
                        return -EINVAL;
                }
                return 0;
        default:
                BUG();
        }
}
6495
#define RETRY 1
/*
 * Compute the FDI lane count and M/N values for @pipe_config. If the lane
 * check fails, the pipe bpp is reduced (down to a minimum of 6 bpc) and the
 * computation is retried; RETRY is returned so the caller recomputes the
 * whole config with the lowered bpp.
 */
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
                                       struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = intel_crtc->base.dev;
        const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
        int lane, link_bw, fdi_dotclock, ret;
        bool needs_recompute = false;

retry:
        /* FDI is a binary signal running at ~2.7GHz, encoding
         * each output octet as 10 bits. The actual frequency
         * is stored as a divider into a 100MHz clock, and the
         * mode pixel clock is stored in units of 1KHz.
         * Hence the bw of each lane in terms of the mode signal
         * is:
         */
        link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);

        fdi_dotclock = adjusted_mode->crtc_clock;

        lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
                                           pipe_config->pipe_bpp);

        pipe_config->fdi_lanes = lane;

        intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
                               link_bw, &pipe_config->fdi_m_n, false);

        ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
        /* Drop 2 bits per component (6*3 bpp floor) and try again. */
        if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
                pipe_config->pipe_bpp -= 2*3;
                DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
                              pipe_config->pipe_bpp);
                needs_recompute = true;
                pipe_config->bw_constrained = true;

                goto retry;
        }

        if (needs_recompute)
                return RETRY;

        return ret;
}
6541
/*
 * Whether this crtc state could use IPS at all (hardware capability,
 * module parameter, bpp and cdclk headroom). The final decision is made
 * in hsw_compute_ips_config().
 */
bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        /* IPS only exists on ULT machines and is tied to pipe A. */
        if (!hsw_crtc_supports_ips(crtc))
                return false;

        /* Respect the enable_ips module parameter. */
        if (!i915_modparams.enable_ips)
                return false;

        /* IPS is limited to 8 bpc (24 bpp) pipes. */
        if (crtc_state->pipe_bpp > 24)
                return false;

        /*
         * We compare against max which means we must take
         * the increased cdclk requirement into account when
         * calculating the new cdclk.
         *
         * Should measure whether using a lower cdclk w/o IPS
         * would be a win or not.
         */
        if (IS_BROADWELL(dev_priv) &&
            crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
                return false;

        return true;
}
6570
6571 static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
6572 {
6573         struct drm_i915_private *dev_priv =
6574                 to_i915(crtc_state->base.crtc->dev);
6575         struct intel_atomic_state *intel_state =
6576                 to_intel_atomic_state(crtc_state->base.state);
6577
6578         if (!hsw_crtc_state_ips_capable(crtc_state))
6579                 return false;
6580
6581         if (crtc_state->ips_force_disable)
6582                 return false;
6583
6584         /* IPS should be fine as long as at least one plane is enabled. */
6585         if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
6586                 return false;
6587
6588         /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
6589         if (IS_BROADWELL(dev_priv) &&
6590             crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
6591                 return false;
6592
6593         return true;
6594 }
6595
6596 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6597 {
6598         const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6599
6600         /* GDG double wide on either pipe, otherwise pipe A only */
6601         return INTEL_GEN(dev_priv) < 4 &&
6602                 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6603 }
6604
6605 static uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
6606 {
6607         uint32_t pixel_rate;
6608
6609         pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
6610
6611         /*
6612          * We only use IF-ID interlacing. If we ever use
6613          * PF-ID we'll need to adjust the pixel_rate here.
6614          */
6615
6616         if (pipe_config->pch_pfit.enabled) {
6617                 uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
6618                 uint32_t pfit_size = pipe_config->pch_pfit.size;
6619
6620                 pipe_w = pipe_config->pipe_src_w;
6621                 pipe_h = pipe_config->pipe_src_h;
6622
6623                 pfit_w = (pfit_size >> 16) & 0xFFFF;
6624                 pfit_h = pfit_size & 0xFFFF;
6625                 if (pipe_w < pfit_w)
6626                         pipe_w = pfit_w;
6627                 if (pipe_h < pfit_h)
6628                         pipe_h = pfit_h;
6629
6630                 if (WARN_ON(!pfit_w || !pfit_h))
6631                         return pixel_rate;
6632
6633                 pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
6634                                      pfit_w * pfit_h);
6635         }
6636
6637         return pixel_rate;
6638 }
6639
6640 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
6641 {
6642         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
6643
6644         if (HAS_GMCH_DISPLAY(dev_priv))
6645                 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
6646                 crtc_state->pixel_rate =
6647                         crtc_state->base.adjusted_mode.crtc_clock;
6648         else
6649                 crtc_state->pixel_rate =
6650                         ilk_pipe_pixel_rate(crtc_state);
6651 }
6652
/*
 * Validate and finalize the crtc configuration: dotclock limits (including
 * gen < 4 double wide mode), YCBCR420/CTM exclusivity, even-width
 * requirements, the Cantiga+ hsync workaround, pixel rate and FDI setup.
 * Returns 0 on success, -EINVAL on an unsupportable config, or RETRY from
 * the FDI code when the caller must recompute with a lowered bpp.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
                                     struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
        int clock_limit = dev_priv->max_dotclk_freq;

        if (INTEL_GEN(dev_priv) < 4) {
                clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

                /*
                 * Enable double wide mode when the dot clock
                 * is > 90% of the (display) core speed.
                 */
                if (intel_crtc_supports_double_wide(crtc) &&
                    adjusted_mode->crtc_clock > clock_limit) {
                        clock_limit = dev_priv->max_dotclk_freq;
                        pipe_config->double_wide = true;
                }
        }

        if (adjusted_mode->crtc_clock > clock_limit) {
                DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
                              adjusted_mode->crtc_clock, clock_limit,
                              yesno(pipe_config->double_wide));
                return -EINVAL;
        }

        if (pipe_config->ycbcr420 && pipe_config->base.ctm) {
                /*
                 * There is only one pipe CSC unit per pipe, and we need that
                 * for output conversion from RGB->YCBCR. So if CTM is already
                 * applied we can't support YCBCR420 output.
                 */
                DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
                return -EINVAL;
        }

        /*
         * Pipe horizontal size must be even in:
         * - DVO ganged mode
         * - LVDS dual channel mode
         * - Double wide pipe
         */
        if (pipe_config->pipe_src_w & 1) {
                if (pipe_config->double_wide) {
                        DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
                        return -EINVAL;
                }

                if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
                    intel_is_dual_link_lvds(dev)) {
                        DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
                        return -EINVAL;
                }
        }

        /* Cantiga+ cannot handle modes with a hsync front porch of 0.
         * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
         */
        if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
                adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
                return -EINVAL;

        intel_crtc_compute_pixel_rate(pipe_config);

        /* PCH encoders route through FDI, which has its own constraints. */
        if (pipe_config->has_pch_encoder)
                return ironlake_fdi_compute_config(crtc, pipe_config);

        return 0;
}
6725
6726 static void
6727 intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
6728 {
6729         while (*num > DATA_LINK_M_N_MASK ||
6730                *den > DATA_LINK_M_N_MASK) {
6731                 *num >>= 1;
6732                 *den >>= 1;
6733         }
6734 }
6735
6736 static void compute_m_n(unsigned int m, unsigned int n,
6737                         uint32_t *ret_m, uint32_t *ret_n,
6738                         bool constant_n)
6739 {
6740         /*
6741          * Several DP dongles in particular seem to be fussy about
6742          * too large link M/N values. Give N value as 0x8000 that
6743          * should be acceptable by specific devices. 0x8000 is the
6744          * specified fixed N value for asynchronous clock mode,
6745          * which the devices expect also in synchronous clock mode.
6746          */
6747         if (constant_n)
6748                 *ret_n = 0x8000;
6749         else
6750                 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
6751
6752         *ret_m = div_u64((uint64_t) m * *ret_n, n);
6753         intel_reduce_m_n_ratio(ret_m, ret_n);
6754 }
6755
6756 void
6757 intel_link_compute_m_n(int bits_per_pixel, int nlanes,
6758                        int pixel_clock, int link_clock,
6759                        struct intel_link_m_n *m_n,
6760                        bool constant_n)
6761 {
6762         m_n->tu = 64;
6763
6764         compute_m_n(bits_per_pixel * pixel_clock,
6765                     link_clock * nlanes * 8,
6766                     &m_n->gmch_m, &m_n->gmch_n,
6767                     constant_n);
6768
6769         compute_m_n(pixel_clock, link_clock,
6770                     &m_n->link_m, &m_n->link_n,
6771                     constant_n);
6772 }
6773
6774 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
6775 {
6776         if (i915_modparams.panel_use_ssc >= 0)
6777                 return i915_modparams.panel_use_ssc != 0;
6778         return dev_priv->vbt.lvds_use_ssc
6779                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
6780 }
6781
6782 static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
6783 {
6784         return (1 << dpll->n) << 16 | dpll->m2;
6785 }
6786
6787 static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
6788 {
6789         return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
6790 }
6791
6792 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
6793                                      struct intel_crtc_state *crtc_state,
6794                                      struct dpll *reduced_clock)
6795 {
6796         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6797         u32 fp, fp2 = 0;
6798
6799         if (IS_PINEVIEW(dev_priv)) {
6800                 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
6801                 if (reduced_clock)
6802                         fp2 = pnv_dpll_compute_fp(reduced_clock);
6803         } else {
6804                 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
6805                 if (reduced_clock)
6806                         fp2 = i9xx_dpll_compute_fp(reduced_clock);
6807         }
6808
6809         crtc_state->dpll_hw_state.fp0 = fp;
6810
6811         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
6812             reduced_clock) {
6813                 crtc_state->dpll_hw_state.fp1 = fp2;
6814         } else {
6815                 crtc_state->dpll_hw_state.fp1 = fp;
6816         }
6817 }
6818
/*
 * Recalibrate the PLL B opamp via the DPIO sideband. The exact sequence of
 * read-modify-write cycles below is significant; do not reorder.
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
                pipe)
{
        u32 reg_val;

        /*
         * PLLB opamp always calibrates to max value of 0x3f, force enable it
         * and set it to a reasonable value instead.
         */
        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
        reg_val &= 0xffffff00;
        reg_val |= 0x00000030;
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
        reg_val &= 0x00ffffff;
        reg_val |= 0x8c000000;
        vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

        /* Clear the opamp override bits again once calibration is forced. */
        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
        reg_val &= 0xffffff00;
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
        reg_val &= 0x00ffffff;
        reg_val |= 0xb0000000;
        vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
6847
/* Program the PCH transcoder data/link M1/N1 registers for the crtc's pipe. */
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
                                         struct intel_link_m_n *m_n)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = crtc->pipe;

        I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
        I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
        I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
        I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
6860
/*
 * Program the CPU transcoder data/link M/N registers. On gen5+ the registers
 * are indexed by transcoder (with optional M2/N2 for DRRS); older G4X-style
 * hardware indexes them by pipe and has no M2/N2 set.
 */
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
                                         struct intel_link_m_n *m_n,
                                         struct intel_link_m_n *m2_n2)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        int pipe = crtc->pipe;
        enum transcoder transcoder = crtc->config->cpu_transcoder;

        if (INTEL_GEN(dev_priv) >= 5) {
                I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
                I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
                I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
                I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
                /* M2_N2 registers to be set only for gen < 8 (M2_N2 available
                 * for gen < 8) and if DRRS is supported (to make sure the
                 * registers are not unnecessarily accessed).
                 */
                if (m2_n2 && (IS_CHERRYVIEW(dev_priv) ||
                    INTEL_GEN(dev_priv) < 8) && crtc->config->has_drrs) {
                        I915_WRITE(PIPE_DATA_M2(transcoder),
                                        TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
                        I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
                        I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
                        I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
                }
        } else {
                I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
                I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
                I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
                I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
        }
}
6893
/*
 * Program the DP M/N values selected by @m_n (M1_N1 or M2_N2) into the
 * appropriate transcoder registers.
 * NOTE(review): the PCH path below always programs dp_m_n, ignoring the
 * M2_N2 selection — confirm that M2_N2 is never requested with a PCH encoder.
 */
void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
{
        struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;

        if (m_n == M1_N1) {
                dp_m_n = &crtc->config->dp_m_n;
                dp_m2_n2 = &crtc->config->dp_m2_n2;
        } else if (m_n == M2_N2) {

                /*
                 * M2_N2 registers are not supported. Hence m2_n2 divider value
                 * needs to be programmed into M1_N1.
                 */
                dp_m_n = &crtc->config->dp_m2_n2;
        } else {
                DRM_ERROR("Unsupported divider value\n");
                return;
        }

        if (crtc->config->has_pch_encoder)
                intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
        else
                intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
}
6918
6919 static void vlv_compute_dpll(struct intel_crtc *crtc,
6920                              struct intel_crtc_state *pipe_config)
6921 {
6922         pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
6923                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
6924         if (crtc->pipe != PIPE_A)
6925                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
6926
6927         /* DPLL not used with DSI, but still need the rest set up */
6928         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
6929                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
6930                         DPLL_EXT_BUFFER_ENABLE_VLV;
6931
6932         pipe_config->dpll_hw_state.dpll_md =
6933                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
6934 }
6935
6936 static void chv_compute_dpll(struct intel_crtc *crtc,
6937                              struct intel_crtc_state *pipe_config)
6938 {
6939         pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
6940                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
6941         if (crtc->pipe != PIPE_A)
6942                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
6943
6944         /* DPLL not used with DSI, but still need the rest set up */
6945         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
6946                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
6947
6948         pipe_config->dpll_hw_state.dpll_md =
6949                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
6950 }
6951
6952 static void vlv_prepare_pll(struct intel_crtc *crtc,
6953                             const struct intel_crtc_state *pipe_config)
6954 {
6955         struct drm_device *dev = crtc->base.dev;
6956         struct drm_i915_private *dev_priv = to_i915(dev);
6957         enum pipe pipe = crtc->pipe;
6958         u32 mdiv;
6959         u32 bestn, bestm1, bestm2, bestp1, bestp2;
6960         u32 coreclk, reg_val;
6961
6962         /* Enable Refclk */
6963         I915_WRITE(DPLL(pipe),
6964                    pipe_config->dpll_hw_state.dpll &
6965                    ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
6966
6967         /* No need to actually set up the DPLL with DSI */
6968         if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
6969                 return;
6970
6971         mutex_lock(&dev_priv->sb_lock);
6972
6973         bestn = pipe_config->dpll.n;
6974         bestm1 = pipe_config->dpll.m1;
6975         bestm2 = pipe_config->dpll.m2;
6976         bestp1 = pipe_config->dpll.p1;
6977         bestp2 = pipe_config->dpll.p2;
6978
6979         /* See eDP HDMI DPIO driver vbios notes doc */
6980
6981         /* PLL B needs special handling */
6982         if (pipe == PIPE_B)
6983                 vlv_pllb_recal_opamp(dev_priv, pipe);
6984
6985         /* Set up Tx target for periodic Rcomp update */
6986         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
6987
6988         /* Disable target IRef on PLL */
6989         reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
6990         reg_val &= 0x00ffffff;
6991         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
6992
6993         /* Disable fast lock */
6994         vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
6995
6996         /* Set idtafcrecal before PLL is enabled */
6997         mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
6998         mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
6999         mdiv |= ((bestn << DPIO_N_SHIFT));
7000         mdiv |= (1 << DPIO_K_SHIFT);
7001
7002         /*
7003          * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
7004          * but we don't support that).
7005          * Note: don't use the DAC post divider as it seems unstable.
7006          */
7007         mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
7008         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7009
7010         mdiv |= DPIO_ENABLE_CALIBRATION;
7011         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7012
7013         /* Set HBR and RBR LPF coefficients */
7014         if (pipe_config->port_clock == 162000 ||
7015             intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) ||
7016             intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
7017                 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7018                                  0x009f0003);
7019         else
7020                 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7021                                  0x00d0000f);
7022
7023         if (intel_crtc_has_dp_encoder(pipe_config)) {
7024                 /* Use SSC source */
7025                 if (pipe == PIPE_A)
7026                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7027                                          0x0df40000);
7028                 else
7029                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7030                                          0x0df70000);
7031         } else { /* HDMI or VGA */
7032                 /* Use bend source */
7033                 if (pipe == PIPE_A)
7034                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7035                                          0x0df70000);
7036                 else
7037                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7038                                          0x0df40000);
7039         }
7040
7041         coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
7042         coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
7043         if (intel_crtc_has_dp_encoder(crtc->config))
7044                 coreclk |= 0x01000000;
7045         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
7046
7047         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
7048         mutex_unlock(&dev_priv->sb_lock);
7049 }
7050
/*
 * Program the CHV DPLL dividers and loop filter through the DPIO
 * sideband according to @pipe_config. Must run before chv_enable_pll().
 * With DSI only the refclk/SSC bits are written, since the DPLL itself
 * stays unused.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* Enable Refclk and SSC */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* m2 is split: lower 22 bits are the fraction, the rest the integer */
	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	mutex_lock(&dev_priv->sb_lock);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable (only when a fraction is present) */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold; coarse mode without fraction */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter coefficients are chosen by VCO frequency band */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	mutex_unlock(&dev_priv->sb_lock);
}
7155
7156 /**
7157  * vlv_force_pll_on - forcibly enable just the PLL
7158  * @dev_priv: i915 private structure
7159  * @pipe: pipe PLL to enable
7160  * @dpll: PLL configuration
7161  *
7162  * Enable the PLL for @pipe using the supplied @dpll config. To be used
7163  * in cases where we need the PLL enabled even when @pipe is not going to
7164  * be enabled.
7165  */
7166 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
7167                      const struct dpll *dpll)
7168 {
7169         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
7170         struct intel_crtc_state *pipe_config;
7171
7172         pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
7173         if (!pipe_config)
7174                 return -ENOMEM;
7175
7176         pipe_config->base.crtc = &crtc->base;
7177         pipe_config->pixel_multiplier = 1;
7178         pipe_config->dpll = *dpll;
7179
7180         if (IS_CHERRYVIEW(dev_priv)) {
7181                 chv_compute_dpll(crtc, pipe_config);
7182                 chv_prepare_pll(crtc, pipe_config);
7183                 chv_enable_pll(crtc, pipe_config);
7184         } else {
7185                 vlv_compute_dpll(crtc, pipe_config);
7186                 vlv_prepare_pll(crtc, pipe_config);
7187                 vlv_enable_pll(crtc, pipe_config);
7188         }
7189
7190         kfree(pipe_config);
7191
7192         return 0;
7193 }
7194
7195 /**
7196  * vlv_force_pll_off - forcibly disable just the PLL
7197  * @dev_priv: i915 private structure
7198  * @pipe: pipe PLL to disable
7199  *
7200  * Disable the PLL for @pipe. To be used in cases where we need
7201  * the PLL enabled even when @pipe is not going to be enabled.
7202  */
7203 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
7204 {
7205         if (IS_CHERRYVIEW(dev_priv))
7206                 chv_disable_pll(dev_priv, pipe);
7207         else
7208                 vlv_disable_pll(dev_priv, pipe);
7209 }
7210
7211 static void i9xx_compute_dpll(struct intel_crtc *crtc,
7212                               struct intel_crtc_state *crtc_state,
7213                               struct dpll *reduced_clock)
7214 {
7215         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7216         u32 dpll;
7217         struct dpll *clock = &crtc_state->dpll;
7218
7219         i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7220
7221         dpll = DPLL_VGA_MODE_DIS;
7222
7223         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
7224                 dpll |= DPLLB_MODE_LVDS;
7225         else
7226                 dpll |= DPLLB_MODE_DAC_SERIAL;
7227
7228         if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
7229             IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
7230                 dpll |= (crtc_state->pixel_multiplier - 1)
7231                         << SDVO_MULTIPLIER_SHIFT_HIRES;
7232         }
7233
7234         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
7235             intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
7236                 dpll |= DPLL_SDVO_HIGH_SPEED;
7237
7238         if (intel_crtc_has_dp_encoder(crtc_state))
7239                 dpll |= DPLL_SDVO_HIGH_SPEED;
7240
7241         /* compute bitmask from p1 value */
7242         if (IS_PINEVIEW(dev_priv))
7243                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
7244         else {
7245                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7246                 if (IS_G4X(dev_priv) && reduced_clock)
7247                         dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
7248         }
7249         switch (clock->p2) {
7250         case 5:
7251                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
7252                 break;
7253         case 7:
7254                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
7255                 break;
7256         case 10:
7257                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
7258                 break;
7259         case 14:
7260                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
7261                 break;
7262         }
7263         if (INTEL_GEN(dev_priv) >= 4)
7264                 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
7265
7266         if (crtc_state->sdvo_tv_clock)
7267                 dpll |= PLL_REF_INPUT_TVCLKINBC;
7268         else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7269                  intel_panel_use_ssc(dev_priv))
7270                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7271         else
7272                 dpll |= PLL_REF_INPUT_DREFCLK;
7273
7274         dpll |= DPLL_VCO_ENABLE;
7275         crtc_state->dpll_hw_state.dpll = dpll;
7276
7277         if (INTEL_GEN(dev_priv) >= 4) {
7278                 u32 dpll_md = (crtc_state->pixel_multiplier - 1)
7279                         << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7280                 crtc_state->dpll_hw_state.dpll_md = dpll_md;
7281         }
7282 }
7283
7284 static void i8xx_compute_dpll(struct intel_crtc *crtc,
7285                               struct intel_crtc_state *crtc_state,
7286                               struct dpll *reduced_clock)
7287 {
7288         struct drm_device *dev = crtc->base.dev;
7289         struct drm_i915_private *dev_priv = to_i915(dev);
7290         u32 dpll;
7291         struct dpll *clock = &crtc_state->dpll;
7292
7293         i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7294
7295         dpll = DPLL_VGA_MODE_DIS;
7296
7297         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7298                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7299         } else {
7300                 if (clock->p1 == 2)
7301                         dpll |= PLL_P1_DIVIDE_BY_TWO;
7302                 else
7303                         dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7304                 if (clock->p2 == 4)
7305                         dpll |= PLL_P2_DIVIDE_BY_4;
7306         }
7307
7308         if (!IS_I830(dev_priv) &&
7309             intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
7310                 dpll |= DPLL_DVO_2X_MODE;
7311
7312         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7313             intel_panel_use_ssc(dev_priv))
7314                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7315         else
7316                 dpll |= PLL_REF_INPUT_DREFCLK;
7317
7318         dpll |= DPLL_VCO_ENABLE;
7319         crtc_state->dpll_hw_state.dpll = dpll;
7320 }
7321
/*
 * Program the pipe/transcoder timing registers (H/V total, blank and
 * sync) from the adjusted mode in the crtc's current config. All
 * registers take "value - 1" encodings, with start in the low and end
 * in the high 16 bits.
 */
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	enum pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
	uint32_t crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to change the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		/* keep vsyncshift non-negative by wrapping by one htotal */
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/* VSYNCSHIFT is only programmed on gen4+ */
	if (INTEL_GEN(dev_priv) > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

}
7382
7383 static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
7384 {
7385         struct drm_device *dev = intel_crtc->base.dev;
7386         struct drm_i915_private *dev_priv = to_i915(dev);
7387         enum pipe pipe = intel_crtc->pipe;
7388
7389         /* pipesrc controls the size that is scaled from, which should
7390          * always be the user's requested size.
7391          */
7392         I915_WRITE(PIPESRC(pipe),
7393                    ((intel_crtc->config->pipe_src_w - 1) << 16) |
7394                    (intel_crtc->config->pipe_src_h - 1));
7395 }
7396
7397 static void intel_get_pipe_timings(struct intel_crtc *crtc,
7398                                    struct intel_crtc_state *pipe_config)
7399 {
7400         struct drm_device *dev = crtc->base.dev;
7401         struct drm_i915_private *dev_priv = to_i915(dev);
7402         enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
7403         uint32_t tmp;
7404
7405         tmp = I915_READ(HTOTAL(cpu_transcoder));
7406         pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
7407         pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
7408         tmp = I915_READ(HBLANK(cpu_transcoder));
7409         pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
7410         pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
7411         tmp = I915_READ(HSYNC(cpu_transcoder));
7412         pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
7413         pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
7414
7415         tmp = I915_READ(VTOTAL(cpu_transcoder));
7416         pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
7417         pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
7418         tmp = I915_READ(VBLANK(cpu_transcoder));
7419         pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
7420         pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
7421         tmp = I915_READ(VSYNC(cpu_transcoder));
7422         pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
7423         pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
7424
7425         if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
7426                 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
7427                 pipe_config->base.adjusted_mode.crtc_vtotal += 1;
7428                 pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
7429         }
7430 }
7431
7432 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
7433                                     struct intel_crtc_state *pipe_config)
7434 {
7435         struct drm_device *dev = crtc->base.dev;
7436         struct drm_i915_private *dev_priv = to_i915(dev);
7437         u32 tmp;
7438
7439         tmp = I915_READ(PIPESRC(crtc->pipe));
7440         pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
7441         pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
7442
7443         pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
7444         pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
7445 }
7446
7447 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
7448                                  struct intel_crtc_state *pipe_config)
7449 {
7450         mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
7451         mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
7452         mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
7453         mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
7454
7455         mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
7456         mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
7457         mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
7458         mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
7459
7460         mode->flags = pipe_config->base.adjusted_mode.flags;
7461         mode->type = DRM_MODE_TYPE_DRIVER;
7462
7463         mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
7464
7465         mode->hsync = drm_mode_hsync(mode);
7466         mode->vrefresh = drm_mode_vrefresh(mode);
7467         drm_mode_set_name(mode);
7468 }
7469
/*
 * Compute and write PIPECONF for gen2-4/VLV/CHV pipes from the crtc's
 * current config: double wide mode, bpc/dither (g4x and later only),
 * interlace mode and (VLV/CHV) limited color range.
 */
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	uint32_t pipeconf;

	pipeconf = 0;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;

	if (intel_crtc->config->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	/* gen3 and earlier, and SDVO, use the field-indication interlace mode */
	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		if (INTEL_GEN(dev_priv) < 4 ||
		    intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else
		pipeconf |= PIPECONF_PROGRESSIVE;

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	     intel_crtc->config->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(intel_crtc->pipe));
}
7524
7525 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
7526                                    struct intel_crtc_state *crtc_state)
7527 {
7528         struct drm_device *dev = crtc->base.dev;
7529         struct drm_i915_private *dev_priv = to_i915(dev);
7530         const struct intel_limit *limit;
7531         int refclk = 48000;
7532
7533         memset(&crtc_state->dpll_hw_state, 0,
7534                sizeof(crtc_state->dpll_hw_state));
7535
7536         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7537                 if (intel_panel_use_ssc(dev_priv)) {
7538                         refclk = dev_priv->vbt.lvds_ssc_freq;
7539                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7540                 }
7541
7542                 limit = &intel_limits_i8xx_lvds;
7543         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
7544                 limit = &intel_limits_i8xx_dvo;
7545         } else {
7546                 limit = &intel_limits_i8xx_dac;
7547         }
7548
7549         if (!crtc_state->clock_set &&
7550             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7551                                  refclk, NULL, &crtc_state->dpll)) {
7552                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7553                 return -EINVAL;
7554         }
7555
7556         i8xx_compute_dpll(crtc, crtc_state, NULL);
7557
7558         return 0;
7559 }
7560
7561 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
7562                                   struct intel_crtc_state *crtc_state)
7563 {
7564         struct drm_device *dev = crtc->base.dev;
7565         struct drm_i915_private *dev_priv = to_i915(dev);
7566         const struct intel_limit *limit;
7567         int refclk = 96000;
7568
7569         memset(&crtc_state->dpll_hw_state, 0,
7570                sizeof(crtc_state->dpll_hw_state));
7571
7572         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7573                 if (intel_panel_use_ssc(dev_priv)) {
7574                         refclk = dev_priv->vbt.lvds_ssc_freq;
7575                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7576                 }
7577
7578                 if (intel_is_dual_link_lvds(dev))
7579                         limit = &intel_limits_g4x_dual_channel_lvds;
7580                 else
7581                         limit = &intel_limits_g4x_single_channel_lvds;
7582         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
7583                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
7584                 limit = &intel_limits_g4x_hdmi;
7585         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
7586                 limit = &intel_limits_g4x_sdvo;
7587         } else {
7588                 /* The option is for other outputs */
7589                 limit = &intel_limits_i9xx_sdvo;
7590         }
7591
7592         if (!crtc_state->clock_set &&
7593             !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7594                                 refclk, NULL, &crtc_state->dpll)) {
7595                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7596                 return -EINVAL;
7597         }
7598
7599         i9xx_compute_dpll(crtc, crtc_state, NULL);
7600
7601         return 0;
7602 }
7603
7604 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
7605                                   struct intel_crtc_state *crtc_state)
7606 {
7607         struct drm_device *dev = crtc->base.dev;
7608         struct drm_i915_private *dev_priv = to_i915(dev);
7609         const struct intel_limit *limit;
7610         int refclk = 96000;
7611
7612         memset(&crtc_state->dpll_hw_state, 0,
7613                sizeof(crtc_state->dpll_hw_state));
7614
7615         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7616                 if (intel_panel_use_ssc(dev_priv)) {
7617                         refclk = dev_priv->vbt.lvds_ssc_freq;
7618                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7619                 }
7620
7621                 limit = &intel_limits_pineview_lvds;
7622         } else {
7623                 limit = &intel_limits_pineview_sdvo;
7624         }
7625
7626         if (!crtc_state->clock_set &&
7627             !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7628                                 refclk, NULL, &crtc_state->dpll)) {
7629                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7630                 return -EINVAL;
7631         }
7632
7633         i9xx_compute_dpll(crtc, crtc_state, NULL);
7634
7635         return 0;
7636 }
7637
7638 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
7639                                    struct intel_crtc_state *crtc_state)
7640 {
7641         struct drm_device *dev = crtc->base.dev;
7642         struct drm_i915_private *dev_priv = to_i915(dev);
7643         const struct intel_limit *limit;
7644         int refclk = 96000;
7645
7646         memset(&crtc_state->dpll_hw_state, 0,
7647                sizeof(crtc_state->dpll_hw_state));
7648
7649         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7650                 if (intel_panel_use_ssc(dev_priv)) {
7651                         refclk = dev_priv->vbt.lvds_ssc_freq;
7652                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7653                 }
7654
7655                 limit = &intel_limits_i9xx_lvds;
7656         } else {
7657                 limit = &intel_limits_i9xx_sdvo;
7658         }
7659
7660         if (!crtc_state->clock_set &&
7661             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7662                                  refclk, NULL, &crtc_state->dpll)) {
7663                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7664                 return -EINVAL;
7665         }
7666
7667         i9xx_compute_dpll(crtc, crtc_state, NULL);
7668
7669         return 0;
7670 }
7671
7672 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
7673                                   struct intel_crtc_state *crtc_state)
7674 {
7675         int refclk = 100000;
7676         const struct intel_limit *limit = &intel_limits_chv;
7677
7678         memset(&crtc_state->dpll_hw_state, 0,
7679                sizeof(crtc_state->dpll_hw_state));
7680
7681         if (!crtc_state->clock_set &&
7682             !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7683                                 refclk, NULL, &crtc_state->dpll)) {
7684                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7685                 return -EINVAL;
7686         }
7687
7688         chv_compute_dpll(crtc, crtc_state);
7689
7690         return 0;
7691 }
7692
7693 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
7694                                   struct intel_crtc_state *crtc_state)
7695 {
7696         int refclk = 100000;
7697         const struct intel_limit *limit = &intel_limits_vlv;
7698
7699         memset(&crtc_state->dpll_hw_state, 0,
7700                sizeof(crtc_state->dpll_hw_state));
7701
7702         if (!crtc_state->clock_set &&
7703             !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7704                                 refclk, NULL, &crtc_state->dpll)) {
7705                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7706                 return -EINVAL;
7707         }
7708
7709         vlv_compute_dpll(crtc, crtc_state);
7710
7711         return 0;
7712 }
7713
/*
 * Read out the GMCH panel fitter state for @crtc into @pipe_config.
 *
 * Does nothing (leaves @pipe_config untouched) when no pfit is
 * available, the pfit is disabled, or it is attached to another pipe.
 */
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
                                 struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        uint32_t tmp;

        /* gen2/3: only 830 and mobile parts are treated as having a pfit. */
        if (INTEL_GEN(dev_priv) <= 3 &&
            (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
                return;

        tmp = I915_READ(PFIT_CONTROL);
        if (!(tmp & PFIT_ENABLE))
                return;

        /* Check whether the pfit is attached to our pipe. */
        if (INTEL_GEN(dev_priv) < 4) {
                /* Pre-gen4: only pipe B is accepted as the pfit's pipe. */
                if (crtc->pipe != PIPE_B)
                        return;
        } else {
                /* Gen4+: the pfit control register says which pipe owns it. */
                if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
                        return;
        }

        pipe_config->gmch_pfit.control = tmp;
        pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
}
7740
/*
 * Read back the effective port clock of a VLV pipe by decoding the PLL
 * divider fields from the DPIO sideband register VLV_PLL_DW3, and store
 * it in @pipe_config->port_clock.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
                               struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = pipe_config->cpu_transcoder;
        struct dpll clock;
        u32 mdiv;
        int refclk = 100000;  /* VLV reference clock, in kHz */

        /* In case of DSI, DPLL will not be used */
        if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
                return;

        /* Sideband (DPIO) accesses are serialized by sb_lock. */
        mutex_lock(&dev_priv->sb_lock);
        mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
        mutex_unlock(&dev_priv->sb_lock);

        /* Unpack the m1/m2/n/p1/p2 divider fields from the register. */
        clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
        clock.m2 = mdiv & DPIO_M2DIV_MASK;
        clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
        clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
        clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

        pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
7767
/*
 * Read out the current hardware configuration of the primary plane
 * (format, tiling, surface base, stride, dimensions) into
 * @plane_config, allocating an intel_framebuffer to describe it.
 *
 * Returns silently (leaving @plane_config->fb NULL) if the plane is
 * disabled or the framebuffer allocation fails.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
                              struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *plane = to_intel_plane(crtc->base.primary);
        enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
        enum pipe pipe;
        u32 val, base, offset;
        int fourcc, pixel_format;
        unsigned int aligned_height;
        struct drm_framebuffer *fb;
        struct intel_framebuffer *intel_fb;

        /* Nothing to read out if the plane is off. */
        if (!plane->get_hw_state(plane, &pipe))
                return;

        /* The plane is expected to be attached to its own crtc's pipe. */
        WARN_ON(pipe != crtc->pipe);

        intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
        if (!intel_fb) {
                DRM_DEBUG_KMS("failed to alloc fb\n");
                return;
        }

        fb = &intel_fb->base;

        fb->dev = dev;

        val = I915_READ(DSPCNTR(i9xx_plane));

        /* Gen4+ planes report X-tiling in the control register. */
        if (INTEL_GEN(dev_priv) >= 4) {
                if (val & DISPPLANE_TILED) {
                        plane_config->tiling = I915_TILING_X;
                        fb->modifier = I915_FORMAT_MOD_X_TILED;
                }
        }

        pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
        fourcc = i9xx_format_to_fourcc(pixel_format);
        fb->format = drm_format_info(fourcc);

        /*
         * Where the surface base address lives depends on the platform:
         * HSW/BDW and gen4+ use DSPSURF (+ a tile/linear offset register),
         * older parts have a single DSPADDR register.
         */
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                offset = I915_READ(DSPOFFSET(i9xx_plane));
                base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
        } else if (INTEL_GEN(dev_priv) >= 4) {
                if (plane_config->tiling)
                        offset = I915_READ(DSPTILEOFF(i9xx_plane));
                else
                        offset = I915_READ(DSPLINOFF(i9xx_plane));
                base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
        } else {
                base = I915_READ(DSPADDR(i9xx_plane));
        }
        plane_config->base = base;

        /* PIPESRC holds (width - 1) << 16 | (height - 1). */
        val = I915_READ(PIPESRC(pipe));
        fb->width = ((val >> 16) & 0xfff) + 1;
        fb->height = ((val >> 0) & 0xfff) + 1;

        val = I915_READ(DSPSTRIDE(i9xx_plane));
        fb->pitches[0] = val & 0xffffffc0;

        aligned_height = intel_fb_align_height(fb, 0, fb->height);

        /* Total surface size estimate: stride times aligned height. */
        plane_config->size = fb->pitches[0] * aligned_height;

        DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
                      crtc->base.name, plane->base.name, fb->width, fb->height,
                      fb->format->cpp[0] * 8, base, fb->pitches[0],
                      plane_config->size);

        plane_config->fb = intel_fb;
}
7843
/*
 * Read back the effective port clock of a CHV pipe by decoding the PLL
 * divider fields from several DPIO sideband registers, and store it in
 * @pipe_config->port_clock.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
                               struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = pipe_config->cpu_transcoder;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        struct dpll clock;
        u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
        int refclk = 100000;  /* CHV reference clock, in kHz */

        /* In case of DSI, DPLL will not be used */
        if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
                return;

        /* Sideband (DPIO) accesses are serialized by sb_lock. */
        mutex_lock(&dev_priv->sb_lock);
        cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
        pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
        pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
        pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
        pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
        mutex_unlock(&dev_priv->sb_lock);

        /* m1 decodes to either a divide-by-2 or no m1 divider at all. */
        clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
        /* m2 is 8.22 fixed point: integer part from DW0, fraction from DW2. */
        clock.m2 = (pll_dw0 & 0xff) << 22;
        if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
                clock.m2 |= pll_dw2 & 0x3fffff;
        clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
        clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
        clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

        pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
7877
/*
 * Read out the full hardware state of an i9xx-style pipe into
 * @pipe_config: pipe bpp, color range, timings, source size, pfit,
 * pixel multiplier, DPLL state and the derived clocks.
 *
 * Holds a reference on the pipe's power domain for the duration of the
 * readout. Returns true if the pipe is powered and enabled and the
 * state was read out, false otherwise.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
                                 struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum intel_display_power_domain power_domain;
        uint32_t tmp;
        bool ret;

        /* Bail out if the pipe's power well is down; registers would be dead. */
        power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
        if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;

        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = NULL;

        ret = false;

        tmp = I915_READ(PIPECONF(crtc->pipe));
        if (!(tmp & PIPECONF_ENABLE))
                goto out;

        /* Only G4X/VLV/CHV encode the pipe bpp in PIPECONF. */
        if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
            IS_CHERRYVIEW(dev_priv)) {
                switch (tmp & PIPECONF_BPC_MASK) {
                case PIPECONF_6BPC:
                        pipe_config->pipe_bpp = 18;
                        break;
                case PIPECONF_8BPC:
                        pipe_config->pipe_bpp = 24;
                        break;
                case PIPECONF_10BPC:
                        pipe_config->pipe_bpp = 30;
                        break;
                default:
                        break;
                }
        }

        if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
            (tmp & PIPECONF_COLOR_RANGE_SELECT))
                pipe_config->limited_color_range = true;

        if (INTEL_GEN(dev_priv) < 4)
                pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

        intel_get_pipe_timings(crtc, pipe_config);
        intel_get_pipe_src_size(crtc, pipe_config);

        i9xx_get_pfit_config(crtc, pipe_config);

        /* The pixel multiplier lives in different places per generation. */
        if (INTEL_GEN(dev_priv) >= 4) {
                /* No way to read it out on pipes B and C */
                if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
                        tmp = dev_priv->chv_dpll_md[crtc->pipe];
                else
                        tmp = I915_READ(DPLL_MD(crtc->pipe));
                pipe_config->pixel_multiplier =
                        ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
                         >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
                pipe_config->dpll_hw_state.dpll_md = tmp;
        } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
                   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
                tmp = I915_READ(DPLL(crtc->pipe));
                pipe_config->pixel_multiplier =
                        ((tmp & SDVO_MULTIPLIER_MASK)
                         >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
        } else {
                /* Note that on i915G/GM the pixel multiplier is in the sdvo
                 * port and will be fixed up in the encoder->get_config
                 * function. */
                pipe_config->pixel_multiplier = 1;
        }
        pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
        if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
                /*
                 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
                 * on 830. Filter it out here so that we don't
                 * report errors due to that.
                 */
                if (IS_I830(dev_priv))
                        pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;

                pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
                pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
        } else {
                /* Mask out read-only status bits. */
                pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
                                                     DPLL_PORTC_READY_MASK |
                                                     DPLL_PORTB_READY_MASK);
        }

        /* Derive the port clock via the platform-specific DPLL decoder. */
        if (IS_CHERRYVIEW(dev_priv))
                chv_crtc_clock_get(crtc, pipe_config);
        else if (IS_VALLEYVIEW(dev_priv))
                vlv_crtc_clock_get(crtc, pipe_config);
        else
                i9xx_crtc_clock_get(crtc, pipe_config);

        /*
         * Normally the dotclock is filled in by the encoder .get_config()
         * but in case the pipe is enabled w/o any ports we need a sane
         * default.
         */
        pipe_config->base.adjusted_mode.crtc_clock =
                pipe_config->port_clock / pipe_config->pixel_multiplier;

        ret = true;

out:
        intel_display_power_put(dev_priv, power_domain);

        return ret;
}
7991
/*
 * Configure the PCH display reference clock (PCH_DREF_CONTROL) on
 * Ironlake-class PCHs.
 *
 * Surveys the registered encoders to determine what reference sources
 * are needed (LVDS/eDP panels, CK505 clock chip, SSC), computes the
 * desired final register value first, and then — only if a change is
 * actually required — walks the hardware from the current value to the
 * final one, enabling/disabling one source at a time with 200us
 * settling delays between writes.
 */
static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;
        int i;
        u32 val, final;
        bool has_lvds = false;
        bool has_cpu_edp = false;
        bool has_panel = false;
        bool has_ck505 = false;
        bool can_ssc = false;
        bool using_ssc_source = false;

        /* We need to take the global config into account */
        for_each_intel_encoder(&dev_priv->drm, encoder) {
                switch (encoder->type) {
                case INTEL_OUTPUT_LVDS:
                        has_panel = true;
                        has_lvds = true;
                        break;
                case INTEL_OUTPUT_EDP:
                        has_panel = true;
                        /* Only port A eDP is driven from the CPU side. */
                        if (encoder->port == PORT_A)
                                has_cpu_edp = true;
                        break;
                default:
                        break;
                }
        }

        /* On IBX the VBT tells us whether an external CK505 clock is used. */
        if (HAS_PCH_IBX(dev_priv)) {
                has_ck505 = dev_priv->vbt.display_clock_mode;
                can_ssc = has_ck505;
        } else {
                has_ck505 = false;
                can_ssc = true;
        }

        /* Check if any DPLLs are using the SSC source */
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                u32 temp = I915_READ(PCH_DPLL(i));

                if (!(temp & DPLL_VCO_ENABLE))
                        continue;

                if ((temp & PLL_REF_INPUT_MASK) ==
                    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
                        using_ssc_source = true;
                        break;
                }
        }

        DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
                      has_panel, has_lvds, has_ck505, using_ssc_source);

        /* Ironlake: try to setup display ref clock before DPLL
         * enabling. This is only under driver's control after
         * PCH B stepping, previous chipset stepping should be
         * ignoring this setting.
         */
        val = I915_READ(PCH_DREF_CONTROL);

        /* As we must carefully and slowly disable/enable each source in turn,
         * compute the final state we want first and check if we need to
         * make any changes at all.
         */
        final = val;
        final &= ~DREF_NONSPREAD_SOURCE_MASK;
        if (has_ck505)
                final |= DREF_NONSPREAD_CK505_ENABLE;
        else
                final |= DREF_NONSPREAD_SOURCE_ENABLE;

        final &= ~DREF_SSC_SOURCE_MASK;
        final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
        final &= ~DREF_SSC1_ENABLE;

        if (has_panel) {
                final |= DREF_SSC_SOURCE_ENABLE;

                if (intel_panel_use_ssc(dev_priv) && can_ssc)
                        final |= DREF_SSC1_ENABLE;

                if (has_cpu_edp) {
                        if (intel_panel_use_ssc(dev_priv) && can_ssc)
                                final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
                        else
                                final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
                } else
                        final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
        } else if (using_ssc_source) {
                /* No panel, but a DPLL still references SSC: keep it alive. */
                final |= DREF_SSC_SOURCE_ENABLE;
                final |= DREF_SSC1_ENABLE;
        }

        /* Hardware already matches the wanted state; nothing to do. */
        if (final == val)
                return;

        /* Always enable nonspread source */
        val &= ~DREF_NONSPREAD_SOURCE_MASK;

        if (has_ck505)
                val |= DREF_NONSPREAD_CK505_ENABLE;
        else
                val |= DREF_NONSPREAD_SOURCE_ENABLE;

        if (has_panel) {
                val &= ~DREF_SSC_SOURCE_MASK;
                val |= DREF_SSC_SOURCE_ENABLE;

                /* SSC must be turned on before enabling the CPU output  */
                if (intel_panel_use_ssc(dev_priv) && can_ssc) {
                        DRM_DEBUG_KMS("Using SSC on panel\n");
                        val |= DREF_SSC1_ENABLE;
                } else
                        val &= ~DREF_SSC1_ENABLE;

                /* Get SSC going before enabling the outputs */
                I915_WRITE(PCH_DREF_CONTROL, val);
                POSTING_READ(PCH_DREF_CONTROL);
                udelay(200);

                val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

                /* Enable CPU source on CPU attached eDP */
                if (has_cpu_edp) {
                        if (intel_panel_use_ssc(dev_priv) && can_ssc) {
                                DRM_DEBUG_KMS("Using SSC on eDP\n");
                                val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
                        } else
                                val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
                } else
                        val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

                I915_WRITE(PCH_DREF_CONTROL, val);
                POSTING_READ(PCH_DREF_CONTROL);
                udelay(200);
        } else {
                DRM_DEBUG_KMS("Disabling CPU source output\n");

                val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

                /* Turn off CPU output */
                val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

                I915_WRITE(PCH_DREF_CONTROL, val);
                POSTING_READ(PCH_DREF_CONTROL);
                udelay(200);

                if (!using_ssc_source) {
                        DRM_DEBUG_KMS("Disabling SSC source\n");

                        /* Turn off the SSC source */
                        val &= ~DREF_SSC_SOURCE_MASK;
                        val |= DREF_SSC_SOURCE_DISABLE;

                        /* Turn off SSC1 */
                        val &= ~DREF_SSC1_ENABLE;

                        I915_WRITE(PCH_DREF_CONTROL, val);
                        POSTING_READ(PCH_DREF_CONTROL);
                        udelay(200);
                }
        }

        /* After sequencing, the register must equal the precomputed target. */
        BUG_ON(val != final);
}
8158
/*
 * Pulse the FDI mPHY reset line via SOUTH_CHICKEN2: assert the reset
 * control bit, wait (up to 100us) for the status bit to confirm, then
 * de-assert and wait for the status bit to clear.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
        uint32_t tmp;

        /* Assert the mPHY reset. */
        tmp = I915_READ(SOUTH_CHICKEN2);
        tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
        I915_WRITE(SOUTH_CHICKEN2, tmp);

        if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
                        FDI_MPHY_IOSFSB_RESET_STATUS, 100))
                DRM_ERROR("FDI mPHY reset assert timeout\n");

        /* De-assert the mPHY reset. */
        tmp = I915_READ(SOUTH_CHICKEN2);
        tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
        I915_WRITE(SOUTH_CHICKEN2, tmp);

        if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
                         FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
                DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
8179
/* WaMPhyProgramming:hsw */
/*
 * Program the FDI mPHY tuning registers through the sideband (SBI_MPHY)
 * interface. The register offsets and values are opaque magic numbers
 * required by the WaMPhyProgramming:hsw workaround — presumably taken
 * straight from BSpec; do not "clean them up" without consulting the
 * spec. Note the paired pattern: 0x20xx/0x21xx offsets program the two
 * mPHY lanes with identical values.
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
        uint32_t tmp;

        tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
        tmp &= ~(0xFF << 24);
        tmp |= (0x12 << 24);
        intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
        tmp |= (1 << 11);
        intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
        tmp |= (1 << 11);
        intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
        tmp |= (1 << 24) | (1 << 21) | (1 << 18);
        intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
        tmp |= (1 << 24) | (1 << 21) | (1 << 18);
        intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
        tmp &= ~(7 << 13);
        tmp |= (5 << 13);
        intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
        tmp &= ~(7 << 13);
        tmp |= (5 << 13);
        intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
        tmp &= ~0xFF;
        tmp |= 0x1C;
        intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
        tmp &= ~0xFF;
        tmp |= 0x1C;
        intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
        tmp &= ~(0xFF << 16);
        tmp |= (0x1C << 16);
        intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
        tmp &= ~(0xFF << 16);
        tmp |= (0x1C << 16);
        intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
        tmp |= (1 << 27);
        intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
        tmp |= (1 << 27);
        intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
        tmp &= ~(0xF << 28);
        tmp |= (4 << 28);
        intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
        tmp &= ~(0xF << 28);
        tmp |= (4 << 28);
        intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
8254
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 *
 * @with_spread: enable downspread (required when @with_fdi is set)
 * @with_fdi: additionally reset and program the FDI mPHY (big-PCH only)
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
                                 bool with_spread, bool with_fdi)
{
        uint32_t reg, tmp;

        /* Sanitize impossible parameter combinations (and complain). */
        if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
                with_spread = true;
        if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
            with_fdi, "LP PCH doesn't have FDI\n"))
                with_fdi = false;

        /* Sideband (SBI) accesses are serialized by sb_lock. */
        mutex_lock(&dev_priv->sb_lock);

        /* Un-disable SSC but keep the path in the bypass (PATHALT) state. */
        tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
        tmp &= ~SBI_SSCCTL_DISABLE;
        tmp |= SBI_SSCCTL_PATHALT;
        intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

        udelay(24);

        if (with_spread) {
                /* Leave the bypass state to actually apply downspread. */
                tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
                tmp &= ~SBI_SSCCTL_PATHALT;
                intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

                if (with_fdi) {
                        lpt_reset_fdi_mphy(dev_priv);
                        lpt_program_fdi_mphy(dev_priv);
                }
        }

        /* Enable the CLKOUT_DP buffer (register differs between LP and big PCH). */
        reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
        tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
        tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
        intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

        mutex_unlock(&dev_priv->sb_lock);
}
8299
/* Sequence to disable CLKOUT_DP: first turn off the output buffer, then
 * (if SSC is still running) put the path into bypass before disabling
 * SSC entirely. */
static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
        uint32_t reg, tmp;

        /* Sideband (SBI) accesses are serialized by sb_lock. */
        mutex_lock(&dev_priv->sb_lock);

        /* Disable the CLKOUT_DP buffer (register differs between LP and big PCH). */
        reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
        tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
        tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
        intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

        tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
        if (!(tmp & SBI_SSCCTL_DISABLE)) {
                /* Enter the bypass (PATHALT) state before disabling SSC. */
                if (!(tmp & SBI_SSCCTL_PATHALT)) {
                        tmp |= SBI_SSCCTL_PATHALT;
                        intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
                        udelay(32);
                }
                tmp |= SBI_SSCCTL_DISABLE;
                intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
        }

        mutex_unlock(&dev_priv->sb_lock);
}
8325
/* Map a bend amount in steps (-50..50, multiples of 5) to an index into
 * sscdivintphase[]: -50 -> 0, 0 -> 10, +50 -> 20. */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/* SBI_SSCDIVINTPHASE values per bend step, indexed via BEND_IDX().
 * Adjacent step pairs share a value; the dither setting written by
 * lpt_bend_clkout_dp() distinguishes the odd multiples of 5. */
static const uint16_t sscdivintphase[] = {
        [BEND_IDX( 50)] = 0x3B23,
        [BEND_IDX( 45)] = 0x3B23,
        [BEND_IDX( 40)] = 0x3C23,
        [BEND_IDX( 35)] = 0x3C23,
        [BEND_IDX( 30)] = 0x3D23,
        [BEND_IDX( 25)] = 0x3D23,
        [BEND_IDX( 20)] = 0x3E23,
        [BEND_IDX( 15)] = 0x3E23,
        [BEND_IDX( 10)] = 0x3F23,
        [BEND_IDX(  5)] = 0x3F23,
        [BEND_IDX(  0)] = 0x0025,
        [BEND_IDX( -5)] = 0x0025,
        [BEND_IDX(-10)] = 0x0125,
        [BEND_IDX(-15)] = 0x0125,
        [BEND_IDX(-20)] = 0x0225,
        [BEND_IDX(-25)] = 0x0225,
        [BEND_IDX(-30)] = 0x0325,
        [BEND_IDX(-35)] = 0x0325,
        [BEND_IDX(-40)] = 0x0425,
        [BEND_IDX(-45)] = 0x0425,
        [BEND_IDX(-50)] = 0x0525,
};
8351
8352 /*
8353  * Bend CLKOUT_DP
8354  * steps -50 to 50 inclusive, in steps of 5
8355  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
8356  * change in clock period = -(steps / 10) * 5.787 ps
8357  */
8358 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
8359 {
8360         uint32_t tmp;
8361         int idx = BEND_IDX(steps);
8362
8363         if (WARN_ON(steps % 5 != 0))
8364                 return;
8365
8366         if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
8367                 return;
8368
8369         mutex_lock(&dev_priv->sb_lock);
8370
8371         if (steps % 10 != 0)
8372                 tmp = 0xAAAAAAAB;
8373         else
8374                 tmp = 0x00000000;
8375         intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
8376
8377         tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
8378         tmp &= 0xffff0000;
8379         tmp |= sscdivintphase[idx];
8380         intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
8381
8382         mutex_unlock(&dev_priv->sb_lock);
8383 }
8384
8385 #undef BEND_IDX
8386
8387 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
8388 {
8389         struct intel_encoder *encoder;
8390         bool has_vga = false;
8391
8392         for_each_intel_encoder(&dev_priv->drm, encoder) {
8393                 switch (encoder->type) {
8394                 case INTEL_OUTPUT_ANALOG:
8395                         has_vga = true;
8396                         break;
8397                 default:
8398                         break;
8399                 }
8400         }
8401
8402         if (has_vga) {
8403                 lpt_bend_clkout_dp(dev_priv, 0);
8404                 lpt_enable_clkout_dp(dev_priv, true, true);
8405         } else {
8406                 lpt_disable_clkout_dp(dev_priv);
8407         }
8408 }
8409
8410 /*
8411  * Initialize reference clocks when the driver loads
8412  */
8413 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
8414 {
8415         if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
8416                 ironlake_init_pch_refclk(dev_priv);
8417         else if (HAS_PCH_LPT(dev_priv))
8418                 lpt_init_pch_refclk(dev_priv);
8419 }
8420
8421 static void ironlake_set_pipeconf(struct drm_crtc *crtc)
8422 {
8423         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
8424         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8425         int pipe = intel_crtc->pipe;
8426         uint32_t val;
8427
8428         val = 0;
8429
8430         switch (intel_crtc->config->pipe_bpp) {
8431         case 18:
8432                 val |= PIPECONF_6BPC;
8433                 break;
8434         case 24:
8435                 val |= PIPECONF_8BPC;
8436                 break;
8437         case 30:
8438                 val |= PIPECONF_10BPC;
8439                 break;
8440         case 36:
8441                 val |= PIPECONF_12BPC;
8442                 break;
8443         default:
8444                 /* Case prevented by intel_choose_pipe_bpp_dither. */
8445                 BUG();
8446         }
8447
8448         if (intel_crtc->config->dither)
8449                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8450
8451         if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8452                 val |= PIPECONF_INTERLACED_ILK;
8453         else
8454                 val |= PIPECONF_PROGRESSIVE;
8455
8456         if (intel_crtc->config->limited_color_range)
8457                 val |= PIPECONF_COLOR_RANGE_SELECT;
8458
8459         I915_WRITE(PIPECONF(pipe), val);
8460         POSTING_READ(PIPECONF(pipe));
8461 }
8462
8463 static void haswell_set_pipeconf(struct drm_crtc *crtc)
8464 {
8465         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
8466         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8467         enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
8468         u32 val = 0;
8469
8470         if (IS_HASWELL(dev_priv) && intel_crtc->config->dither)
8471                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8472
8473         if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8474                 val |= PIPECONF_INTERLACED_ILK;
8475         else
8476                 val |= PIPECONF_PROGRESSIVE;
8477
8478         I915_WRITE(PIPECONF(cpu_transcoder), val);
8479         POSTING_READ(PIPECONF(cpu_transcoder));
8480 }
8481
8482 static void haswell_set_pipemisc(struct drm_crtc *crtc)
8483 {
8484         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
8485         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8486         struct intel_crtc_state *config = intel_crtc->config;
8487
8488         if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
8489                 u32 val = 0;
8490
8491                 switch (intel_crtc->config->pipe_bpp) {
8492                 case 18:
8493                         val |= PIPEMISC_DITHER_6_BPC;
8494                         break;
8495                 case 24:
8496                         val |= PIPEMISC_DITHER_8_BPC;
8497                         break;
8498                 case 30:
8499                         val |= PIPEMISC_DITHER_10_BPC;
8500                         break;
8501                 case 36:
8502                         val |= PIPEMISC_DITHER_12_BPC;
8503                         break;
8504                 default:
8505                         /* Case prevented by pipe_config_set_bpp. */
8506                         BUG();
8507                 }
8508
8509                 if (intel_crtc->config->dither)
8510                         val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
8511
8512                 if (config->ycbcr420) {
8513                         val |= PIPEMISC_OUTPUT_COLORSPACE_YUV |
8514                                 PIPEMISC_YUV420_ENABLE |
8515                                 PIPEMISC_YUV420_MODE_FULL_BLEND;
8516                 }
8517
8518                 I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
8519         }
8520 }
8521
8522 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
8523 {
8524         /*
8525          * Account for spread spectrum to avoid
8526          * oversubscribing the link. Max center spread
8527          * is 2.5%; use 5% for safety's sake.
8528          */
8529         u32 bps = target_clock * bpp * 21 / 20;
8530         return DIV_ROUND_UP(bps, link_bw * 8);
8531 }
8532
8533 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
8534 {
8535         return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
8536 }
8537
/*
 * Compute the DPLL, FP0 and FP1 register values for an ILK-style PCH
 * PLL from the pre-computed dividers in @crtc_state->dpll, and stash
 * them in @crtc_state->dpll_hw_state for later programming.
 * @reduced_clock, when non-NULL, supplies the dividers for the
 * downclocked (FP1) mode.
 */
static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
				  struct intel_crtc_state *crtc_state,
				  struct dpll *reduced_clock)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock)
		factor = 20;

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		/* Same CB tune criterion as above, for the reduced clock. */
		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		/* No downclocked mode: FP1 mirrors FP0. */
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* Use the panel's SSC reference for LVDS when SSC is enabled. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}
8639
8640 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
8641                                        struct intel_crtc_state *crtc_state)
8642 {
8643         struct drm_device *dev = crtc->base.dev;
8644         struct drm_i915_private *dev_priv = to_i915(dev);
8645         const struct intel_limit *limit;
8646         int refclk = 120000;
8647
8648         memset(&crtc_state->dpll_hw_state, 0,
8649                sizeof(crtc_state->dpll_hw_state));
8650
8651         /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
8652         if (!crtc_state->has_pch_encoder)
8653                 return 0;
8654
8655         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8656                 if (intel_panel_use_ssc(dev_priv)) {
8657                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
8658                                       dev_priv->vbt.lvds_ssc_freq);
8659                         refclk = dev_priv->vbt.lvds_ssc_freq;
8660                 }
8661
8662                 if (intel_is_dual_link_lvds(dev)) {
8663                         if (refclk == 100000)
8664                                 limit = &intel_limits_ironlake_dual_lvds_100m;
8665                         else
8666                                 limit = &intel_limits_ironlake_dual_lvds;
8667                 } else {
8668                         if (refclk == 100000)
8669                                 limit = &intel_limits_ironlake_single_lvds_100m;
8670                         else
8671                                 limit = &intel_limits_ironlake_single_lvds;
8672                 }
8673         } else {
8674                 limit = &intel_limits_ironlake_dac;
8675         }
8676
8677         if (!crtc_state->clock_set &&
8678             !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8679                                 refclk, NULL, &crtc_state->dpll)) {
8680                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8681                 return -EINVAL;
8682         }
8683
8684         ironlake_compute_dpll(crtc, crtc_state, NULL);
8685
8686         if (!intel_get_shared_dpll(crtc, crtc_state, NULL)) {
8687                 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
8688                               pipe_name(crtc->pipe));
8689                 return -EINVAL;
8690         }
8691
8692         return 0;
8693 }
8694
8695 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
8696                                          struct intel_link_m_n *m_n)
8697 {
8698         struct drm_device *dev = crtc->base.dev;
8699         struct drm_i915_private *dev_priv = to_i915(dev);
8700         enum pipe pipe = crtc->pipe;
8701
8702         m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
8703         m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
8704         m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
8705                 & ~TU_SIZE_MASK;
8706         m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
8707         m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
8708                     & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
8709 }
8710
/*
 * Read back the CPU transcoder data/link M/N values for @crtc into
 * @m_n. On gen5..7, when DRRS is in use, also read the second M2/N2
 * set into @m2_n2. Pre-gen5 parts use the per-pipe G4X registers.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		/* TU size lives in the high bits of the data M register. */
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		/* Read M2_N2 registers only for gen < 8 (M2_N2 available for
		 * gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily read).
		 */
		if (m2_n2 && INTEL_GEN(dev_priv) < 8 &&
			crtc->config->has_drrs) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		/* Gen4 and earlier: per-pipe rather than per-transcoder. */
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
8751
8752 void intel_dp_get_m_n(struct intel_crtc *crtc,
8753                       struct intel_crtc_state *pipe_config)
8754 {
8755         if (pipe_config->has_pch_encoder)
8756                 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
8757         else
8758                 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
8759                                              &pipe_config->dp_m_n,
8760                                              &pipe_config->dp_m2_n2);
8761 }
8762
/* Read back the FDI link M/N values programmed into the CPU transcoder. */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
8769
8770 static void skylake_get_pfit_config(struct intel_crtc *crtc,
8771                                     struct intel_crtc_state *pipe_config)
8772 {
8773         struct drm_device *dev = crtc->base.dev;
8774         struct drm_i915_private *dev_priv = to_i915(dev);
8775         struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
8776         uint32_t ps_ctrl = 0;
8777         int id = -1;
8778         int i;
8779
8780         /* find scaler attached to this pipe */
8781         for (i = 0; i < crtc->num_scalers; i++) {
8782                 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
8783                 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
8784                         id = i;
8785                         pipe_config->pch_pfit.enabled = true;
8786                         pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
8787                         pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
8788                         break;
8789                 }
8790         }
8791
8792         scaler_state->scaler_id = id;
8793         if (id >= 0) {
8794                 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
8795         } else {
8796                 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
8797         }
8798 }
8799
/*
 * Reconstruct the firmware-programmed (boot) framebuffer configuration
 * of the primary plane on SKL+ by reading back the plane registers, so
 * the BIOS framebuffer can be inherited. On failure the function simply
 * returns with plane_config->fb left NULL.
 */
static void
skylake_get_initial_plane_config(struct intel_crtc *crtc,
				 struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum plane_id plane_id = plane->id;
	enum pipe pipe;
	u32 val, base, offset, stride_mult, tiling, alpha;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to inherit if the plane is not enabled. */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	WARN_ON(pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(PLANE_CTL(pipe, plane_id));

	/* The pixel format field grew on ICL. */
	if (INTEL_GEN(dev_priv) >= 11)
		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
	else
		pixel_format = val & PLANE_CTL_FORMAT_MASK;

	/* Alpha mode moved from PLANE_CTL to PLANE_COLOR_CTL on GLK+. */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
		alpha &= PLANE_COLOR_ALPHA_MASK;
	} else {
		alpha = val & PLANE_CTL_ALPHA_MASK;
	}

	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX, alpha);
	fb->format = drm_format_info(fourcc);

	/* Translate the hardware tiling bits into a DRM format modifier. */
	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier = DRM_FORMAT_MOD_LINEAR;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
	plane_config->base = base;

	offset = I915_READ(PLANE_OFFSET(pipe, plane_id));

	val = I915_READ(PLANE_SIZE(pipe, plane_id));
	fb->height = ((val >> 16) & 0xfff) + 1;
	fb->width = ((val >> 0) & 0x1fff) + 1;

	/* Stride register holds units of tiles/cachelines, not bytes. */
	val = I915_READ(PLANE_STRIDE(pipe, plane_id));
	stride_mult = intel_fb_stride_alignment(fb, 0);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      crtc->base.name, plane->base.name, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}
8902
8903 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
8904                                      struct intel_crtc_state *pipe_config)
8905 {
8906         struct drm_device *dev = crtc->base.dev;
8907         struct drm_i915_private *dev_priv = to_i915(dev);
8908         uint32_t tmp;
8909
8910         tmp = I915_READ(PF_CTL(crtc->pipe));
8911
8912         if (tmp & PF_ENABLE) {
8913                 pipe_config->pch_pfit.enabled = true;
8914                 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
8915                 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
8916
8917                 /* We currently do not free assignements of panel fitters on
8918                  * ivb/hsw (since we don't use the higher upscaling modes which
8919                  * differentiates them) so just WARN about this case for now. */
8920                 if (IS_GEN7(dev_priv)) {
8921                         WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
8922                                 PF_PIPE_SEL_IVB(crtc->pipe));
8923                 }
8924         }
8925 }
8926
/*
 * Read back the full hardware state of an ILK-style pipe into
 * @pipe_config: bpc, colour range, FDI/PCH transcoder configuration,
 * the shared DPLL in use, pipe timings and the panel fitter.
 * Returns true if the pipe is enabled and its state was read,
 * false otherwise. Takes (and releases) the pipe power domain.
 */
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	uint32_t tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	/* On ILK the CPU transcoder is hardwired 1:1 to the pipe. */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;
	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_6BPC:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_8BPC:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_10BPC:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_12BPC:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
		struct intel_shared_dpll *pll;
		enum intel_dpll_id pll_id;

		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);

		if (HAS_PCH_IBX(dev_priv)) {
			/*
			 * The pipe->pch transcoder and pch transcoder->pll
			 * mapping is fixed.
			 */
			pll_id = (enum intel_dpll_id) crtc->pipe;
		} else {
			/* On CPT the transcoder->PLL routing is selectable. */
			tmp = I915_READ(PCH_DPLL_SEL);
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
				pll_id = DPLL_ID_PCH_PLL_B;
			else
				pll_id= DPLL_ID_PCH_PLL_A;
		}

		pipe_config->shared_dpll =
			intel_get_shared_dpll_by_id(dev_priv, pll_id);
		pll = pipe_config->shared_dpll;

		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
						&pipe_config->dpll_hw_state));

		tmp = pipe_config->dpll_hw_state.dpll;
		pipe_config->pixel_multiplier =
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

		ironlake_pch_clock_get(crtc, pipe_config);
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	ironlake_get_pfit_config(crtc, pipe_config);

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
9023
/*
 * Sanity-check that nothing in the display engine still depends on
 * LCPLL before it is disabled: all CRTCs off, display power well off,
 * no SPLL/WRPLLs enabled, panel power and backlights off, utility pin
 * and PCH GTC off, and interrupts disabled. Only emits state warnings.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
		     pipe_name(crtc->pipe));

	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
			"Display power well on\n");
	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
	I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
	     "CPU PWM1 enabled\n");
	if (IS_HASWELL(dev_priv))
		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
		     "CPU PWM2 enabled\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
	     "PCH PWM1 enabled\n");
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
	     "Utility pin enabled\n");
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
9058
9059 static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
9060 {
9061         if (IS_HASWELL(dev_priv))
9062                 return I915_READ(D_COMP_HSW);
9063         else
9064                 return I915_READ(D_COMP_BDW);
9065 }
9066
9067 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
9068 {
9069         if (IS_HASWELL(dev_priv)) {
9070                 mutex_lock(&dev_priv->pcu_lock);
9071                 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
9072                                             val))
9073                         DRM_DEBUG_KMS("Failed to write to D_COMP\n");
9074                 mutex_unlock(&dev_priv->pcu_lock);
9075         } else {
9076                 I915_WRITE(D_COMP_BDW, val);
9077                 POSTING_READ(D_COMP_BDW);
9078         }
9079 }
9080
/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 *
 * @switch_to_fclk: route the CD clock to FCLK before disabling the PLL.
 * @allow_power_down: set LCPLL_POWER_DOWN_ALLOW once the PLL is unlocked.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	uint32_t val;

	assert_can_disable_lcpll(dev_priv);

	val = I915_READ(LCPLL_CTL);

	if (switch_to_fclk) {
		val |= LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		/* The FCLK switch is expected to complete within ~1us. */
		if (wait_for_us(I915_READ(LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE, 1))
			DRM_ERROR("Switching to FCLK failed\n");

		val = I915_READ(LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);
	POSTING_READ(LCPLL_CTL);

	/* Wait for the PLL to report unlocked. */
	if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
		DRM_ERROR("LCPLL still locked\n");

	/* Disable D_COMP and wait for any in-flight RCOMP to finish. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
		     1))
		DRM_ERROR("D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		val = I915_READ(LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}
}
9132
/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source. Undoes hsw_disable_lcpll() step by step, in reverse order.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	val = I915_READ(LCPLL_CTL);

	/* Nothing to do if the PLL is already locked and fully enabled. */
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}

	/* Re-enable D_COMP before turning the PLL back on. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);

	/* PLL relock can take a while; allow up to 5 ms. */
	if (intel_wait_for_register(dev_priv,
				    LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
				    5))
		DRM_ERROR("LCPLL not locked yet\n");

	/* Move the CD clock back from FCLK to the (now locked) LCPLL. */
	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = I915_READ(LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_us((I915_READ(LCPLL_CTL) &
				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			DRM_ERROR("Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	intel_update_cdclk(dev_priv);
	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
}
9188
9189 /*
9190  * Package states C8 and deeper are really deep PC states that can only be
9191  * reached when all the devices on the system allow it, so even if the graphics
9192  * device allows PC8+, it doesn't mean the system will actually get to these
9193  * states. Our driver only allows PC8+ when going into runtime PM.
9194  *
9195  * The requirements for PC8+ are that all the outputs are disabled, the power
9196  * well is disabled and most interrupts are disabled, and these are also
9197  * requirements for runtime PM. When these conditions are met, we manually do
9198  * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
9200  * hang the machine.
9201  *
9202  * When we really reach PC8 or deeper states (not just when we allow it) we lose
9203  * the state of some registers, so when we come back from PC8+ we need to
9204  * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
9205  * need to take care of the registers kept by RC6. Notice that this happens even
9206  * if we don't put the device in PCI D3 state (which is what currently happens
9207  * because of the runtime PM support).
9208  *
9209  * For more, read "Display Sequences for Package C8" on the hardware
9210  * documentation.
9211  */
9212 void hsw_enable_pc8(struct drm_i915_private *dev_priv)
9213 {
9214         uint32_t val;
9215
9216         DRM_DEBUG_KMS("Enabling package C8+\n");
9217
9218         if (HAS_PCH_LPT_LP(dev_priv)) {
9219                 val = I915_READ(SOUTH_DSPCLK_GATE_D);
9220                 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
9221                 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9222         }
9223
9224         lpt_disable_clkout_dp(dev_priv);
9225         hsw_disable_lcpll(dev_priv, true, true);
9226 }
9227
9228 void hsw_disable_pc8(struct drm_i915_private *dev_priv)
9229 {
9230         uint32_t val;
9231
9232         DRM_DEBUG_KMS("Disabling package C8+\n");
9233
9234         hsw_restore_lcpll(dev_priv);
9235         lpt_init_pch_refclk(dev_priv);
9236
9237         if (HAS_PCH_LPT_LP(dev_priv)) {
9238                 val = I915_READ(SOUTH_DSPCLK_GATE_D);
9239                 val |= PCH_LP_PARTITION_LEVEL_DISABLE;
9240                 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9241         }
9242 }
9243
9244 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9245                                       struct intel_crtc_state *crtc_state)
9246 {
9247         struct intel_atomic_state *state =
9248                 to_intel_atomic_state(crtc_state->base.state);
9249
9250         if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
9251                 struct intel_encoder *encoder =
9252                         intel_get_crtc_new_encoder(state, crtc_state);
9253
9254                 if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) {
9255                         DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
9256                                       pipe_name(crtc->pipe));
9257                         return -EINVAL;
9258                 }
9259         }
9260
9261         return 0;
9262 }
9263
9264 static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
9265                                    enum port port,
9266                                    struct intel_crtc_state *pipe_config)
9267 {
9268         enum intel_dpll_id id;
9269         u32 temp;
9270
9271         temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
9272         id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
9273
9274         if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
9275                 return;
9276
9277         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9278 }
9279
/*
 * Read back which DPLL drives @port on ICL and record it in
 * @pipe_config. Combo ports (A/B) publish their clock select in
 * DPCLKA_CFGCR0; ports C-F map to a fixed MG PLL each.
 */
static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 temp;

	/* TODO: TBT pll not implemented. */
	switch (port) {
	case PORT_A:
	case PORT_B:
		/* Combo PHY ports: decode the per-port clock select field. */
		temp = I915_READ(DPCLKA_CFGCR0_ICL) &
		       DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
		id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);

		if (WARN_ON(id != DPLL_ID_ICL_DPLL0 && id != DPLL_ID_ICL_DPLL1))
			return;
		break;
	case PORT_C:
		id = DPLL_ID_ICL_MGPLL1;
		break;
	case PORT_D:
		id = DPLL_ID_ICL_MGPLL2;
		break;
	case PORT_E:
		id = DPLL_ID_ICL_MGPLL3;
		break;
	case PORT_F:
		id = DPLL_ID_ICL_MGPLL4;
		break;
	default:
		MISSING_CASE(port);
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
9317
9318 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
9319                                 enum port port,
9320                                 struct intel_crtc_state *pipe_config)
9321 {
9322         enum intel_dpll_id id;
9323
9324         switch (port) {
9325         case PORT_A:
9326                 id = DPLL_ID_SKL_DPLL0;
9327                 break;
9328         case PORT_B:
9329                 id = DPLL_ID_SKL_DPLL1;
9330                 break;
9331         case PORT_C:
9332                 id = DPLL_ID_SKL_DPLL2;
9333                 break;
9334         default:
9335                 DRM_ERROR("Incorrect port type\n");
9336                 return;
9337         }
9338
9339         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9340 }
9341
9342 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
9343                                 enum port port,
9344                                 struct intel_crtc_state *pipe_config)
9345 {
9346         enum intel_dpll_id id;
9347         u32 temp;
9348
9349         temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
9350         id = temp >> (port * 3 + 1);
9351
9352         if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
9353                 return;
9354
9355         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9356 }
9357
/*
 * Decode PORT_CLK_SEL for @port on HSW/BDW and store the matching
 * shared DPLL in @pipe_config. Leaves shared_dpll untouched when the
 * port clock is off (PORT_CLK_SEL_NONE) or the value is unrecognized.
 */
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	uint32_t ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

	switch (ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		id = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		id = DPLL_ID_WRPLL2;
		break;
	case PORT_CLK_SEL_SPLL:
		id = DPLL_ID_SPLL;
		break;
	case PORT_CLK_SEL_LCPLL_810:
		id = DPLL_ID_LCPLL_810;
		break;
	case PORT_CLK_SEL_LCPLL_1350:
		id = DPLL_ID_LCPLL_1350;
		break;
	case PORT_CLK_SEL_LCPLL_2700:
		id = DPLL_ID_LCPLL_2700;
		break;
	default:
		/* Unknown values warn, then are treated like "no clock". */
		MISSING_CASE(ddi_pll_sel);
		/* fall through */
	case PORT_CLK_SEL_NONE:
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
9393
/*
 * Determine which CPU transcoder feeds @crtc and whether that pipe is
 * enabled. On success the transcoder's power domain reference is
 * recorded in @power_domain_mask and left held for the caller to
 * release. Returns true if the transcoder's pipe is enabled.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     u64 *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	u32 tmp;

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * transcoder handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
		enum pipe trans_edp_pipe;
		/* Decode which pipe the eDP transcoder is routed to. */
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			WARN(1, "unknown pipe linked to edp transcoder\n");
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_edp_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_edp_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_edp_pipe = PIPE_C;
			break;
		}

		if (trans_edp_pipe == crtc->pipe)
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
	}

	/* Bail out if the transcoder's power well is currently off. */
	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;
	*power_domain_mask |= BIT_ULL(power_domain);

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
9445
/*
 * Check whether one of the BXT DSI transcoders (DSI_A on port A,
 * DSI_C on port C) is driving @crtc. Any power domain references taken
 * while probing are recorded in @power_domain_mask for the caller to
 * release. Returns true if a DSI transcoder was found and stored in
 * @pipe_config->cpu_transcoder.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 u64 *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	enum port port;
	enum transcoder cpu_transcoder;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		/* Skip this port if its power well is off. */
		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
			continue;
		*power_domain_mask |= BIT_ULL(power_domain);

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* The transcoder must also be routed to our pipe. */
		tmp = I915_READ(MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
9493
/*
 * Read out the DDI port and shared DPLL driving the current transcoder
 * into @pipe_config, plus the FDI configuration when the PCH
 * transcoder is involved (pre-gen9 DDI E only).
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum port port;
	uint32_t tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	/* Each platform encodes the port->PLL routing differently. */
	if (IS_ICELAKE(dev_priv))
		icelake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_CANNONLAKE(dev_priv))
		cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_BC(dev_priv))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_LP(dev_priv))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	pll = pipe_config->shared_dpll;
	if (pll) {
		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
						&pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only one FDI/PCH transcoder A, which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if (INTEL_GEN(dev_priv) < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}
9539
/*
 * Read the full hardware state of @crtc back into @pipe_config.
 * Power domain references are taken as needed for the readout and all
 * dropped again before returning. Returns true if the pipe is active.
 */
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	u64 power_domain_mask;
	bool active;

	intel_crtc_init_scalers(crtc, pipe_config);

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;
	power_domain_mask = BIT_ULL(power_domain);

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);

	/* On BXT the DSI transcoders are separate from the regular ones. */
	if (IS_GEN9_LP(dev_priv) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) {
		WARN_ON(active);
		active = true;
	}

	if (!active)
		goto out;

	/* DSI transcoders have neither a DDI port nor pipe timings here. */
	if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		haswell_get_ddi_port_state(crtc, pipe_config);
		intel_get_pipe_timings(crtc, pipe_config);
	}

	intel_get_pipe_src_size(crtc, pipe_config);

	pipe_config->gamma_mode =
		I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;

	/* Read out the YCbCr 4:2:0 output configuration (BDW+). */
	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
		u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
		bool clrspace_yuv = tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV;

		if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
			bool blend_mode_420 = tmp &
					      PIPEMISC_YUV420_MODE_FULL_BLEND;

			pipe_config->ycbcr420 = tmp & PIPEMISC_YUV420_ENABLE;
			/* All three 4:2:0 related bits are expected to agree. */
			if (pipe_config->ycbcr420 != clrspace_yuv ||
			    pipe_config->ycbcr420 != blend_mode_420)
				DRM_DEBUG_KMS("Bad 4:2:0 mode (%08x)\n", tmp);
		} else if (clrspace_yuv) {
			DRM_DEBUG_KMS("YCbCr 4:2:0 Unsupported\n");
		}
	}

	/* The panel fitter lives in its own power domain. */
	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		power_domain_mask |= BIT_ULL(power_domain);
		if (INTEL_GEN(dev_priv) >= 9)
			skylake_get_pfit_config(crtc, pipe_config);
		else
			ironlake_get_pfit_config(crtc, pipe_config);
	}

	if (hsw_crtc_supports_ips(crtc)) {
		if (IS_HASWELL(dev_priv))
			pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
		else {
			/*
			 * We cannot readout IPS state on broadwell, set to
			 * true so we can set it to a defined state on first
			 * commit.
			 */
			pipe_config->ips_enabled = true;
		}
	}

	/* PIPE_MULT only exists for non-eDP, non-DSI transcoders. */
	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

out:
	/* Drop every power domain reference taken during the readout. */
	for_each_power_domain(power_domain, power_domain_mask)
		intel_display_power_put(dev_priv, power_domain);

	return active;
}
9631
9632 static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
9633 {
9634         struct drm_i915_private *dev_priv =
9635                 to_i915(plane_state->base.plane->dev);
9636         const struct drm_framebuffer *fb = plane_state->base.fb;
9637         const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
9638         u32 base;
9639
9640         if (INTEL_INFO(dev_priv)->cursor_needs_physical)
9641                 base = obj->phys_handle->busaddr;
9642         else
9643                 base = intel_plane_ggtt_offset(plane_state);
9644
9645         base += plane_state->color_plane[0].offset;
9646
9647         /* ILK+ do this automagically */
9648         if (HAS_GMCH_DISPLAY(dev_priv) &&
9649             plane_state->base.rotation & DRM_MODE_ROTATE_180)
9650                 base += (plane_state->base.crtc_h *
9651                          plane_state->base.crtc_w - 1) * fb->format->cpp[0];
9652
9653         return base;
9654 }
9655
9656 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
9657 {
9658         int x = plane_state->base.crtc_x;
9659         int y = plane_state->base.crtc_y;
9660         u32 pos = 0;
9661
9662         if (x < 0) {
9663                 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
9664                 x = -x;
9665         }
9666         pos |= x << CURSOR_X_SHIFT;
9667
9668         if (y < 0) {
9669                 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
9670                 y = -y;
9671         }
9672         pos |= y << CURSOR_Y_SHIFT;
9673
9674         return pos;
9675 }
9676
9677 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
9678 {
9679         const struct drm_mode_config *config =
9680                 &plane_state->base.plane->dev->mode_config;
9681         int width = plane_state->base.crtc_w;
9682         int height = plane_state->base.crtc_h;
9683
9684         return width > 0 && width <= config->cursor_width &&
9685                 height > 0 && height <= config->cursor_height;
9686 }
9687
/*
 * Compute and validate the cursor surface offset/stride. Cursors can't
 * be panned within the framebuffer: after aligning, the source origin
 * must resolve to (0, 0). Returns 0 on success or a negative errno.
 */
static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int src_x, src_y;
	u32 offset;
	int ret;

	intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
	plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);

	ret = intel_plane_check_stride(plane_state);
	if (ret)
		return ret;

	/* src coordinates are 16.16 fixed point; keep the integer part. */
	src_x = plane_state->base.src_x >> 16;
	src_y = plane_state->base.src_y >> 16;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
						    plane_state, 0);

	if (src_x != 0 || src_y != 0) {
		DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
		return -EINVAL;
	}

	plane_state->color_plane[0].offset = offset;

	return 0;
}
9719
9720 static int intel_check_cursor(struct intel_crtc_state *crtc_state,
9721                               struct intel_plane_state *plane_state)
9722 {
9723         const struct drm_framebuffer *fb = plane_state->base.fb;
9724         int ret;
9725
9726         if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
9727                 DRM_DEBUG_KMS("cursor cannot be tiled\n");
9728                 return -EINVAL;
9729         }
9730
9731         ret = drm_atomic_helper_check_plane_state(&plane_state->base,
9732                                                   &crtc_state->base,
9733                                                   DRM_PLANE_HELPER_NO_SCALING,
9734                                                   DRM_PLANE_HELPER_NO_SCALING,
9735                                                   true, true);
9736         if (ret)
9737                 return ret;
9738
9739         if (!plane_state->base.visible)
9740                 return 0;
9741
9742         ret = intel_plane_check_src_coordinates(plane_state);
9743         if (ret)
9744                 return ret;
9745
9746         ret = intel_cursor_check_surface(plane_state);
9747         if (ret)
9748                 return ret;
9749
9750         return 0;
9751 }
9752
/* 845g/865g cursors support strides up to 2048 bytes. */
static unsigned int
i845_cursor_max_stride(struct intel_plane *plane,
		       u32 pixel_format, u64 modifier,
		       unsigned int rotation)
{
	return 2048;
}
9760
9761 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
9762                            const struct intel_plane_state *plane_state)
9763 {
9764         return CURSOR_ENABLE |
9765                 CURSOR_GAMMA_ENABLE |
9766                 CURSOR_FORMAT_ARGB |
9767                 CURSOR_STRIDE(plane_state->color_plane[0].stride);
9768 }
9769
9770 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
9771 {
9772         int width = plane_state->base.crtc_w;
9773
9774         /*
9775          * 845g/865g are only limited by the width of their cursors,
9776          * the height is arbitrary up to the precision of the register.
9777          */
9778         return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
9779 }
9780
/*
 * Validate cursor plane state for 845g/865g and compute the cursor
 * control register value. Returns 0 on success or a negative errno.
 */
static int i845_check_cursor(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i845_cursor_size_ok(plane_state)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  plane_state->base.crtc_w,
			  plane_state->base.crtc_h);
		return -EINVAL;
	}

	WARN_ON(plane_state->base.visible &&
		plane_state->color_plane[0].stride != fb->pitches[0]);

	/* Only a few power-of-two strides can be programmed. */
	switch (fb->pitches[0]) {
	case 256:
	case 512:
	case 1024:
	case 2048:
		break;
	default:
		DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
			      fb->pitches[0]);
		return -EINVAL;
	}

	plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);

	return 0;
}
9822
/*
 * Program the 845g/865g cursor registers from @plane_state, or disable
 * the cursor when @plane_state is NULL or invisible. The register
 * sequence runs under the uncore lock to keep it atomic w.r.t. other
 * MMIO accessors.
 */
static void i845_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	u32 cntl = 0, base = 0, pos = 0, size = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->base.visible) {
		unsigned int width = plane_state->base.crtc_w;
		unsigned int height = plane_state->base.crtc_h;

		cntl = plane_state->ctl;
		size = (height << 12) | width;

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* On these chipsets we can only modify the base/size/stride
	 * whilst the cursor is disabled.
	 */
	if (plane->cursor.base != base ||
	    plane->cursor.size != size ||
	    plane->cursor.cntl != cntl) {
		/* Disable first, then reprogram and re-enable. */
		I915_WRITE_FW(CURCNTR(PIPE_A), 0);
		I915_WRITE_FW(CURBASE(PIPE_A), base);
		I915_WRITE_FW(CURSIZE, size);
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
		I915_WRITE_FW(CURCNTR(PIPE_A), cntl);

		plane->cursor.base = base;
		plane->cursor.size = size;
		plane->cursor.cntl = cntl;
	} else {
		/* Only the position changed; no disable dance needed. */
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
	}

	POSTING_READ_FW(CURCNTR(PIPE_A));

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
9867
/* Disable the 845g/865g cursor by programming an all-zero state. */
static void i845_disable_cursor(struct intel_plane *plane,
				struct intel_crtc *crtc)
{
	i845_update_cursor(plane, NULL, NULL);
}
9873
9874 static bool i845_cursor_get_hw_state(struct intel_plane *plane,
9875                                      enum pipe *pipe)
9876 {
9877         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
9878         enum intel_display_power_domain power_domain;
9879         bool ret;
9880
9881         power_domain = POWER_DOMAIN_PIPE(PIPE_A);
9882         if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9883                 return false;
9884
9885         ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
9886
9887         *pipe = PIPE_A;
9888
9889         intel_display_power_put(dev_priv, power_domain);
9890
9891         return ret;
9892 }
9893
/*
 * Maximum cursor stride for i9xx+: the widest supported cursor times
 * 4 bytes per pixel.
 */
static unsigned int
i9xx_cursor_max_stride(struct intel_plane *plane,
		       u32 pixel_format, u64 modifier,
		       unsigned int rotation)
{
	return plane->base.dev->mode_config.cursor_width * 4;
}
9901
/*
 * Build the CURCNTR value for an i9xx+ cursor from the plane state.
 * Returns 0 (cursor effectively disabled) if the width isn't one of
 * the supported cursor modes.
 */
static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
			   const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	u32 cntl = 0;

	if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
		cntl |= MCURSOR_TRICKLE_FEED_DISABLE;

	/* Gamma (and pipe CSC on DDI) is only set on gen10 and earlier. */
	if (INTEL_GEN(dev_priv) <= 10) {
		cntl |= MCURSOR_GAMMA_ENABLE;

		if (HAS_DDI(dev_priv))
			cntl |= MCURSOR_PIPE_CSC_ENABLE;
	}

	/* Older non-G4X platforms select the pipe in the control register. */
	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
		cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);

	switch (plane_state->base.crtc_w) {
	case 64:
		cntl |= MCURSOR_MODE_64_ARGB_AX;
		break;
	case 128:
		cntl |= MCURSOR_MODE_128_ARGB_AX;
		break;
	case 256:
		cntl |= MCURSOR_MODE_256_ARGB_AX;
		break;
	default:
		MISSING_CASE(plane_state->base.crtc_w);
		return 0;
	}

	if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
		cntl |= MCURSOR_ROTATE_180;

	return cntl;
}
9943
9944 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
9945 {
9946         struct drm_i915_private *dev_priv =
9947                 to_i915(plane_state->base.plane->dev);
9948         int width = plane_state->base.crtc_w;
9949         int height = plane_state->base.crtc_h;
9950
9951         if (!intel_cursor_size_ok(plane_state))
9952                 return false;
9953
9954         /* Cursor width is limited to a few power-of-two sizes */
9955         switch (width) {
9956         case 256:
9957         case 128:
9958         case 64:
9959                 break;
9960         default:
9961                 return false;
9962         }
9963
9964         /*
9965          * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
9966          * height from 8 lines up to the cursor width, when the
9967          * cursor is not rotated. Everything else requires square
9968          * cursors.
9969          */
9970         if (HAS_CUR_FBC(dev_priv) &&
9971             plane_state->base.rotation & DRM_MODE_ROTATE_0) {
9972                 if (height < 8 || height > width)
9973                         return false;
9974         } else {
9975                 if (height != width)
9976                         return false;
9977         }
9978
9979         return true;
9980 }
9981
/*
 * Validate cursor plane state for i9xx+ hardware and compute the
 * cursor control register value. Returns 0 on success or a negative
 * errno.
 */
static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	enum pipe pipe = plane->pipe;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i9xx_cursor_size_ok(plane_state)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  plane_state->base.crtc_w,
			  plane_state->base.crtc_h);
		return -EINVAL;
	}

	WARN_ON(plane_state->base.visible &&
		plane_state->color_plane[0].stride != fb->pitches[0]);

	/* The stride must exactly match the visible cursor width. */
	if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
		DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
			      fb->pitches[0], plane_state->base.crtc_w);
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse the put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
	    plane_state->base.visible && plane_state->base.crtc_x < 0) {
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);

	return 0;
}
10036
/*
 * Program the cursor registers for @plane. When @plane_state is NULL
 * (or not visible) all register values are left at 0, which disables
 * the cursor. The whole register sequence runs under the uncore lock
 * so it cannot be interleaved with other register access holding that
 * lock.
 */
static void i9xx_update_cursor(struct intel_plane *plane,
                               const struct intel_crtc_state *crtc_state,
                               const struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum pipe pipe = plane->pipe;
        u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
        unsigned long irqflags;

        if (plane_state && plane_state->base.visible) {
                cntl = plane_state->ctl;

                /* Non-square cursor: program the height via CUR_FBC_CTL
                 * (holds height - 1). Square cursors leave it disabled. */
                if (plane_state->base.crtc_h != plane_state->base.crtc_w)
                        fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);

                base = intel_cursor_base(plane_state);
                pos = intel_cursor_position(plane_state);
        }

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /*
         * On some platforms writing CURCNTR first will also
         * cause CURPOS to be armed by the CURBASE write.
         * Without the CURCNTR write the CURPOS write would
         * arm itself. Thus we always start the full update
         * with a CURCNTR write.
         *
         * On other platforms CURPOS always requires the
         * CURBASE write to arm the update. Additionally
         * a write to any of the cursor register will cancel
         * an already armed cursor update. Thus leaving out
         * the CURBASE write after CURPOS could lead to a
         * cursor that doesn't appear to move, or even change
         * shape. Thus we always write CURBASE.
         *
         * CURCNTR and CUR_FBC_CTL are always
         * armed by the CURBASE write only.
         */
        if (plane->cursor.base != base ||
            plane->cursor.size != fbc_ctl ||
            plane->cursor.cntl != cntl) {
                /* Full update: something besides the position changed. */
                I915_WRITE_FW(CURCNTR(pipe), cntl);
                if (HAS_CUR_FBC(dev_priv))
                        I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
                I915_WRITE_FW(CURPOS(pipe), pos);
                I915_WRITE_FW(CURBASE(pipe), base);

                /* Cache what we just programmed so the next update can
                 * take the fast position-only path. */
                plane->cursor.base = base;
                plane->cursor.size = fbc_ctl;
                plane->cursor.cntl = cntl;
        } else {
                /* Fast path: only the position changed. */
                I915_WRITE_FW(CURPOS(pipe), pos);
                I915_WRITE_FW(CURBASE(pipe), base);
        }

        POSTING_READ_FW(CURBASE(pipe));

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
10097
/*
 * Disable the cursor on @plane by programming an all-zero cursor state
 * (i9xx_update_cursor() writes 0 to all cursor registers when given a
 * NULL plane_state).
 */
static void i9xx_disable_cursor(struct intel_plane *plane,
                                struct intel_crtc *crtc)
{
        i9xx_update_cursor(plane, NULL, NULL);
}
10103
/*
 * Read back from hardware whether the cursor plane is enabled, and
 * which pipe it is currently attached to (stored in *@pipe).
 *
 * A display power reference is held around the register read; if the
 * power domain is off the cursor is reported as disabled.
 *
 * Returns true if the cursor is enabled in CURCNTR.
 */
static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
                                     enum pipe *pipe)
{
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum intel_display_power_domain power_domain;
        bool ret;
        u32 val;

        /*
         * Not 100% correct for planes that can move between pipes,
         * but that's only the case for gen2-3 which don't have any
         * display power wells.
         */
        power_domain = POWER_DOMAIN_PIPE(plane->pipe);
        if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;

        val = I915_READ(CURCNTR(plane->pipe));

        ret = val & MCURSOR_MODE;

        /* Older platforms encode the bound pipe in the register itself. */
        if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
                *pipe = plane->pipe;
        else
                *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
                        MCURSOR_PIPE_SELECT_SHIFT;

        intel_display_power_put(dev_priv, power_domain);

        return ret;
}
10135
/*
 * VESA 640x480x72Hz mode to set on the pipe.
 * Used as the fallback mode by intel_get_load_detect_pipe() when the
 * caller does not supply one.
 */
static const struct drm_display_mode load_detect_mode = {
        DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
                 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
10141
10142 struct drm_framebuffer *
10143 intel_framebuffer_create(struct drm_i915_gem_object *obj,
10144                          struct drm_mode_fb_cmd2 *mode_cmd)
10145 {
10146         struct intel_framebuffer *intel_fb;
10147         int ret;
10148
10149         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
10150         if (!intel_fb)
10151                 return ERR_PTR(-ENOMEM);
10152
10153         ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
10154         if (ret)
10155                 goto err;
10156
10157         return &intel_fb->base;
10158
10159 err:
10160         kfree(intel_fb);
10161         return ERR_PTR(ret);
10162 }
10163
10164 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
10165                                         struct drm_crtc *crtc)
10166 {
10167         struct drm_plane *plane;
10168         struct drm_plane_state *plane_state;
10169         int ret, i;
10170
10171         ret = drm_atomic_add_affected_planes(state, crtc);
10172         if (ret)
10173                 return ret;
10174
10175         for_each_new_plane_in_state(state, plane, plane_state, i) {
10176                 if (plane_state->crtc != crtc)
10177                         continue;
10178
10179                 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
10180                 if (ret)
10181                         return ret;
10182
10183                 drm_atomic_set_fb_for_plane(plane_state, NULL);
10184         }
10185
10186         return 0;
10187 }
10188
/*
 * Set up a pipe for load detection on @connector, committing a modeset
 * with all planes disabled, and record the pre-existing state in
 * @old->restore_state so intel_release_load_detect_pipe() can undo it.
 *
 * NOTE(review): the return value is overloaded despite the int return
 * type: true when the pipe was set up, false when no pipe is available
 * or the commit failed, and -EDEADLK when the acquire context must be
 * backed off and restarted (see the tail of the fail: path). Callers
 * must handle all three cases.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
                               const struct drm_display_mode *mode,
                               struct intel_load_detect_pipe *old,
                               struct drm_modeset_acquire_ctx *ctx)
{
        struct intel_crtc *intel_crtc;
        struct intel_encoder *intel_encoder =
                intel_attached_encoder(connector);
        struct drm_crtc *possible_crtc;
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_crtc *crtc = NULL;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_mode_config *config = &dev->mode_config;
        struct drm_atomic_state *state = NULL, *restore_state = NULL;
        struct drm_connector_state *connector_state;
        struct intel_crtc_state *crtc_state;
        int ret, i = -1;

        DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
                      connector->base.id, connector->name,
                      encoder->base.id, encoder->name);

        old->restore_state = NULL;

        WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));

        /*
         * Algorithm gets a little messy:
         *
         *   - if the connector already has an assigned crtc, use it (but make
         *     sure it's on first)
         *
         *   - try to find the first unused crtc that can drive this connector,
         *     and use that if we find one
         */

        /* See if we already have a CRTC for this connector */
        if (connector->state->crtc) {
                crtc = connector->state->crtc;

                ret = drm_modeset_lock(&crtc->mutex, ctx);
                if (ret)
                        goto fail;

                /* Make sure the crtc and connector are running */
                goto found;
        }

        /* Find an unused one (if possible) */
        for_each_crtc(dev, possible_crtc) {
                i++;
                /* Skip crtcs this encoder can't drive. */
                if (!(encoder->possible_crtcs & (1 << i)))
                        continue;

                ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
                if (ret)
                        goto fail;

                /* Already in use - drop the lock and keep looking. */
                if (possible_crtc->state->enable) {
                        drm_modeset_unlock(&possible_crtc->mutex);
                        continue;
                }

                crtc = possible_crtc;
                break;
        }

        /*
         * If we didn't find an unused CRTC, don't use any.
         */
        if (!crtc) {
                DRM_DEBUG_KMS("no pipe available for load-detect\n");
                ret = -ENODEV;
                goto fail;
        }

found:
        intel_crtc = to_intel_crtc(crtc);

        /*
         * Two atomic states: @state carries the load-detect configuration
         * we commit now, @restore_state duplicates the current state for
         * the later restore.
         */
        state = drm_atomic_state_alloc(dev);
        restore_state = drm_atomic_state_alloc(dev);
        if (!state || !restore_state) {
                ret = -ENOMEM;
                goto fail;
        }

        state->acquire_ctx = ctx;
        restore_state->acquire_ctx = ctx;

        connector_state = drm_atomic_get_connector_state(state, connector);
        if (IS_ERR(connector_state)) {
                ret = PTR_ERR(connector_state);
                goto fail;
        }

        ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
        if (ret)
                goto fail;

        crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
        if (IS_ERR(crtc_state)) {
                ret = PTR_ERR(crtc_state);
                goto fail;
        }

        crtc_state->base.active = crtc_state->base.enable = true;

        if (!mode)
                mode = &load_detect_mode;

        ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
        if (ret)
                goto fail;

        /* Load detection scans out nothing - turn all planes off. */
        ret = intel_modeset_disable_planes(state, crtc);
        if (ret)
                goto fail;

        /* Snapshot the state we are about to clobber, for the restore. */
        ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
        if (!ret)
                ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
        if (!ret)
                ret = drm_atomic_add_affected_planes(restore_state, crtc);
        if (ret) {
                DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
                goto fail;
        }

        ret = drm_atomic_commit(state);
        if (ret) {
                DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
                goto fail;
        }

        /* Ownership of restore_state passes to the caller via @old. */
        old->restore_state = restore_state;
        drm_atomic_state_put(state);

        /* let the connector get through one full cycle before testing */
        intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
        return true;

fail:
        if (state) {
                drm_atomic_state_put(state);
                state = NULL;
        }
        if (restore_state) {
                drm_atomic_state_put(restore_state);
                restore_state = NULL;
        }

        /* Deadlock needs to be propagated so the caller can back off. */
        if (ret == -EDEADLK)
                return ret;

        return false;
}
10346
10347 void intel_release_load_detect_pipe(struct drm_connector *connector,
10348                                     struct intel_load_detect_pipe *old,
10349                                     struct drm_modeset_acquire_ctx *ctx)
10350 {
10351         struct intel_encoder *intel_encoder =
10352                 intel_attached_encoder(connector);
10353         struct drm_encoder *encoder = &intel_encoder->base;
10354         struct drm_atomic_state *state = old->restore_state;
10355         int ret;
10356
10357         DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10358                       connector->base.id, connector->name,
10359                       encoder->base.id, encoder->name);
10360
10361         if (!state)
10362                 return;
10363
10364         ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
10365         if (ret)
10366                 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
10367         drm_atomic_state_put(state);
10368 }
10369
10370 static int i9xx_pll_refclk(struct drm_device *dev,
10371                            const struct intel_crtc_state *pipe_config)
10372 {
10373         struct drm_i915_private *dev_priv = to_i915(dev);
10374         u32 dpll = pipe_config->dpll_hw_state.dpll;
10375
10376         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
10377                 return dev_priv->vbt.lvds_ssc_freq;
10378         else if (HAS_PCH_SPLIT(dev_priv))
10379                 return 120000;
10380         else if (!IS_GEN2(dev_priv))
10381                 return 96000;
10382         else
10383                 return 48000;
10384 }
10385
/*
 * Returns the clock of the currently programmed mode of the given pipe.
 *
 * Decodes the DPLL/FP register values stashed in
 * pipe_config->dpll_hw_state back into divisor values and computes the
 * resulting port clock, stored in pipe_config->port_clock.
 */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
                                struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = pipe_config->cpu_transcoder;
        u32 dpll = pipe_config->dpll_hw_state.dpll;
        u32 fp;
        struct dpll clock;
        int port_clock;
        int refclk = i9xx_pll_refclk(dev, pipe_config);

        /* The DPLL selects which of the two FP divisor registers is active. */
        if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
                fp = pipe_config->dpll_hw_state.fp0;
        else
                fp = pipe_config->dpll_hw_state.fp1;

        clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
        if (IS_PINEVIEW(dev_priv)) {
                /* Pineview uses a different FP field layout, and n is
                 * encoded as a one-hot bit position. */
                clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
                clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
        } else {
                clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
                clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
        }

        if (!IS_GEN2(dev_priv)) {
                /* p1 is encoded as a one-hot bitfield in the DPLL. */
                if (IS_PINEVIEW(dev_priv))
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
                                DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
                else
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
                               DPLL_FPA01_P1_POST_DIV_SHIFT);

                /* p2 depends on the DPLL operating mode. */
                switch (dpll & DPLL_MODE_MASK) {
                case DPLLB_MODE_DAC_SERIAL:
                        clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
                                5 : 10;
                        break;
                case DPLLB_MODE_LVDS:
                        clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
                                7 : 14;
                        break;
                default:
                        DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
                                  "mode\n", (int)(dpll & DPLL_MODE_MASK));
                        return;
                }

                if (IS_PINEVIEW(dev_priv))
                        port_clock = pnv_calc_dpll_params(refclk, &clock);
                else
                        port_clock = i9xx_calc_dpll_params(refclk, &clock);
        } else {
                /* NOTE(review): on gen2 the LVDS register decides the p
                 * divisors; pipe 1 is assumed to be the (only possible)
                 * LVDS pipe here - confirm against platform docs. */
                u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
                bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

                if (is_lvds) {
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
                                       DPLL_FPA01_P1_POST_DIV_SHIFT);

                        if (lvds & LVDS_CLKB_POWER_UP)
                                clock.p2 = 7;
                        else
                                clock.p2 = 14;
                } else {
                        if (dpll & PLL_P1_DIVIDE_BY_TWO)
                                clock.p1 = 2;
                        else {
                                clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
                                            DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
                        }
                        if (dpll & PLL_P2_DIVIDE_BY_4)
                                clock.p2 = 4;
                        else
                                clock.p2 = 2;
                }

                port_clock = i9xx_calc_dpll_params(refclk, &clock);
        }

        /*
         * This value includes pixel_multiplier. We will use
         * port_clock to compute adjusted_mode.crtc_clock in the
         * encoder's get_config() function.
         */
        pipe_config->port_clock = port_clock;
}
10475
10476 int intel_dotclock_calculate(int link_freq,
10477                              const struct intel_link_m_n *m_n)
10478 {
10479         /*
10480          * The calculation for the data clock is:
10481          * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
10482          * But we want to avoid losing precison if possible, so:
10483          * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
10484          *
10485          * and the link clock is simpler:
10486          * link_clock = (m * link_clock) / n
10487          */
10488
10489         if (!m_n->link_n)
10490                 return 0;
10491
10492         return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
10493 }
10494
/*
 * Read out the PCH-side clock state: the port clock from the DPLL, and
 * a dotclock estimate derived from the FDI M/N configuration.
 */
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
                                   struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        /* read out port_clock from the DPLL */
        i9xx_crtc_clock_get(crtc, pipe_config);

        /*
         * In case there is an active pipe without active ports,
         * we may need some idea for the dotclock anyway.
         * Calculate one based on the FDI configuration.
         */
        pipe_config->base.adjusted_mode.crtc_clock =
                intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
                                         &pipe_config->fdi_m_n);
}
10512
10513 /* Returns the currently programmed mode of the given encoder. */
10514 struct drm_display_mode *
10515 intel_encoder_current_mode(struct intel_encoder *encoder)
10516 {
10517         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
10518         struct intel_crtc_state *crtc_state;
10519         struct drm_display_mode *mode;
10520         struct intel_crtc *crtc;
10521         enum pipe pipe;
10522
10523         if (!encoder->get_hw_state(encoder, &pipe))
10524                 return NULL;
10525
10526         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
10527
10528         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
10529         if (!mode)
10530                 return NULL;
10531
10532         crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
10533         if (!crtc_state) {
10534                 kfree(mode);
10535                 return NULL;
10536         }
10537
10538         crtc_state->base.crtc = &crtc->base;
10539
10540         if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
10541                 kfree(crtc_state);
10542                 kfree(mode);
10543                 return NULL;
10544         }
10545
10546         encoder->get_config(encoder, crtc_state);
10547
10548         intel_mode_from_pipe_config(mode, crtc_state);
10549
10550         kfree(crtc_state);
10551
10552         return mode;
10553 }
10554
/* Tear down the drm_crtc and free the containing intel_crtc. */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
        drm_crtc_cleanup(crtc);
        kfree(to_intel_crtc(crtc));
}
10562
10563 /**
10564  * intel_wm_need_update - Check whether watermarks need updating
10565  * @plane: drm plane
10566  * @state: new plane state
10567  *
10568  * Check current plane state versus the new one to determine whether
10569  * watermarks need to be recalculated.
10570  *
10571  * Returns true or false.
10572  */
10573 static bool intel_wm_need_update(struct drm_plane *plane,
10574                                  struct drm_plane_state *state)
10575 {
10576         struct intel_plane_state *new = to_intel_plane_state(state);
10577         struct intel_plane_state *cur = to_intel_plane_state(plane->state);
10578
10579         /* Update watermarks on tiling or size changes. */
10580         if (new->base.visible != cur->base.visible)
10581                 return true;
10582
10583         if (!cur->base.fb || !new->base.fb)
10584                 return false;
10585
10586         if (cur->base.fb->modifier != new->base.fb->modifier ||
10587             cur->base.rotation != new->base.rotation ||
10588             drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
10589             drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
10590             drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
10591             drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
10592                 return true;
10593
10594         return false;
10595 }
10596
10597 static bool needs_scaling(const struct intel_plane_state *state)
10598 {
10599         int src_w = drm_rect_width(&state->base.src) >> 16;
10600         int src_h = drm_rect_height(&state->base.src) >> 16;
10601         int dst_w = drm_rect_width(&state->base.dst);
10602         int dst_h = drm_rect_height(&state->base.dst);
10603
10604         return (src_w != dst_w || src_h != dst_h);
10605 }
10606
/*
 * Compute the crtc-level consequences of a plane state change:
 * watermark update flags, cxsr disabling, frontbuffer bits, LP
 * watermark workaround, and (gen9+) scaler setup.
 *
 * Returns 0 on success, or a negative error code from the scaler
 * update.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
                                    struct drm_crtc_state *crtc_state,
                                    const struct intel_plane_state *old_plane_state,
                                    struct drm_plane_state *plane_state)
{
        struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
        struct drm_crtc *crtc = crtc_state->crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_plane *plane = to_intel_plane(plane_state->plane);
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        bool mode_changed = needs_modeset(crtc_state);
        bool was_crtc_enabled = old_crtc_state->base.active;
        bool is_crtc_enabled = crtc_state->active;
        bool turn_off, turn_on, visible, was_visible;
        struct drm_framebuffer *fb = plane_state->fb;
        int ret;

        /* Gen9+ non-cursor planes may need a pipe scaler. */
        if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
                ret = skl_update_scaler_plane(
                        to_intel_crtc_state(crtc_state),
                        to_intel_plane_state(plane_state));
                if (ret)
                        return ret;
        }

        was_visible = old_plane_state->base.visible;
        visible = plane_state->visible;

        if (!was_crtc_enabled && WARN_ON(was_visible))
                was_visible = false;

        /*
         * Visibility is calculated as if the crtc was on, but
         * after scaler setup everything depends on it being off
         * when the crtc isn't active.
         *
         * FIXME this is wrong for watermarks. Watermarks should also
         * be computed as if the pipe would be active. Perhaps move
         * per-plane wm computation to the .check_plane() hook, and
         * only combine the results from all planes in the current place?
         */
        if (!is_crtc_enabled) {
                plane_state->visible = visible = false;
                to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id);
        }

        /* Invisible before and after: nothing to do. */
        if (!was_visible && !visible)
                return 0;

        if (fb != old_plane_state->base.fb)
                pipe_config->fb_changed = true;

        /* A modeset also counts as turning the plane off and back on. */
        turn_off = was_visible && (!visible || mode_changed);
        turn_on = visible && (!was_visible || mode_changed);

        DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
                         intel_crtc->base.base.id, intel_crtc->base.name,
                         plane->base.base.id, plane->base.name,
                         fb ? fb->base.id : -1);

        DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
                         plane->base.base.id, plane->base.name,
                         was_visible, visible,
                         turn_off, turn_on, mode_changed);

        if (turn_on) {
                if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
                        pipe_config->update_wm_pre = true;

                /* must disable cxsr around plane enable/disable */
                if (plane->id != PLANE_CURSOR)
                        pipe_config->disable_cxsr = true;
        } else if (turn_off) {
                if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
                        pipe_config->update_wm_post = true;

                /* must disable cxsr around plane enable/disable */
                if (plane->id != PLANE_CURSOR)
                        pipe_config->disable_cxsr = true;
        } else if (intel_wm_need_update(&plane->base, plane_state)) {
                if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
                        /* FIXME bollocks */
                        pipe_config->update_wm_pre = true;
                        pipe_config->update_wm_post = true;
                }
        }

        if (visible || was_visible)
                pipe_config->fb_bits |= plane->frontbuffer_bit;

        /*
         * WaCxSRDisabledForSpriteScaling:ivb
         *
         * cstate->update_wm was already set above, so this flag will
         * take effect when we commit and program watermarks.
         */
        if (plane->id == PLANE_SPRITE0 && IS_IVYBRIDGE(dev_priv) &&
            needs_scaling(to_intel_plane_state(plane_state)) &&
            !needs_scaling(old_plane_state))
                pipe_config->disable_lp_wm = true;

        return 0;
}
10711
10712 static bool encoders_cloneable(const struct intel_encoder *a,
10713                                const struct intel_encoder *b)
10714 {
10715         /* masks could be asymmetric, so check both ways */
10716         return a == b || (a->cloneable & (1 << b->type) &&
10717                           b->cloneable & (1 << a->type));
10718 }
10719
10720 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
10721                                          struct intel_crtc *crtc,
10722                                          struct intel_encoder *encoder)
10723 {
10724         struct intel_encoder *source_encoder;
10725         struct drm_connector *connector;
10726         struct drm_connector_state *connector_state;
10727         int i;
10728
10729         for_each_new_connector_in_state(state, connector, connector_state, i) {
10730                 if (connector_state->crtc != &crtc->base)
10731                         continue;
10732
10733                 source_encoder =
10734                         to_intel_encoder(connector_state->best_encoder);
10735                 if (!encoders_cloneable(encoder, source_encoder))
10736                         return false;
10737         }
10738
10739         return true;
10740 }
10741
/*
 * Atomic check hook for the crtc: validates and computes the derived
 * crtc state (clocks, color management, watermarks, gen9+ scalers, IPS)
 * for a proposed atomic update.
 *
 * Returns 0 on success or a negative error code to reject the state.
 */
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
				   struct drm_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc_state);
	struct drm_atomic_state *state = crtc_state->state;
	int ret;
	bool mode_changed = needs_modeset(crtc_state);

	/* Crtc being disabled by this modeset: defer wm update to post-commit. */
	if (mode_changed && !crtc_state->active)
		pipe_config->update_wm_post = true;

	/*
	 * Compute the new pipe clock configuration. shared_dpll must still
	 * be unset at this point (it gets assigned later in the commit),
	 * hence the WARN_ON.
	 */
	if (mode_changed && crtc_state->enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(pipe_config->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
							   pipe_config);
		if (ret)
			return ret;
	}

	if (crtc_state->color_mgmt_changed) {
		ret = intel_color_check(crtc, crtc_state);
		if (ret)
			return ret;

		/*
		 * Changing color management on Intel hardware is
		 * handled as part of planes update.
		 */
		crtc_state->planes_changed = true;
	}

	ret = 0;
	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm &&
	    !to_intel_atomic_state(state)->skip_intermediate_wm) {
		/* Intermediate wm needs the optimal wm computed above. */
		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state.  We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(dev,
								intel_crtc,
								pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	} else if (dev_priv->display.compute_intermediate_wm) {
		/* skip_intermediate_wm: just reuse the optimal watermarks. */
		if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
			pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
	}

	/* Gen9+: pipe scaler update, pixel rate limit, plane scaler setup. */
	if (INTEL_GEN(dev_priv) >= 9) {
		if (mode_changed)
			ret = skl_update_scaler_crtc(pipe_config);

		if (!ret)
			ret = skl_check_pipe_max_pixel_rate(intel_crtc,
							    pipe_config);
		if (!ret)
			ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
							 pipe_config);
	}

	if (HAS_IPS(dev_priv))
		pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config);

	return ret;
}
10826
/* CRTC helper vtable; only the atomic check hook is implemented here. */
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
	.atomic_check = intel_crtc_atomic_check,
};
10830
/*
 * Resynchronize every connector's atomic state (best_encoder/crtc) with
 * the legacy connector->encoder pointers, adjusting the connector
 * reference counts that track crtc bindings accordingly.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* Drop the reference held for the previous crtc binding. */
		if (connector->base.state->crtc)
			drm_connector_put(&connector->base);

		if (connector->base.encoder) {
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;

			/* Bound to a crtc: take a fresh reference. */
			drm_connector_get(&connector->base);
		} else {
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
10855
10856 static void
10857 connected_sink_compute_bpp(struct intel_connector *connector,
10858                            struct intel_crtc_state *pipe_config)
10859 {
10860         const struct drm_display_info *info = &connector->base.display_info;
10861         int bpp = pipe_config->pipe_bpp;
10862
10863         DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
10864                       connector->base.base.id,
10865                       connector->base.name);
10866
10867         /* Don't use an invalid EDID bpc value */
10868         if (info->bpc != 0 && info->bpc * 3 < bpp) {
10869                 DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
10870                               bpp, info->bpc * 3);
10871                 pipe_config->pipe_bpp = info->bpc * 3;
10872         }
10873
10874         /* Clamp bpp to 8 on screens without EDID 1.4 */
10875         if (info->bpc == 0 && bpp > 24) {
10876                 DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
10877                               bpp);
10878                 pipe_config->pipe_bpp = 24;
10879         }
10880 }
10881
10882 static int
10883 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
10884                           struct intel_crtc_state *pipe_config)
10885 {
10886         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10887         struct drm_atomic_state *state;
10888         struct drm_connector *connector;
10889         struct drm_connector_state *connector_state;
10890         int bpp, i;
10891
10892         if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
10893             IS_CHERRYVIEW(dev_priv)))
10894                 bpp = 10*3;
10895         else if (INTEL_GEN(dev_priv) >= 5)
10896                 bpp = 12*3;
10897         else
10898                 bpp = 8*3;
10899
10900
10901         pipe_config->pipe_bpp = bpp;
10902
10903         state = pipe_config->base.state;
10904
10905         /* Clamp display bpp to EDID value */
10906         for_each_new_connector_in_state(state, connector, connector_state, i) {
10907                 if (connector_state->crtc != &crtc->base)
10908                         continue;
10909
10910                 connected_sink_compute_bpp(to_intel_connector(connector),
10911                                            pipe_config);
10912         }
10913
10914         return bpp;
10915 }
10916
10917 static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
10918 {
10919         DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
10920                         "type: 0x%x flags: 0x%x\n",
10921                 mode->crtc_clock,
10922                 mode->crtc_hdisplay, mode->crtc_hsync_start,
10923                 mode->crtc_hsync_end, mode->crtc_htotal,
10924                 mode->crtc_vdisplay, mode->crtc_vsync_start,
10925                 mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
10926 }
10927
10928 static inline void
10929 intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
10930                       unsigned int lane_count, struct intel_link_m_n *m_n)
10931 {
10932         DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
10933                       id, lane_count,
10934                       m_n->gmch_m, m_n->gmch_n,
10935                       m_n->link_m, m_n->link_n, m_n->tu);
10936 }
10937
/* Map INTEL_OUTPUT_* enum values to their names for debug output. */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

/* Indexed by INTEL_OUTPUT_*; keep in sync with the output type enum. */
static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
10956
10957 static void snprintf_output_types(char *buf, size_t len,
10958                                   unsigned int output_types)
10959 {
10960         char *str = buf;
10961         int i;
10962
10963         str[0] = '\0';
10964
10965         for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
10966                 int r;
10967
10968                 if ((output_types & BIT(i)) == 0)
10969                         continue;
10970
10971                 r = snprintf(str, len, "%s%s",
10972                              str != buf ? "," : "", output_type_str[i]);
10973                 if (r >= len)
10974                         break;
10975                 str += r;
10976                 len -= r;
10977
10978                 output_types &= ~BIT(i);
10979         }
10980
10981         WARN_ON_ONCE(output_types != 0);
10982 }
10983
/*
 * Dump a full crtc state to the debug log: output types, transcoder,
 * link m/n values, modes/timings, pfit/scaler state and all planes
 * attached to this crtc's pipe. @context labels where the dump is from.
 */
static void intel_dump_pipe_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config,
				   const char *context)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_plane *plane;
	struct intel_plane *intel_plane;
	struct intel_plane_state *state;
	struct drm_framebuffer *fb;
	char buf[64];

	DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
		      crtc->base.base.id, crtc->base.name, context);

	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	DRM_DEBUG_KMS("output_types: %s (0x%x)\n",
		      buf, pipe_config->output_types);

	DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		      transcoder_name(pipe_config->cpu_transcoder),
		      pipe_config->pipe_bpp, pipe_config->dither);

	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (pipe_config->ycbcr420)
		DRM_DEBUG_KMS("YCbCr 4:2:0 output enabled\n");

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		/* m2_n2 is the second M/N set used for DRRS low refresh rate */
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
		      pipe_config->has_audio, pipe_config->has_infoframe);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		      pipe_config->port_clock,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		      pipe_config->pixel_rate);

	if (INTEL_GEN(dev_priv) >= 9)
		DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			      crtc->num_scalers,
			      pipe_config->scaler_state.scaler_users,
			      pipe_config->scaler_state.scaler_id);

	/* GMCH platforms have the panel fitter in the gmch, others in the PCH. */
	if (HAS_GMCH_DISPLAY(dev_priv))
		DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			      pipe_config->gmch_pfit.control,
			      pipe_config->gmch_pfit.pgm_ratios,
			      pipe_config->gmch_pfit.lvds_border_bits);
	else
		DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
			      pipe_config->pch_pfit.pos,
			      pipe_config->pch_pfit.size,
			      enableddisabled(pipe_config->pch_pfit.enabled));

	DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
		      pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	DRM_DEBUG_KMS("planes on this crtc\n");
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		struct drm_format_name_buf format_name;
		intel_plane = to_intel_plane(plane);
		/* Only planes feeding this crtc's pipe are of interest. */
		if (intel_plane->pipe != crtc->pipe)
			continue;

		state = to_intel_plane_state(plane->state);
		fb = state->base.fb;
		if (!fb) {
			DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
				      plane->base.id, plane->name, state->scaler_id);
			continue;
		}

		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n",
			      plane->base.id, plane->name,
			      fb->base.id, fb->width, fb->height,
			      drm_get_format_name(fb->format->format, &format_name));
		/* src coordinates are 16.16 fixed point, hence the >> 16 */
		if (INTEL_GEN(dev_priv) >= 9)
			DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
				      state->scaler_id,
				      state->base.src.x1 >> 16,
				      state->base.src.y1 >> 16,
				      drm_rect_width(&state->base.src) >> 16,
				      drm_rect_height(&state->base.src) >> 16,
				      state->base.dst.x1, state->base.dst.y1,
				      drm_rect_width(&state->base.dst),
				      drm_rect_height(&state->base.dst));
	}
}
11090
/*
 * Check that no digital port is used by more than one encoder in the
 * new atomic state, and that MST and SST/HDMI aren't mixed on the same
 * port. Returns true when the configuration is valid.
 */
static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;
	bool ret = true;

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Use the new state if present, else the current one. */
		connector_state = drm_atomic_get_new_connector_state(state, connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
			unsigned int port_mask;
		case INTEL_OUTPUT_DDI:
			if (WARN_ON(!HAS_DDI(to_i915(dev))))
				break;
			/* else: fall through */
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << encoder->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				ret = false;

			used_ports |= port_mask;
			break;
		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |=
				1 << encoder->port;
			break;
		default:
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return ret;
}
11154
/*
 * Reset the i915-specific part of @crtc_state to zero while keeping the
 * base drm_crtc_state and a small set of fields that must survive the
 * compute_config pass (scalers, dpll selection, pfit force_thru, IPS
 * override, and on g4x/vlv/chv the watermark state).
 */
static void
clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->base.crtc->dev);
	struct intel_crtc_scaler_state scaler_state;
	struct intel_dpll_hw_state dpll_hw_state;
	struct intel_shared_dpll *shared_dpll;
	struct intel_crtc_wm_state wm_state;
	bool force_thru, ips_force_disable;

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	/* Save the fields that must survive the wipe below. */
	scaler_state = crtc_state->scaler_state;
	shared_dpll = crtc_state->shared_dpll;
	dpll_hw_state = crtc_state->dpll_hw_state;
	force_thru = crtc_state->pch_pfit.force_thru;
	ips_force_disable = crtc_state->ips_force_disable;
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		wm_state = crtc_state->wm;

	/* Keep base drm_crtc_state intact, only clear our extended struct */
	BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
	memset(&crtc_state->base + 1, 0,
	       sizeof(*crtc_state) - sizeof(crtc_state->base));

	/* Restore the preserved fields. */
	crtc_state->scaler_state = scaler_state;
	crtc_state->shared_dpll = shared_dpll;
	crtc_state->dpll_hw_state = dpll_hw_state;
	crtc_state->pch_pfit.force_thru = force_thru;
	crtc_state->ips_force_disable = ips_force_disable;
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		crtc_state->wm = wm_state;
}
11194
/*
 * Compute the full pipe configuration for @crtc from the requested mode
 * and the connectors/encoders bound to it in the atomic state: sanitize
 * sync flags, establish the baseline bpp, run the encoder and crtc
 * compute_config hooks (retrying once if the crtc asks for it), and
 * derive port clock and dithering.
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int
intel_modeset_pipe_config(struct drm_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret = -EINVAL;
	int i;
	bool retry = true;

	/* Wipe stale derived state; see clear_intel_crtc_state(). */
	clear_intel_crtc_state(pipe_config);

	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					     pipe_config);
	if (base_bpp < 0)
		goto fail;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->base.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
			goto fail;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!(encoder->compute_config(encoder, pipe_config, connector_state))) {
			DRM_DEBUG_KMS("Encoder config failure\n");
			goto fail;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		goto fail;
	}

	/* RETRY means the crtc wants another pass with reduced bandwidth;
	 * allow exactly one retry to avoid looping forever. */
	if (ret == RETRY) {
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
			ret = -EINVAL;
			goto fail;
		}

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

fail:
	return ret;
}
11325
/*
 * Compare two clock values, treating them as matching when they are
 * within roughly 5% of each other (equal clocks always match; a zero
 * clock only matches another zero).
 */
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int delta;

	if (clock1 == clock2)
		return true;

	if (!clock1 || !clock2)
		return false;

	delta = abs(clock1 - clock2);

	/* (delta / avg) < 10%  <=>  ((delta + sum) * 100) / sum < 105 */
	return ((delta + clock1 + clock2) * 100) / (clock1 + clock2) < 105;
}
11343
11344 static bool
11345 intel_compare_m_n(unsigned int m, unsigned int n,
11346                   unsigned int m2, unsigned int n2,
11347                   bool exact)
11348 {
11349         if (m == m2 && n == n2)
11350                 return true;
11351
11352         if (exact || !m || !n || !m2 || !n2)
11353                 return false;
11354
11355         BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
11356
11357         if (n > n2) {
11358                 while (n > n2) {
11359                         m2 <<= 1;
11360                         n2 <<= 1;
11361                 }
11362         } else if (n < n2) {
11363                 while (n < n2) {
11364                         m <<= 1;
11365                         n <<= 1;
11366                 }
11367         }
11368
11369         if (n != n2)
11370                 return false;
11371
11372         return intel_fuzzy_clock_check(m, m2);
11373 }
11374
11375 static bool
11376 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
11377                        struct intel_link_m_n *m2_n2,
11378                        bool adjust)
11379 {
11380         if (m_n->tu == m2_n2->tu &&
11381             intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
11382                               m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
11383             intel_compare_m_n(m_n->link_m, m_n->link_n,
11384                               m2_n2->link_m, m2_n2->link_n, !adjust)) {
11385                 if (adjust)
11386                         *m2_n2 = *m_n;
11387
11388                 return true;
11389         }
11390
11391         return false;
11392 }
11393
/*
 * Report a pipe config mismatch for field @name. In fastset/adjust mode
 * a mismatch is expected and only debug-logged; otherwise it indicates
 * a real state checker failure and is logged as an error.
 */
static void __printf(3, 4)
pipe_config_err(bool adjust, const char *name, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	if (adjust)
		drm_dbg(DRM_UT_KMS, "mismatch in %s %pV", name, &vaf);
	else
		drm_err("mismatch in %s %pV", name, &vaf);

	va_end(args);
}
11411
11412 static bool
11413 intel_pipe_config_compare(struct drm_i915_private *dev_priv,
11414                           struct intel_crtc_state *current_config,
11415                           struct intel_crtc_state *pipe_config,
11416                           bool adjust)
11417 {
11418         bool ret = true;
11419         bool fixup_inherited = adjust &&
11420                 (current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
11421                 !(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
11422
11423 #define PIPE_CONF_CHECK_X(name) do { \
11424         if (current_config->name != pipe_config->name) { \
11425                 pipe_config_err(adjust, __stringify(name), \
11426                           "(expected 0x%08x, found 0x%08x)\n", \
11427                           current_config->name, \
11428                           pipe_config->name); \
11429                 ret = false; \
11430         } \
11431 } while (0)
11432
11433 #define PIPE_CONF_CHECK_I(name) do { \
11434         if (current_config->name != pipe_config->name) { \
11435                 pipe_config_err(adjust, __stringify(name), \
11436                           "(expected %i, found %i)\n", \
11437                           current_config->name, \
11438                           pipe_config->name); \
11439                 ret = false; \
11440         } \
11441 } while (0)
11442
11443 #define PIPE_CONF_CHECK_BOOL(name) do { \
11444         if (current_config->name != pipe_config->name) { \
11445                 pipe_config_err(adjust, __stringify(name), \
11446                           "(expected %s, found %s)\n", \
11447                           yesno(current_config->name), \
11448                           yesno(pipe_config->name)); \
11449                 ret = false; \
11450         } \
11451 } while (0)
11452
11453 /*
11454  * Checks state where we only read out the enabling, but not the entire
11455  * state itself (like full infoframes or ELD for audio). These states
11456  * require a full modeset on bootup to fix up.
11457  */
11458 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
11459         if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
11460                 PIPE_CONF_CHECK_BOOL(name); \
11461         } else { \
11462                 pipe_config_err(adjust, __stringify(name), \
11463                           "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
11464                           yesno(current_config->name), \
11465                           yesno(pipe_config->name)); \
11466                 ret = false; \
11467         } \
11468 } while (0)
11469
11470 #define PIPE_CONF_CHECK_P(name) do { \
11471         if (current_config->name != pipe_config->name) { \
11472                 pipe_config_err(adjust, __stringify(name), \
11473                           "(expected %p, found %p)\n", \
11474                           current_config->name, \
11475                           pipe_config->name); \
11476                 ret = false; \
11477         } \
11478 } while (0)
11479
11480 #define PIPE_CONF_CHECK_M_N(name) do { \
11481         if (!intel_compare_link_m_n(&current_config->name, \
11482                                     &pipe_config->name,\
11483                                     adjust)) { \
11484                 pipe_config_err(adjust, __stringify(name), \
11485                           "(expected tu %i gmch %i/%i link %i/%i, " \
11486                           "found tu %i, gmch %i/%i link %i/%i)\n", \
11487                           current_config->name.tu, \
11488                           current_config->name.gmch_m, \
11489                           current_config->name.gmch_n, \
11490                           current_config->name.link_m, \
11491                           current_config->name.link_n, \
11492                           pipe_config->name.tu, \
11493                           pipe_config->name.gmch_m, \
11494                           pipe_config->name.gmch_n, \
11495                           pipe_config->name.link_m, \
11496                           pipe_config->name.link_n); \
11497                 ret = false; \
11498         } \
11499 } while (0)
11500
11501 /* This is required for BDW+ where there is only one set of registers for
11502  * switching between high and low RR.
11503  * This macro can be used whenever a comparison has to be made between one
11504  * hw state and multiple sw state variables.
11505  */
11506 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
11507         if (!intel_compare_link_m_n(&current_config->name, \
11508                                     &pipe_config->name, adjust) && \
11509             !intel_compare_link_m_n(&current_config->alt_name, \
11510                                     &pipe_config->name, adjust)) { \
11511                 pipe_config_err(adjust, __stringify(name), \
11512                           "(expected tu %i gmch %i/%i link %i/%i, " \
11513                           "or tu %i gmch %i/%i link %i/%i, " \
11514                           "found tu %i, gmch %i/%i link %i/%i)\n", \
11515                           current_config->name.tu, \
11516                           current_config->name.gmch_m, \
11517                           current_config->name.gmch_n, \
11518                           current_config->name.link_m, \
11519                           current_config->name.link_n, \
11520                           current_config->alt_name.tu, \
11521                           current_config->alt_name.gmch_m, \
11522                           current_config->alt_name.gmch_n, \
11523                           current_config->alt_name.link_m, \
11524                           current_config->alt_name.link_n, \
11525                           pipe_config->name.tu, \
11526                           pipe_config->name.gmch_m, \
11527                           pipe_config->name.gmch_n, \
11528                           pipe_config->name.link_m, \
11529                           pipe_config->name.link_n); \
11530                 ret = false; \
11531         } \
11532 } while (0)
11533
11534 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
11535         if ((current_config->name ^ pipe_config->name) & (mask)) { \
11536                 pipe_config_err(adjust, __stringify(name), \
11537                           "(%x) (expected %i, found %i)\n", \
11538                           (mask), \
11539                           current_config->name & (mask), \
11540                           pipe_config->name & (mask)); \
11541                 ret = false; \
11542         } \
11543 } while (0)
11544
11545 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
11546         if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
11547                 pipe_config_err(adjust, __stringify(name), \
11548                           "(expected %i, found %i)\n", \
11549                           current_config->name, \
11550                           pipe_config->name); \
11551                 ret = false; \
11552         } \
11553 } while (0)
11554
11555 #define PIPE_CONF_QUIRK(quirk)  \
11556         ((current_config->quirks | pipe_config->quirks) & (quirk))
11557
11558         PIPE_CONF_CHECK_I(cpu_transcoder);
11559
11560         PIPE_CONF_CHECK_BOOL(has_pch_encoder);
11561         PIPE_CONF_CHECK_I(fdi_lanes);
11562         PIPE_CONF_CHECK_M_N(fdi_m_n);
11563
11564         PIPE_CONF_CHECK_I(lane_count);
11565         PIPE_CONF_CHECK_X(lane_lat_optim_mask);
11566
11567         if (INTEL_GEN(dev_priv) < 8) {
11568                 PIPE_CONF_CHECK_M_N(dp_m_n);
11569
11570                 if (current_config->has_drrs)
11571                         PIPE_CONF_CHECK_M_N(dp_m2_n2);
11572         } else
11573                 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
11574
11575         PIPE_CONF_CHECK_X(output_types);
11576
11577         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
11578         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
11579         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
11580         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
11581         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
11582         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
11583
11584         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
11585         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
11586         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
11587         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
11588         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
11589         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
11590
11591         PIPE_CONF_CHECK_I(pixel_multiplier);
11592         PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
11593         if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
11594             IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
11595                 PIPE_CONF_CHECK_BOOL(limited_color_range);
11596
11597         PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
11598         PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
11599         PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);
11600         PIPE_CONF_CHECK_BOOL(ycbcr420);
11601
11602         PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
11603
11604         PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
11605                               DRM_MODE_FLAG_INTERLACE);
11606
11607         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
11608                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
11609                                       DRM_MODE_FLAG_PHSYNC);
11610                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
11611                                       DRM_MODE_FLAG_NHSYNC);
11612                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
11613                                       DRM_MODE_FLAG_PVSYNC);
11614                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
11615                                       DRM_MODE_FLAG_NVSYNC);
11616         }
11617
11618         PIPE_CONF_CHECK_X(gmch_pfit.control);
11619         /* pfit ratios are autocomputed by the hw on gen4+ */
11620         if (INTEL_GEN(dev_priv) < 4)
11621                 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
11622         PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
11623
11624         if (!adjust) {
11625                 PIPE_CONF_CHECK_I(pipe_src_w);
11626                 PIPE_CONF_CHECK_I(pipe_src_h);
11627
11628                 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
11629                 if (current_config->pch_pfit.enabled) {
11630                         PIPE_CONF_CHECK_X(pch_pfit.pos);
11631                         PIPE_CONF_CHECK_X(pch_pfit.size);
11632                 }
11633
11634                 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
11635                 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
11636         }
11637
11638         PIPE_CONF_CHECK_BOOL(double_wide);
11639
11640         PIPE_CONF_CHECK_P(shared_dpll);
11641         PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
11642         PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
11643         PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
11644         PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
11645         PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
11646         PIPE_CONF_CHECK_X(dpll_hw_state.spll);
11647         PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
11648         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
11649         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
11650         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
11651         PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
11652         PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
11653         PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
11654         PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
11655         PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
11656         PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
11657         PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
11658         PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
11659         PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
11660         PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
11661         PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
11662         PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
11663         PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
11664         PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
11665         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
11666         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
11667         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
11668         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
11669         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
11670         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
11671         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
11672
11673         PIPE_CONF_CHECK_X(dsi_pll.ctrl);
11674         PIPE_CONF_CHECK_X(dsi_pll.div);
11675
11676         if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
11677                 PIPE_CONF_CHECK_I(pipe_bpp);
11678
11679         PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
11680         PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
11681
11682         PIPE_CONF_CHECK_I(min_voltage_level);
11683
11684 #undef PIPE_CONF_CHECK_X
11685 #undef PIPE_CONF_CHECK_I
11686 #undef PIPE_CONF_CHECK_BOOL
11687 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
11688 #undef PIPE_CONF_CHECK_P
11689 #undef PIPE_CONF_CHECK_FLAGS
11690 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
11691 #undef PIPE_CONF_QUIRK
11692
11693         return ret;
11694 }
11695
11696 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
11697                                            const struct intel_crtc_state *pipe_config)
11698 {
11699         if (pipe_config->has_pch_encoder) {
11700                 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
11701                                                             &pipe_config->fdi_m_n);
11702                 int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
11703
11704                 /*
11705                  * FDI already provided one idea for the dotclock.
11706                  * Yell if the encoder disagrees.
11707                  */
11708                 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
11709                      "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
11710                      fdi_dotclock, dotclock);
11711         }
11712 }
11713
/*
 * Cross-check the skl+ watermark and DDB (display buffer) hardware state
 * for @crtc against the software state computed during the atomic check
 * phase.  Any mismatch is reported via DRM_ERROR.  Gen9+ only; crtcs that
 * are not active are skipped.
 */
static void verify_wm_state(struct drm_crtc *crtc,
			    struct drm_crtc_state *new_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct skl_ddb_allocation hw_ddb, *sw_ddb;
	struct skl_pipe_wm hw_wm, *sw_wm;
	struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const enum pipe pipe = intel_crtc->pipe;
	int plane, level, max_level = ilk_wm_max_level(dev_priv);

	/* The wm/ddb scheme verified here exists on gen9+ only. */
	if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
		return;

	/* Read back the state the hardware is actually using. */
	skl_pipe_wm_get_hw_state(crtc, &hw_wm);
	sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;

	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
	sw_ddb = &dev_priv->wm.skl_hw.ddb;

	/* Gen11+ can power-gate DBUF slices; the enable mask must agree. */
	if (INTEL_GEN(dev_priv) >= 11)
		if (hw_ddb.enabled_slices != sw_ddb->enabled_slices)
			DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
				  sw_ddb->enabled_slices,
				  hw_ddb.enabled_slices);
	/* planes */
	for_each_universal_plane(dev_priv, pipe, plane) {
		hw_plane_wm = &hw_wm.planes[plane];
		sw_plane_wm = &sw_wm->planes[plane];

		/* Watermarks: every level must match, plus the transition wm. */
		for (level = 0; level <= max_level; level++) {
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]))
				continue;

			DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), plane + 1, level,
				  sw_plane_wm->wm[level].plane_en,
				  sw_plane_wm->wm[level].plane_res_b,
				  sw_plane_wm->wm[level].plane_res_l,
				  hw_plane_wm->wm[level].plane_en,
				  hw_plane_wm->wm[level].plane_res_b,
				  hw_plane_wm->wm[level].plane_res_l);
		}

		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), plane + 1,
				  sw_plane_wm->trans_wm.plane_en,
				  sw_plane_wm->trans_wm.plane_res_b,
				  sw_plane_wm->trans_wm.plane_res_l,
				  hw_plane_wm->trans_wm.plane_en,
				  hw_plane_wm->trans_wm.plane_res_b,
				  hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB: the plane's buffer allocation range must match. */
		hw_ddb_entry = &hw_ddb.plane[pipe][plane];
		sw_ddb_entry = &sw_ddb->plane[pipe][plane];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe), plane + 1,
				  sw_ddb_entry->start, sw_ddb_entry->end,
				  hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	/*
	 * cursor
	 * If the cursor plane isn't active, we may not have updated its ddb
	 * allocation. In that case since the ddb allocation will be updated
	 * once the plane becomes visible, we can skip this check.
	 *
	 * NOTE(review): despite the comment above, the guard below is
	 * unconditionally true, so the cursor is always checked — presumably
	 * a deliberate leftover; confirm against git history before changing.
	 */
	if (1) {
		hw_plane_wm = &hw_wm.planes[PLANE_CURSOR];
		sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];

		/* Watermarks (same checks as for regular planes above). */
		for (level = 0; level <= max_level; level++) {
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]))
				continue;

			DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), level,
				  sw_plane_wm->wm[level].plane_en,
				  sw_plane_wm->wm[level].plane_res_b,
				  sw_plane_wm->wm[level].plane_res_l,
				  hw_plane_wm->wm[level].plane_en,
				  hw_plane_wm->wm[level].plane_res_b,
				  hw_plane_wm->wm[level].plane_res_l);
		}

		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe),
				  sw_plane_wm->trans_wm.plane_en,
				  sw_plane_wm->trans_wm.plane_res_b,
				  sw_plane_wm->trans_wm.plane_res_l,
				  hw_plane_wm->trans_wm.plane_en,
				  hw_plane_wm->trans_wm.plane_res_b,
				  hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
		sw_ddb_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe),
				  sw_ddb_entry->start, sw_ddb_entry->end,
				  hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}
}
11835
11836 static void
11837 verify_connector_state(struct drm_device *dev,
11838                        struct drm_atomic_state *state,
11839                        struct drm_crtc *crtc)
11840 {
11841         struct drm_connector *connector;
11842         struct drm_connector_state *new_conn_state;
11843         int i;
11844
11845         for_each_new_connector_in_state(state, connector, new_conn_state, i) {
11846                 struct drm_encoder *encoder = connector->encoder;
11847                 struct drm_crtc_state *crtc_state = NULL;
11848
11849                 if (new_conn_state->crtc != crtc)
11850                         continue;
11851
11852                 if (crtc)
11853                         crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
11854
11855                 intel_connector_verify_state(crtc_state, new_conn_state);
11856
11857                 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
11858                      "connector's atomic encoder doesn't match legacy encoder\n");
11859         }
11860 }
11861
11862 static void
11863 verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
11864 {
11865         struct intel_encoder *encoder;
11866         struct drm_connector *connector;
11867         struct drm_connector_state *old_conn_state, *new_conn_state;
11868         int i;
11869
11870         for_each_intel_encoder(dev, encoder) {
11871                 bool enabled = false, found = false;
11872                 enum pipe pipe;
11873
11874                 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
11875                               encoder->base.base.id,
11876                               encoder->base.name);
11877
11878                 for_each_oldnew_connector_in_state(state, connector, old_conn_state,
11879                                                    new_conn_state, i) {
11880                         if (old_conn_state->best_encoder == &encoder->base)
11881                                 found = true;
11882
11883                         if (new_conn_state->best_encoder != &encoder->base)
11884                                 continue;
11885                         found = enabled = true;
11886
11887                         I915_STATE_WARN(new_conn_state->crtc !=
11888                                         encoder->base.crtc,
11889                              "connector's crtc doesn't match encoder crtc\n");
11890                 }
11891
11892                 if (!found)
11893                         continue;
11894
11895                 I915_STATE_WARN(!!encoder->base.crtc != enabled,
11896                      "encoder's enabled state mismatch "
11897                      "(expected %i, found %i)\n",
11898                      !!encoder->base.crtc, enabled);
11899
11900                 if (!encoder->base.crtc) {
11901                         bool active;
11902
11903                         active = encoder->get_hw_state(encoder, &pipe);
11904                         I915_STATE_WARN(active,
11905                              "encoder detached but still enabled on pipe %c.\n",
11906                              pipe_name(pipe));
11907                 }
11908         }
11909 }
11910
/*
 * Cross-check the committed software state for @crtc against a fresh
 * hardware readout, warning on any mismatch.
 *
 * @old_crtc_state is no longer needed at this point: it is destroyed and
 * its allocation is reused in place as scratch storage for the hw readout.
 */
static void
verify_crtc_state(struct drm_crtc *crtc,
		  struct drm_crtc_state *old_crtc_state,
		  struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config, *sw_config;
	struct drm_atomic_state *old_state;
	bool active;

	/* Recycle the old state's memory to hold the hardware readout. */
	old_state = old_crtc_state->state;
	__drm_atomic_helper_crtc_destroy_state(old_crtc_state);
	pipe_config = to_intel_crtc_state(old_crtc_state);
	memset(pipe_config, 0, sizeof(*pipe_config));
	pipe_config->base.crtc = crtc;
	pipe_config->base.state = old_state;

	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);

	active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		active = new_crtc_state->active;

	I915_STATE_WARN(new_crtc_state->active != active,
	     "crtc active state doesn't match with hw state "
	     "(expected %i, found %i)\n", new_crtc_state->active, active);

	I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
	     "transitional active state does not match atomic hw state "
	     "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);

	/* Every encoder on this crtc must agree with the crtc's state. */
	for_each_encoder_on_crtc(dev, crtc, encoder) {
		enum pipe pipe;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->active,
			"[ENCODER:%i] active %i with crtc active %i\n",
			encoder->base.base.id, active, new_crtc_state->active);

		I915_STATE_WARN(active && intel_crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		/* Active encoders fill in their part of the hw readout. */
		if (active)
			encoder->get_config(encoder, pipe_config);
	}

	intel_crtc_compute_pixel_rate(pipe_config);

	/* Nothing more to compare for an inactive crtc. */
	if (!new_crtc_state->active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	/* Full field-by-field comparison; dump both states on mismatch. */
	sw_config = to_intel_crtc_state(new_crtc_state);
	if (!intel_pipe_config_compare(dev_priv, sw_config,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(intel_crtc, pipe_config,
				       "[hw state]");
		intel_dump_pipe_config(intel_crtc, sw_config,
				       "[sw state]");
	}
}
11980
11981 static void
11982 intel_verify_planes(struct intel_atomic_state *state)
11983 {
11984         struct intel_plane *plane;
11985         const struct intel_plane_state *plane_state;
11986         int i;
11987
11988         for_each_new_intel_plane_in_state(state, plane,
11989                                           plane_state, i)
11990                 assert_plane(plane, plane_state->base.visible);
11991 }
11992
/*
 * Verify one shared DPLL's software bookkeeping against its hardware
 * state.  With @crtc == NULL only the global refcount/active-mask
 * consistency is checked; otherwise @crtc's membership in the pll's
 * masks is verified against @new_state->active.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct drm_crtc *crtc,
			 struct drm_crtc_state *new_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	unsigned int crtc_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	DRM_DEBUG_KMS("%s\n", pll->info->name);

	active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on plls are enabled regardless of use; skip on/off checks. */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
		     "pll is on but not used by any active crtc\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);
	}

	if (!crtc) {
		/* Global-only check: active users must be a subset of refs. */
		I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
				"more active pll users than references: %x vs %x\n",
				pll->active_mask, pll->state.crtc_mask);

		return;
	}

	crtc_mask = drm_crtc_mask(crtc);

	/*
	 * NOTE(review): the messages below print drm_crtc_index() as a pipe
	 * name — presumably crtc index == pipe on these platforms; confirm.
	 */
	if (new_state->active)
		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);

	/* The crtc must hold a reference on the pll in either case. */
	I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
			crtc_mask, pll->state.crtc_mask);

	/* If the pll is on, the cached hw state must match the readout. */
	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
12047
12048 static void
12049 verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
12050                          struct drm_crtc_state *old_crtc_state,
12051                          struct drm_crtc_state *new_crtc_state)
12052 {
12053         struct drm_i915_private *dev_priv = to_i915(dev);
12054         struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
12055         struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
12056
12057         if (new_state->shared_dpll)
12058                 verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
12059
12060         if (old_state->shared_dpll &&
12061             old_state->shared_dpll != new_state->shared_dpll) {
12062                 unsigned int crtc_mask = drm_crtc_mask(crtc);
12063                 struct intel_shared_dpll *pll = old_state->shared_dpll;
12064
12065                 I915_STATE_WARN(pll->active_mask & crtc_mask,
12066                                 "pll active mismatch (didn't expect pipe %c in active mask)\n",
12067                                 pipe_name(drm_crtc_index(crtc)));
12068                 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
12069                                 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
12070                                 pipe_name(drm_crtc_index(crtc)));
12071         }
12072 }
12073
12074 static void
12075 intel_modeset_verify_crtc(struct drm_crtc *crtc,
12076                           struct drm_atomic_state *state,
12077                           struct drm_crtc_state *old_state,
12078                           struct drm_crtc_state *new_state)
12079 {
12080         if (!needs_modeset(new_state) &&
12081             !to_intel_crtc_state(new_state)->update_pipe)
12082                 return;
12083
12084         verify_wm_state(crtc, new_state);
12085         verify_connector_state(crtc->dev, state, crtc);
12086         verify_crtc_state(crtc, old_state, new_state);
12087         verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
12088 }
12089
12090 static void
12091 verify_disabled_dpll_state(struct drm_device *dev)
12092 {
12093         struct drm_i915_private *dev_priv = to_i915(dev);
12094         int i;
12095
12096         for (i = 0; i < dev_priv->num_shared_dpll; i++)
12097                 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
12098 }
12099
/*
 * Run the verification checks that are not tied to a specific crtc:
 * encoder bookkeeping, connectors left without a crtc, and shared
 * DPLLs with no remaining users.
 */
static void
intel_modeset_verify_disabled(struct drm_device *dev,
			      struct drm_atomic_state *state)
{
	verify_encoder_state(dev, state);
	verify_connector_state(dev, state, NULL);
	verify_disabled_dpll_state(dev);
}
12108
12109 static void update_scanline_offset(struct intel_crtc *crtc)
12110 {
12111         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12112
12113         /*
12114          * The scanline counter increments at the leading edge of hsync.
12115          *
12116          * On most platforms it starts counting from vtotal-1 on the
12117          * first active line. That means the scanline counter value is
12118          * always one less than what we would expect. Ie. just after
12119          * start of vblank, which also occurs at start of hsync (on the
12120          * last active line), the scanline counter will read vblank_start-1.
12121          *
12122          * On gen2 the scanline counter starts counting from 1 instead
12123          * of vtotal-1, so we have to subtract one (or rather add vtotal-1
12124          * to keep the value positive), instead of adding one.
12125          *
12126          * On HSW+ the behaviour of the scanline counter depends on the output
12127          * type. For DP ports it behaves like most other platforms, but on HDMI
12128          * there's an extra 1 line difference. So we need to add two instead of
12129          * one to the value.
12130          *
12131          * On VLV/CHV DSI the scanline counter would appear to increment
12132          * approx. 1/3 of a scanline before start of vblank. Unfortunately
12133          * that means we can't tell whether we're in vblank or not while
12134          * we're on that particular line. We must still set scanline_offset
12135          * to 1 so that the vblank timestamps come out correct when we query
12136          * the scanline counter from within the vblank interrupt handler.
12137          * However if queried just before the start of vblank we'll get an
12138          * answer that's slightly in the future.
12139          */
12140         if (IS_GEN2(dev_priv)) {
12141                 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
12142                 int vtotal;
12143
12144                 vtotal = adjusted_mode->crtc_vtotal;
12145                 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
12146                         vtotal /= 2;
12147
12148                 crtc->scanline_offset = vtotal - 1;
12149         } else if (HAS_DDI(dev_priv) &&
12150                    intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
12151                 crtc->scanline_offset = 2;
12152         } else
12153                 crtc->scanline_offset = 1;
12154 }
12155
12156 static void intel_modeset_clear_plls(struct drm_atomic_state *state)
12157 {
12158         struct drm_device *dev = state->dev;
12159         struct drm_i915_private *dev_priv = to_i915(dev);
12160         struct drm_crtc *crtc;
12161         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
12162         int i;
12163
12164         if (!dev_priv->display.crtc_compute_clock)
12165                 return;
12166
12167         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
12168                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12169                 struct intel_shared_dpll *old_dpll =
12170                         to_intel_crtc_state(old_crtc_state)->shared_dpll;
12171
12172                 if (!needs_modeset(new_crtc_state))
12173                         continue;
12174
12175                 to_intel_crtc_state(new_crtc_state)->shared_dpll = NULL;
12176
12177                 if (!old_dpll)
12178                         continue;
12179
12180                 intel_release_shared_dpll(old_dpll, intel_crtc, state);
12181         }
12182 }
12183
12184 /*
12185  * This implements the workaround described in the "notes" section of the mode
12186  * set sequence documentation. When going from no pipes or single pipe to
12187  * multiple pipes, and planes are enabled after the pipe, we need to wait at
12188  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
12189  */
12190 static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
12191 {
12192         struct drm_crtc_state *crtc_state;
12193         struct intel_crtc *intel_crtc;
12194         struct drm_crtc *crtc;
12195         struct intel_crtc_state *first_crtc_state = NULL;
12196         struct intel_crtc_state *other_crtc_state = NULL;
12197         enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
12198         int i;
12199
12200         /* look at all crtc's that are going to be enabled in during modeset */
12201         for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
12202                 intel_crtc = to_intel_crtc(crtc);
12203
12204                 if (!crtc_state->active || !needs_modeset(crtc_state))
12205                         continue;
12206
12207                 if (first_crtc_state) {
12208                         other_crtc_state = to_intel_crtc_state(crtc_state);
12209                         break;
12210                 } else {
12211                         first_crtc_state = to_intel_crtc_state(crtc_state);
12212                         first_pipe = intel_crtc->pipe;
12213                 }
12214         }
12215
12216         /* No workaround needed? */
12217         if (!first_crtc_state)
12218                 return 0;
12219
12220         /* w/a possibly needed, check how many crtc's are already enabled. */
12221         for_each_intel_crtc(state->dev, intel_crtc) {
12222                 struct intel_crtc_state *pipe_config;
12223
12224                 pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
12225                 if (IS_ERR(pipe_config))
12226                         return PTR_ERR(pipe_config);
12227
12228                 pipe_config->hsw_workaround_pipe = INVALID_PIPE;
12229
12230                 if (!pipe_config->base.active ||
12231                     needs_modeset(&pipe_config->base))
12232                         continue;
12233
12234                 /* 2 or more enabled crtcs means no need for w/a */
12235                 if (enabled_pipe != INVALID_PIPE)
12236                         return 0;
12237
12238                 enabled_pipe = intel_crtc->pipe;
12239         }
12240
12241         if (enabled_pipe != INVALID_PIPE)
12242                 first_crtc_state->hsw_workaround_pipe = enabled_pipe;
12243         else if (other_crtc_state)
12244                 other_crtc_state->hsw_workaround_pipe = first_pipe;
12245
12246         return 0;
12247 }
12248
12249 static int intel_lock_all_pipes(struct drm_atomic_state *state)
12250 {
12251         struct drm_crtc *crtc;
12252
12253         /* Add all pipes to the state */
12254         for_each_crtc(state->dev, crtc) {
12255                 struct drm_crtc_state *crtc_state;
12256
12257                 crtc_state = drm_atomic_get_crtc_state(state, crtc);
12258                 if (IS_ERR(crtc_state))
12259                         return PTR_ERR(crtc_state);
12260         }
12261
12262         return 0;
12263 }
12264
12265 static int intel_modeset_all_pipes(struct drm_atomic_state *state)
12266 {
12267         struct drm_crtc *crtc;
12268
12269         /*
12270          * Add all pipes to the state, and force
12271          * a modeset on all the active ones.
12272          */
12273         for_each_crtc(state->dev, crtc) {
12274                 struct drm_crtc_state *crtc_state;
12275                 int ret;
12276
12277                 crtc_state = drm_atomic_get_crtc_state(state, crtc);
12278                 if (IS_ERR(crtc_state))
12279                         return PTR_ERR(crtc_state);
12280
12281                 if (!crtc_state->active || needs_modeset(crtc_state))
12282                         continue;
12283
12284                 crtc_state->mode_changed = true;
12285
12286                 ret = drm_atomic_add_affected_connectors(state, crtc);
12287                 if (ret)
12288                         return ret;
12289
12290                 ret = drm_atomic_add_affected_planes(state, crtc);
12291                 if (ret)
12292                         return ret;
12293         }
12294
12295         return 0;
12296 }
12297
/*
 * Check-phase work that is only needed when at least one crtc does a full
 * modeset: validates digital port sharing, recomputes the active-crtc mask
 * and the global cdclk state, releases stale shared DPLLs, and applies the
 * HSW plane enable workaround. Returns 0 or a negative error code.
 */
static int intel_modeset_checks(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int ret = 0, i;

	if (!check_digital_port_conflicts(state)) {
		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
		return -EINVAL;
	}

	/* Seed the new global state from the current hardware state. */
	intel_state->modeset = true;
	intel_state->active_crtcs = dev_priv->active_crtcs;
	intel_state->cdclk.logical = dev_priv->cdclk.logical;
	intel_state->cdclk.actual = dev_priv->cdclk.actual;

	/* Fold the per-crtc activations/deactivations into the mask. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (new_crtc_state->active)
			intel_state->active_crtcs |= 1 << i;
		else
			intel_state->active_crtcs &= ~(1 << i);

		if (old_crtc_state->active != new_crtc_state->active)
			intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
	}

	/*
	 * See if the config requires any additional preparation, e.g.
	 * to adjust global state with pipes off.  We need to do this
	 * here so we can get the modeset_pipe updated config for the new
	 * mode set on this crtc.  For other crtcs we need to use the
	 * adjusted_mode bits in the crtc directly.
	 */
	if (dev_priv->display.modeset_calc_cdclk) {
		ret = dev_priv->display.modeset_calc_cdclk(state);
		if (ret < 0)
			return ret;

		/*
		 * Writes to dev_priv->cdclk.logical must be protected by
		 * holding all the crtc locks, even if we don't end up
		 * touching the hardware
		 */
		if (intel_cdclk_changed(&dev_priv->cdclk.logical,
					&intel_state->cdclk.logical)) {
			ret = intel_lock_all_pipes(state);
			if (ret < 0)
				return ret;
		}

		/* All pipes must be switched off while we change the cdclk. */
		if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
					      &intel_state->cdclk.actual)) {
			ret = intel_modeset_all_pipes(state);
			if (ret < 0)
				return ret;
		}

		DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
			      intel_state->cdclk.logical.cdclk,
			      intel_state->cdclk.actual.cdclk);
		DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
			      intel_state->cdclk.logical.voltage_level,
			      intel_state->cdclk.actual.voltage_level);
	} else {
		/* No cdclk hook: carry over the current logical cdclk. */
		to_intel_atomic_state(state)->cdclk.logical = dev_priv->cdclk.logical;
	}

	intel_modeset_clear_plls(state);

	if (IS_HASWELL(dev_priv))
		return haswell_mode_set_planes_workaround(state);

	return 0;
}
12375
12376 /*
12377  * Handle calculation of various watermark data at the end of the atomic check
12378  * phase.  The code here should be run after the per-crtc and per-plane 'check'
12379  * handlers to ensure that all derived state has been updated.
12380  */
12381 static int calc_watermark_data(struct drm_atomic_state *state)
12382 {
12383         struct drm_device *dev = state->dev;
12384         struct drm_i915_private *dev_priv = to_i915(dev);
12385
12386         /* Is there platform-specific watermark information to calculate? */
12387         if (dev_priv->display.compute_global_watermarks)
12388                 return dev_priv->display.compute_global_watermarks(state);
12389
12390         return 0;
12391 }
12392
12393 /**
12394  * intel_atomic_check - validate state object
12395  * @dev: drm device
12396  * @state: state to validate
12397  */
12398 static int intel_atomic_check(struct drm_device *dev,
12399                               struct drm_atomic_state *state)
12400 {
12401         struct drm_i915_private *dev_priv = to_i915(dev);
12402         struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
12403         struct drm_crtc *crtc;
12404         struct drm_crtc_state *old_crtc_state, *crtc_state;
12405         int ret, i;
12406         bool any_ms = false;
12407
12408         /* Catch I915_MODE_FLAG_INHERITED */
12409         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
12410                                       crtc_state, i) {
12411                 if (crtc_state->mode.private_flags !=
12412                     old_crtc_state->mode.private_flags)
12413                         crtc_state->mode_changed = true;
12414         }
12415
12416         ret = drm_atomic_helper_check_modeset(dev, state);
12417         if (ret)
12418                 return ret;
12419
12420         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, crtc_state, i) {
12421                 struct intel_crtc_state *pipe_config =
12422                         to_intel_crtc_state(crtc_state);
12423
12424                 if (!needs_modeset(crtc_state))
12425                         continue;
12426
12427                 if (!crtc_state->enable) {
12428                         any_ms = true;
12429                         continue;
12430                 }
12431
12432                 ret = intel_modeset_pipe_config(crtc, pipe_config);
12433                 if (ret) {
12434                         intel_dump_pipe_config(to_intel_crtc(crtc),
12435                                                pipe_config, "[failed]");
12436                         return ret;
12437                 }
12438
12439                 if (i915_modparams.fastboot &&
12440                     intel_pipe_config_compare(dev_priv,
12441                                         to_intel_crtc_state(old_crtc_state),
12442                                         pipe_config, true)) {
12443                         crtc_state->mode_changed = false;
12444                         pipe_config->update_pipe = true;
12445                 }
12446
12447                 if (needs_modeset(crtc_state))
12448                         any_ms = true;
12449
12450                 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
12451                                        needs_modeset(crtc_state) ?
12452                                        "[modeset]" : "[fastset]");
12453         }
12454
12455         if (any_ms) {
12456                 ret = intel_modeset_checks(state);
12457
12458                 if (ret)
12459                         return ret;
12460         } else {
12461                 intel_state->cdclk.logical = dev_priv->cdclk.logical;
12462         }
12463
12464         ret = drm_atomic_helper_check_planes(dev, state);
12465         if (ret)
12466                 return ret;
12467
12468         intel_fbc_choose_crtc(dev_priv, intel_state);
12469         return calc_watermark_data(state);
12470 }
12471
/*
 * Prepare-phase hook: currently just pins/prepares all plane framebuffers
 * via the DRM helper. Returns 0 or a negative error code.
 */
static int intel_atomic_prepare_commit(struct drm_device *dev,
				       struct drm_atomic_state *state)
{
	return drm_atomic_helper_prepare_planes(dev, state);
}
12477
12478 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
12479 {
12480         struct drm_device *dev = crtc->base.dev;
12481
12482         if (!dev->max_vblank_count)
12483                 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
12484
12485         return dev->driver->get_vblank_counter(dev, crtc->pipe);
12486 }
12487
/*
 * Commit the new state of a single crtc to the hardware: either a full
 * enable (after a modeset) or a pre-plane fastset update, followed by
 * FBC enabling and the plane update sandwiched between begin/finish
 * commit hooks. The ordering of these steps is hardware-mandated.
 */
static void intel_update_crtc(struct drm_crtc *crtc,
			      struct drm_atomic_state *state,
			      struct drm_crtc_state *old_crtc_state,
			      struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *old_intel_cstate = to_intel_crtc_state(old_crtc_state);
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
	bool modeset = needs_modeset(new_crtc_state);
	/* May be NULL if the primary plane isn't part of this state. */
	struct intel_plane_state *new_plane_state =
		intel_atomic_get_new_plane_state(to_intel_atomic_state(state),
						 to_intel_plane(crtc->primary));

	if (modeset) {
		/* Scanline offset must be valid before the pipe runs. */
		update_scanline_offset(intel_crtc);
		dev_priv->display.crtc_enable(pipe_config, state);

		/* vblanks work again, re-enable pipe CRC. */
		intel_crtc_enable_pipe_crc(intel_crtc);
	} else {
		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
				       pipe_config);
	}

	if (new_plane_state)
		intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);

	intel_begin_crtc_commit(crtc, old_crtc_state);

	intel_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc,
				    old_intel_cstate, pipe_config);

	intel_finish_crtc_commit(crtc, old_crtc_state);
}
12524
12525 static void intel_update_crtcs(struct drm_atomic_state *state)
12526 {
12527         struct drm_crtc *crtc;
12528         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
12529         int i;
12530
12531         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
12532                 if (!new_crtc_state->active)
12533                         continue;
12534
12535                 intel_update_crtc(crtc, state, old_crtc_state,
12536                                   new_crtc_state);
12537         }
12538 }
12539
/*
 * SKL+ crtc update path. Unlike the generic path, the order in which
 * pipes are updated matters here: each pipe's DDB (display buffer)
 * allocation must never transiently overlap another pipe's, so pipes
 * are committed in multiple passes, each pass only touching pipes whose
 * new allocation doesn't overlap any allocation still in effect.
 */
static void skl_update_crtcs(struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc_state *cstate;
	unsigned int updated = 0;	/* mask of already-committed crtcs */
	bool progress;
	enum pipe pipe;
	int i;
	u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
	u8 required_slices = intel_state->wm_results.ddb.enabled_slices;

	/* Per-pipe DDB entries currently considered "in effect". */
	const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
		/* ignore allocations for crtc's that have been turned off. */
		if (new_crtc_state->active)
			entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;

	/* If 2nd DBuf slice required, enable it here */
	if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
		icl_dbuf_slices_update(dev_priv, required_slices);

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with eachother inbetween CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 */
	do {
		progress = false;

		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			bool vbl_wait = false;
			unsigned int cmask = drm_crtc_mask(crtc);

			intel_crtc = to_intel_crtc(crtc);
			cstate = to_intel_crtc_state(new_crtc_state);
			pipe = intel_crtc->pipe;

			/* Skip crtcs already committed or inactive. */
			if (updated & cmask || !cstate->base.active)
				continue;

			/* Defer to a later pass if the new allocation would
			 * overlap one still in effect. */
			if (skl_ddb_allocation_overlaps(dev_priv,
							entries,
							&cstate->wm.skl.ddb,
							i))
				continue;

			updated |= cmask;
			entries[i] = &cstate->wm.skl.ddb;

			/*
			 * If this is an already active pipe, it's DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
						 &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
			    !new_crtc_state->active_changed &&
			    intel_state->wm_results.dirty_pipes != updated)
				vbl_wait = true;

			intel_update_crtc(crtc, state, old_crtc_state,
					  new_crtc_state);

			if (vbl_wait)
				intel_wait_for_vblank(dev_priv, pipe);

			progress = true;
		}
	} while (progress);

	/* If 2nd DBuf slice is no more required disable it */
	if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
		icl_dbuf_slices_update(dev_priv, required_slices);
}
12621
/*
 * Drain the lock-free list of atomic states queued for deferred freeing
 * and drop the final reference on each one.
 */
static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
{
	struct intel_atomic_state *state, *next;
	struct llist_node *freed;

	/* Atomically take the whole list; new entries go onto a fresh list. */
	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
	llist_for_each_entry_safe(state, next, freed, freed)
		drm_atomic_state_put(&state->base);
}
12631
/* Work item shim: frees queued atomic states from process context. */
static void intel_atomic_helper_free_state_worker(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), atomic_helper.free_work);

	intel_atomic_helper_free_state(dev_priv);
}
12639
/*
 * Block until the commit's sw fence has signalled (all dependencies
 * resolved) OR a GPU-reset-induced modeset is pending. Waiting on both
 * queues simultaneously avoids deadlocking against a reset that needs
 * the display locks we'd otherwise keep holding.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);
	for (;;) {
		/* Must be queued on both waitqueues before re-checking the
		 * conditions, or we could miss a wakeup. */
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(&dev_priv->gpu_error.wait_queue,
				&wait_reset, TASK_UNINTERRUPTIBLE);


		if (i915_sw_fence_done(&intel_state->commit_ready)
		    || test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
	finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset);
}
12663
12664 static void intel_atomic_cleanup_work(struct work_struct *work)
12665 {
12666         struct drm_atomic_state *state =
12667                 container_of(work, struct drm_atomic_state, commit_work);
12668         struct drm_i915_private *i915 = to_i915(state->dev);
12669
12670         drm_atomic_helper_cleanup_planes(&i915->drm, state);
12671         drm_atomic_helper_commit_cleanup_done(state);
12672         drm_atomic_state_put(state);
12673
12674         intel_atomic_helper_free_state(i915);
12675 }
12676
/*
 * The tail of an atomic commit: performs the actual hardware programming.
 * Runs either directly (blocking commits) or from a worker (nonblocking).
 * Sequence: wait for dependencies, disable outgoing crtcs, apply global
 * state (cdclk, SAGV), enable/update crtcs, wait for flips, then program
 * optimal watermarks and hand cleanup off to a worker.
 */
static void intel_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc_state *new_intel_crtc_state, *old_intel_crtc_state;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	u64 put_domains[I915_MAX_PIPES] = {};
	int i;

	intel_atomic_commit_fence_wait(intel_state);

	drm_atomic_helper_wait_for_dependencies(state);

	if (intel_state->modeset)
		intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

	/* Disable phase: tear down all crtcs doing a full modeset. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		old_intel_crtc_state = to_intel_crtc_state(old_crtc_state);
		new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
		intel_crtc = to_intel_crtc(crtc);

		if (needs_modeset(new_crtc_state) ||
		    to_intel_crtc_state(new_crtc_state)->update_pipe) {

			/* Grab power domains now; released after the update. */
			put_domains[intel_crtc->pipe] =
				modeset_get_crtc_power_domains(crtc,
					new_intel_crtc_state);
		}

		if (!needs_modeset(new_crtc_state))
			continue;

		intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state);

		if (old_crtc_state->active) {
			intel_crtc_disable_planes(intel_crtc, old_intel_crtc_state->active_planes);

			/*
			 * We need to disable pipe CRC before disabling the pipe,
			 * or we race against vblank off.
			 */
			intel_crtc_disable_pipe_crc(intel_crtc);

			dev_priv->display.crtc_disable(old_intel_crtc_state, state);
			intel_crtc->active = false;
			intel_fbc_disable(intel_crtc);
			intel_disable_shared_dpll(intel_crtc);

			/*
			 * Underruns don't always raise
			 * interrupts, so check manually.
			 */
			intel_check_cpu_fifo_underruns(dev_priv);
			intel_check_pch_fifo_underruns(dev_priv);

			if (!new_crtc_state->active) {
				/*
				 * Make sure we don't call initial_watermarks
				 * for ILK-style watermark updates.
				 *
				 * No clue what this is supposed to achieve.
				 */
				if (INTEL_GEN(dev_priv) >= 9)
					dev_priv->display.initial_watermarks(intel_state,
									     new_intel_crtc_state);
			}
		}
	}

	/* FIXME: Eventually get rid of our intel_crtc->config pointer */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
		to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);

	if (intel_state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);

		intel_set_cdclk(dev_priv, &dev_priv->cdclk.actual);

		/*
		 * SKL workaround: bspec recommends we disable the SAGV when we
		 * have more than one pipe enabled
		 */
		if (!intel_can_enable_sagv(state))
			intel_disable_sagv(dev_priv);

		intel_modeset_verify_disabled(dev, state);
	}

	/* Complete the events for pipes that have now been disabled */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = needs_modeset(new_crtc_state);

		/* Complete events for now disable pipes here. */
		if (modeset && !new_crtc_state->active && new_crtc_state->event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->event = NULL;
		}
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.update_crtcs(state);

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchronously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need our special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, state);

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);

		if (dev_priv->display.optimize_watermarks)
			dev_priv->display.optimize_watermarks(intel_state,
							      new_intel_crtc_state);
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(to_intel_crtc_state(old_crtc_state));

		if (put_domains[i])
			modeset_put_power_domains(dev_priv, put_domains[i]);

		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
	}

	if (intel_state->modeset)
		intel_verify_planes(intel_state);

	/* Re-enable SAGV only once the new pipe config allows it. */
	if (intel_state->modeset && intel_can_enable_sagv(state))
		intel_enable_sagv(dev_priv);

	drm_atomic_helper_commit_hw_done(state);

	if (intel_state->modeset) {
		/* As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
	}

	/*
	 * Defer the cleanup of the old state to a separate worker to not
	 * impede the current task (userspace for blocking modesets) that
	 * are executed inline. For out-of-line asynchronous modesets/flips,
	 * deferring to a new worker seems overkill, but we would place a
	 * schedule point (cond_resched()) here anyway to keep latencies
	 * down.
	 */
	INIT_WORK(&state->commit_work, intel_atomic_cleanup_work);
	queue_work(system_highpri_wq, &state->commit_work);
}
12850
/* Work item shim: runs the commit tail for nonblocking commits. */
static void intel_atomic_commit_work(struct work_struct *work)
{
	struct drm_atomic_state *state =
		container_of(work, struct drm_atomic_state, commit_work);

	intel_atomic_commit_tail(state);
}
12858
12859 static int __i915_sw_fence_call
12860 intel_atomic_commit_ready(struct i915_sw_fence *fence,
12861                           enum i915_sw_fence_notify notify)
12862 {
12863         struct intel_atomic_state *state =
12864                 container_of(fence, struct intel_atomic_state, commit_ready);
12865
12866         switch (notify) {
12867         case FENCE_COMPLETE:
12868                 /* we do blocking waits in the worker, nothing to do here */
12869                 break;
12870         case FENCE_FREE:
12871                 {
12872                         struct intel_atomic_helper *helper =
12873                                 &to_i915(state->base.dev)->atomic_helper;
12874
12875                         if (llist_add(&state->freed, &helper->free_list))
12876                                 schedule_work(&helper->free_work);
12877                         break;
12878                 }
12879         }
12880
12881         return NOTIFY_DONE;
12882 }
12883
/*
 * Move the frontbuffer tracking bits from each plane's old fb object to
 * its new fb object, for every plane in @state, so frontbuffer tracking
 * follows the buffers actually being scanned out.
 */
static void intel_atomic_track_fbs(struct drm_atomic_state *state)
{
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct drm_plane *plane;
	int i;

	/*
	 * NOTE(review): either fb may be NULL here (plane being enabled
	 * or disabled); presumably intel_fb_obj()/i915_gem_track_fb()
	 * accept NULL — confirm.
	 */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
		i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
				  intel_fb_obj(new_plane_state->fb),
				  to_intel_plane(plane)->frontbuffer_bit);
}
12895
12896 /**
12897  * intel_atomic_commit - commit validated state object
12898  * @dev: DRM device
12899  * @state: the top-level driver state object
12900  * @nonblock: nonblocking commit
12901  *
12902  * This function commits a top-level state object that has been validated
12903  * with drm_atomic_helper_check().
12904  *
12905  * RETURNS
12906  * Zero for success or -errno.
12907  */
12908 static int intel_atomic_commit(struct drm_device *dev,
12909                                struct drm_atomic_state *state,
12910                                bool nonblock)
12911 {
12912         struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
12913         struct drm_i915_private *dev_priv = to_i915(dev);
12914         int ret = 0;
12915
12916         drm_atomic_state_get(state);
12917         i915_sw_fence_init(&intel_state->commit_ready,
12918                            intel_atomic_commit_ready);
12919
12920         /*
12921          * The intel_legacy_cursor_update() fast path takes care
12922          * of avoiding the vblank waits for simple cursor
12923          * movement and flips. For cursor on/off and size changes,
12924          * we want to perform the vblank waits so that watermark
12925          * updates happen during the correct frames. Gen9+ have
12926          * double buffered watermarks and so shouldn't need this.
12927          *
12928          * Unset state->legacy_cursor_update before the call to
12929          * drm_atomic_helper_setup_commit() because otherwise
12930          * drm_atomic_helper_wait_for_flip_done() is a noop and
12931          * we get FIFO underruns because we didn't wait
12932          * for vblank.
12933          *
12934          * FIXME doing watermarks and fb cleanup from a vblank worker
12935          * (assuming we had any) would solve these problems.
12936          */
12937         if (INTEL_GEN(dev_priv) < 9 && state->legacy_cursor_update) {
12938                 struct intel_crtc_state *new_crtc_state;
12939                 struct intel_crtc *crtc;
12940                 int i;
12941
12942                 for_each_new_intel_crtc_in_state(intel_state, crtc, new_crtc_state, i)
12943                         if (new_crtc_state->wm.need_postvbl_update ||
12944                             new_crtc_state->update_wm_post)
12945                                 state->legacy_cursor_update = false;
12946         }
12947
12948         ret = intel_atomic_prepare_commit(dev, state);
12949         if (ret) {
12950                 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
12951                 i915_sw_fence_commit(&intel_state->commit_ready);
12952                 return ret;
12953         }
12954
12955         ret = drm_atomic_helper_setup_commit(state, nonblock);
12956         if (!ret)
12957                 ret = drm_atomic_helper_swap_state(state, true);
12958
12959         if (ret) {
12960                 i915_sw_fence_commit(&intel_state->commit_ready);
12961
12962                 drm_atomic_helper_cleanup_planes(dev, state);
12963                 return ret;
12964         }
12965         dev_priv->wm.distrust_bios_wm = false;
12966         intel_shared_dpll_swap_state(state);
12967         intel_atomic_track_fbs(state);
12968
12969         if (intel_state->modeset) {
12970                 memcpy(dev_priv->min_cdclk, intel_state->min_cdclk,
12971                        sizeof(intel_state->min_cdclk));
12972                 memcpy(dev_priv->min_voltage_level,
12973                        intel_state->min_voltage_level,
12974                        sizeof(intel_state->min_voltage_level));
12975                 dev_priv->active_crtcs = intel_state->active_crtcs;
12976                 dev_priv->cdclk.logical = intel_state->cdclk.logical;
12977                 dev_priv->cdclk.actual = intel_state->cdclk.actual;
12978         }
12979
12980         drm_atomic_state_get(state);
12981         INIT_WORK(&state->commit_work, intel_atomic_commit_work);
12982
12983         i915_sw_fence_commit(&intel_state->commit_ready);
12984         if (nonblock && intel_state->modeset) {
12985                 queue_work(dev_priv->modeset_wq, &state->commit_work);
12986         } else if (nonblock) {
12987                 queue_work(system_unbound_wq, &state->commit_work);
12988         } else {
12989                 if (intel_state->modeset)
12990                         flush_workqueue(dev_priv->modeset_wq);
12991                 intel_atomic_commit_tail(state);
12992         }
12993
12994         return 0;
12995 }
12996
/*
 * CRTC vfuncs: stock atomic helpers for the legacy entry points plus the
 * i915 CRC capture hooks.
 */
static const struct drm_crtc_funcs intel_crtc_funcs = {
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.destroy = intel_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = intel_crtc_duplicate_state,
	.atomic_destroy_state = intel_crtc_destroy_state,
	.set_crc_source = intel_crtc_set_crc_source,
	.verify_crc_source = intel_crtc_verify_crc_source,
	.get_crc_sources = intel_crtc_get_crc_sources,
};
13008
/*
 * One-shot RPS boost armed on a crtc's vblank waitqueue; allocated by
 * add_rps_boost_after_vblank() and freed by do_rps_boost() when the
 * vblank fires.
 */
struct wait_rps_boost {
	struct wait_queue_entry wait;	/* entry on the vblank waitqueue */

	struct drm_crtc *crtc;		/* crtc whose vblank reference we hold */
	struct i915_request *request;	/* request to boost; we hold a reference */
};
13015
13016 static int do_rps_boost(struct wait_queue_entry *_wait,
13017                         unsigned mode, int sync, void *key)
13018 {
13019         struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
13020         struct i915_request *rq = wait->request;
13021
13022         /*
13023          * If we missed the vblank, but the request is already running it
13024          * is reasonable to assume that it will complete before the next
13025          * vblank without our intervention, so leave RPS alone.
13026          */
13027         if (!i915_request_started(rq))
13028                 gen6_rps_boost(rq, NULL);
13029         i915_request_put(rq);
13030
13031         drm_crtc_vblank_put(wait->crtc);
13032
13033         list_del(&wait->wait.entry);
13034         kfree(wait);
13035         return 1;
13036 }
13037
13038 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
13039                                        struct dma_fence *fence)
13040 {
13041         struct wait_rps_boost *wait;
13042
13043         if (!dma_fence_is_i915(fence))
13044                 return;
13045
13046         if (INTEL_GEN(to_i915(crtc->dev)) < 6)
13047                 return;
13048
13049         if (drm_crtc_vblank_get(crtc))
13050                 return;
13051
13052         wait = kmalloc(sizeof(*wait), GFP_KERNEL);
13053         if (!wait) {
13054                 drm_crtc_vblank_put(crtc);
13055                 return;
13056         }
13057
13058         wait->request = to_request(dma_fence_get(fence));
13059         wait->crtc = crtc;
13060
13061         wait->wait.func = do_rps_boost;
13062         wait->wait.flags = 0;
13063
13064         add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
13065 }
13066
13067 static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
13068 {
13069         struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
13070         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
13071         struct drm_framebuffer *fb = plane_state->base.fb;
13072         struct i915_vma *vma;
13073
13074         if (plane->id == PLANE_CURSOR &&
13075             INTEL_INFO(dev_priv)->cursor_needs_physical) {
13076                 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13077                 const int align = intel_cursor_alignment(dev_priv);
13078                 int err;
13079
13080                 err = i915_gem_object_attach_phys(obj, align);
13081                 if (err)
13082                         return err;
13083         }
13084
13085         vma = intel_pin_and_fence_fb_obj(fb,
13086                                          &plane_state->view,
13087                                          intel_plane_uses_fence(plane_state),
13088                                          &plane_state->flags);
13089         if (IS_ERR(vma))
13090                 return PTR_ERR(vma);
13091
13092         plane_state->vma = vma;
13093
13094         return 0;
13095 }
13096
13097 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
13098 {
13099         struct i915_vma *vma;
13100
13101         vma = fetch_and_zero(&old_plane_state->vma);
13102         if (vma)
13103                 intel_unpin_fb_vma(vma, old_plane_state->flags);
13104 }
13105
/*
 * Raise the scheduling priority of all rendering still pending against
 * @obj to display priority, so the flip is not stuck behind
 * lower-priority GPU work.
 */
static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
{
	struct i915_sched_attr attr = {
		.priority = I915_PRIORITY_DISPLAY,
	};

	i915_gem_object_wait_priority(obj, 0, &attr);
}
13114
13115 /**
13116  * intel_prepare_plane_fb - Prepare fb for usage on plane
13117  * @plane: drm plane to prepare for
13118  * @new_state: the plane state being prepared
13119  *
13120  * Prepares a framebuffer for usage on a display plane.  Generally this
13121  * involves pinning the underlying object and updating the frontbuffer tracking
13122  * bits.  Some older platforms need special physical address handling for
13123  * cursor planes.
13124  *
13125  * Must be called with struct_mutex held.
13126  *
13127  * Returns 0 on success, negative error code on failure.
13128  */
13129 int
13130 intel_prepare_plane_fb(struct drm_plane *plane,
13131                        struct drm_plane_state *new_state)
13132 {
13133         struct intel_atomic_state *intel_state =
13134                 to_intel_atomic_state(new_state->state);
13135         struct drm_i915_private *dev_priv = to_i915(plane->dev);
13136         struct drm_framebuffer *fb = new_state->fb;
13137         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13138         struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
13139         int ret;
13140
13141         if (old_obj) {
13142                 struct drm_crtc_state *crtc_state =
13143                         drm_atomic_get_new_crtc_state(new_state->state,
13144                                                       plane->state->crtc);
13145
13146                 /* Big Hammer, we also need to ensure that any pending
13147                  * MI_WAIT_FOR_EVENT inside a user batch buffer on the
13148                  * current scanout is retired before unpinning the old
13149                  * framebuffer. Note that we rely on userspace rendering
13150                  * into the buffer attached to the pipe they are waiting
13151                  * on. If not, userspace generates a GPU hang with IPEHR
13152                  * point to the MI_WAIT_FOR_EVENT.
13153                  *
13154                  * This should only fail upon a hung GPU, in which case we
13155                  * can safely continue.
13156                  */
13157                 if (needs_modeset(crtc_state)) {
13158                         ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
13159                                                               old_obj->resv, NULL,
13160                                                               false, 0,
13161                                                               GFP_KERNEL);
13162                         if (ret < 0)
13163                                 return ret;
13164                 }
13165         }
13166
13167         if (new_state->fence) { /* explicit fencing */
13168                 ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
13169                                                     new_state->fence,
13170                                                     I915_FENCE_TIMEOUT,
13171                                                     GFP_KERNEL);
13172                 if (ret < 0)
13173                         return ret;
13174         }
13175
13176         if (!obj)
13177                 return 0;
13178
13179         ret = i915_gem_object_pin_pages(obj);
13180         if (ret)
13181                 return ret;
13182
13183         ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
13184         if (ret) {
13185                 i915_gem_object_unpin_pages(obj);
13186                 return ret;
13187         }
13188
13189         ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
13190
13191         fb_obj_bump_render_priority(obj);
13192
13193         mutex_unlock(&dev_priv->drm.struct_mutex);
13194         i915_gem_object_unpin_pages(obj);
13195         if (ret)
13196                 return ret;
13197
13198         intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
13199
13200         if (!new_state->fence) { /* implicit fencing */
13201                 struct dma_fence *fence;
13202
13203                 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
13204                                                       obj->resv, NULL,
13205                                                       false, I915_FENCE_TIMEOUT,
13206                                                       GFP_KERNEL);
13207                 if (ret < 0)
13208                         return ret;
13209
13210                 fence = reservation_object_get_excl_rcu(obj->resv);
13211                 if (fence) {
13212                         add_rps_boost_after_vblank(new_state->crtc, fence);
13213                         dma_fence_put(fence);
13214                 }
13215         } else {
13216                 add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
13217         }
13218
13219         /*
13220          * We declare pageflips to be interactive and so merit a small bias
13221          * towards upclocking to deliver the frame on time. By only changing
13222          * the RPS thresholds to sample more regularly and aim for higher
13223          * clocks we can hopefully deliver low power workloads (like kodi)
13224          * that are not quite steady state without resorting to forcing
13225          * maximum clocks following a vblank miss (see do_rps_boost()).
13226          */
13227         if (!intel_state->rps_interactive) {
13228                 intel_rps_mark_interactive(dev_priv, true);
13229                 intel_state->rps_interactive = true;
13230         }
13231
13232         return 0;
13233 }
13234
13235 /**
13236  * intel_cleanup_plane_fb - Cleans up an fb after plane use
13237  * @plane: drm plane to clean up for
13238  * @old_state: the state from the previous modeset
13239  *
13240  * Cleans up a framebuffer that has just been removed from a plane.
13241  *
13242  * Must be called with struct_mutex held.
13243  */
13244 void
13245 intel_cleanup_plane_fb(struct drm_plane *plane,
13246                        struct drm_plane_state *old_state)
13247 {
13248         struct intel_atomic_state *intel_state =
13249                 to_intel_atomic_state(old_state->state);
13250         struct drm_i915_private *dev_priv = to_i915(plane->dev);
13251
13252         if (intel_state->rps_interactive) {
13253                 intel_rps_mark_interactive(dev_priv, false);
13254                 intel_state->rps_interactive = false;
13255         }
13256
13257         /* Should only be called after a successful intel_prepare_plane_fb()! */
13258         mutex_lock(&dev_priv->drm.struct_mutex);
13259         intel_plane_unpin_fb(to_intel_plane_state(old_state));
13260         mutex_unlock(&dev_priv->drm.struct_mutex);
13261 }
13262
13263 int
13264 skl_max_scale(const struct intel_crtc_state *crtc_state,
13265               u32 pixel_format)
13266 {
13267         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
13268         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13269         int max_scale, mult;
13270         int crtc_clock, max_dotclk, tmpclk1, tmpclk2;
13271
13272         if (!crtc_state->base.enable)
13273                 return DRM_PLANE_HELPER_NO_SCALING;
13274
13275         crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
13276         max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
13277
13278         if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
13279                 max_dotclk *= 2;
13280
13281         if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
13282                 return DRM_PLANE_HELPER_NO_SCALING;
13283
13284         /*
13285          * skl max scale is lower of:
13286          *    close to 3 but not 3, -1 is for that purpose
13287          *            or
13288          *    cdclk/crtc_clock
13289          */
13290         mult = pixel_format == DRM_FORMAT_NV12 ? 2 : 3;
13291         tmpclk1 = (1 << 16) * mult - 1;
13292         tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
13293         max_scale = min(tmpclk1, tmpclk2);
13294
13295         return max_scale;
13296 }
13297
/*
 * crtc atomic_begin hook: program color management when it can be done
 * without a full modeset, then open the vblank-evasion critical section
 * around the plane updates.
 */
static void intel_begin_crtc_commit(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *old_intel_cstate =
		to_intel_crtc_state(old_crtc_state);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_crtc_state->state);
	struct intel_crtc_state *intel_cstate =
		intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
	bool modeset = needs_modeset(&intel_cstate->base);

	/* On a full modeset the LUTs are programmed elsewhere. */
	if (!modeset &&
	    (intel_cstate->base.color_mgmt_changed ||
	     intel_cstate->update_pipe)) {
		intel_color_set_csc(&intel_cstate->base);
		intel_color_load_luts(&intel_cstate->base);
	}

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(intel_cstate);

	if (modeset)
		goto out;

	if (intel_cstate->update_pipe)
		intel_update_pipe_config(old_intel_cstate, intel_cstate);
	else if (INTEL_GEN(dev_priv) >= 9)
		skl_detach_scalers(intel_crtc);

out:
	/* Watermarks are updated inside the same vblank-evasion window. */
	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(old_intel_state,
							   intel_cstate);
}
13335
13336 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
13337                                   struct intel_crtc_state *crtc_state)
13338 {
13339         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13340
13341         if (!IS_GEN2(dev_priv))
13342                 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
13343
13344         if (crtc_state->has_pch_encoder) {
13345                 enum pipe pch_transcoder =
13346                         intel_crtc_pch_transcoder(crtc);
13347
13348                 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
13349         }
13350 }
13351
/*
 * crtc atomic_flush hook: close the vblank-evasion window opened by
 * intel_begin_crtc_commit(), and re-arm FIFO underrun reporting when
 * doing a fastset of state carrying I915_MODE_FLAG_INHERITED
 * (presumably state taken over from the BIOS — confirm).
 */
static void intel_finish_crtc_commit(struct drm_crtc *crtc,
				     struct drm_crtc_state *old_crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_crtc_state->state);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);

	intel_pipe_update_end(new_crtc_state);

	if (new_crtc_state->update_pipe &&
	    !needs_modeset(&new_crtc_state->base) &&
	    old_crtc_state->mode.private_flags & I915_MODE_FLAG_INHERITED)
		intel_crtc_arm_fifo_underrun(intel_crtc, new_crtc_state);
}
13368
13369 /**
13370  * intel_plane_destroy - destroy a plane
13371  * @plane: plane to destroy
13372  *
13373  * Common destruction function for all types of planes (primary, cursor,
13374  * sprite).
13375  */
13376 void intel_plane_destroy(struct drm_plane *plane)
13377 {
13378         drm_plane_cleanup(plane);
13379         kfree(to_intel_plane(plane));
13380 }
13381
13382 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
13383                                             u32 format, u64 modifier)
13384 {
13385         switch (modifier) {
13386         case DRM_FORMAT_MOD_LINEAR:
13387         case I915_FORMAT_MOD_X_TILED:
13388                 break;
13389         default:
13390                 return false;
13391         }
13392
13393         switch (format) {
13394         case DRM_FORMAT_C8:
13395         case DRM_FORMAT_RGB565:
13396         case DRM_FORMAT_XRGB1555:
13397         case DRM_FORMAT_XRGB8888:
13398                 return modifier == DRM_FORMAT_MOD_LINEAR ||
13399                         modifier == I915_FORMAT_MOD_X_TILED;
13400         default:
13401                 return false;
13402         }
13403 }
13404
13405 static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
13406                                             u32 format, u64 modifier)
13407 {
13408         switch (modifier) {
13409         case DRM_FORMAT_MOD_LINEAR:
13410         case I915_FORMAT_MOD_X_TILED:
13411                 break;
13412         default:
13413                 return false;
13414         }
13415
13416         switch (format) {
13417         case DRM_FORMAT_C8:
13418         case DRM_FORMAT_RGB565:
13419         case DRM_FORMAT_XRGB8888:
13420         case DRM_FORMAT_XBGR8888:
13421         case DRM_FORMAT_XRGB2101010:
13422         case DRM_FORMAT_XBGR2101010:
13423                 return modifier == DRM_FORMAT_MOD_LINEAR ||
13424                         modifier == I915_FORMAT_MOD_X_TILED;
13425         default:
13426                 return false;
13427         }
13428 }
13429
/*
 * Gen9+ plane format/modifier validation. CCS (render-compressed)
 * modifiers are only accepted on planes with plane->has_ccs set.
 */
static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
					   u32 format, u64 modifier)
{
	struct intel_plane *plane = to_intel_plane(_plane);

	/* First reject modifiers this plane cannot handle at all. */
	switch (modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		break;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		if (!plane->has_ccs)
			return false;
		break;
	default:
		return false;
	}

	/*
	 * The groups below cascade: each group accepts the modifiers it
	 * checks itself plus everything accepted by the groups it falls
	 * through into. E.g. the 8:8:8:8 formats accept CCS, Yf, Y, X
	 * and linear; C8 only linear, X and Y.
	 */
	switch (format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_ABGR8888:
		if (is_ccs_modifier(modifier))
			return true;
		/* fall through */
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
	case DRM_FORMAT_NV12:
		if (modifier == I915_FORMAT_MOD_Yf_TILED)
			return true;
		/* fall through */
	case DRM_FORMAT_C8:
		if (modifier == DRM_FORMAT_MOD_LINEAR ||
		    modifier == I915_FORMAT_MOD_X_TILED ||
		    modifier == I915_FORMAT_MOD_Y_TILED)
			return true;
		/* fall through */
	default:
		return false;
	}
}
13479
13480 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
13481                                               u32 format, u64 modifier)
13482 {
13483         return modifier == DRM_FORMAT_MOD_LINEAR &&
13484                 format == DRM_FORMAT_ARGB8888;
13485 }
13486
13487 static struct drm_plane_funcs skl_plane_funcs = {
13488         .update_plane = drm_atomic_helper_update_plane,
13489         .disable_plane = drm_atomic_helper_disable_plane,
13490         .destroy = intel_plane_destroy,
13491         .atomic_get_property = intel_plane_atomic_get_property,
13492         .atomic_set_property = intel_plane_atomic_set_property,
13493         .atomic_duplicate_state = intel_plane_duplicate_state,
13494         .atomic_destroy_state = intel_plane_destroy_state,
13495         .format_mod_supported = skl_plane_format_mod_supported,
13496 };
13497
13498 static struct drm_plane_funcs i965_plane_funcs = {
13499         .update_plane = drm_atomic_helper_update_plane,
13500         .disable_plane = drm_atomic_helper_disable_plane,
13501         .destroy = intel_plane_destroy,
13502         .atomic_get_property = intel_plane_atomic_get_property,
13503         .atomic_set_property = intel_plane_atomic_set_property,
13504         .atomic_duplicate_state = intel_plane_duplicate_state,
13505         .atomic_destroy_state = intel_plane_destroy_state,
13506         .format_mod_supported = i965_plane_format_mod_supported,
13507 };
13508
13509 static struct drm_plane_funcs i8xx_plane_funcs = {
13510         .update_plane = drm_atomic_helper_update_plane,
13511         .disable_plane = drm_atomic_helper_disable_plane,
13512         .destroy = intel_plane_destroy,
13513         .atomic_get_property = intel_plane_atomic_get_property,
13514         .atomic_set_property = intel_plane_atomic_set_property,
13515         .atomic_duplicate_state = intel_plane_duplicate_state,
13516         .atomic_destroy_state = intel_plane_destroy_state,
13517         .format_mod_supported = i8xx_plane_format_mod_supported,
13518 };
13519
/*
 * Legacy cursor ioctl fast path: update the cursor plane without a full
 * atomic commit (and thus without vblank waits) when only the fb or
 * position changes. Anything that may affect watermarks or needs a
 * modeset takes the regular atomic slowpath instead.
 */
static int
intel_legacy_cursor_update(struct drm_plane *plane,
			   struct drm_crtc *crtc,
			   struct drm_framebuffer *fb,
			   int crtc_x, int crtc_y,
			   unsigned int crtc_w, unsigned int crtc_h,
			   uint32_t src_x, uint32_t src_y,
			   uint32_t src_w, uint32_t src_h,
			   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	int ret;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct drm_framebuffer *old_fb;
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->state);
	struct intel_crtc_state *new_crtc_state;

	/*
	 * When crtc is inactive or there is a modeset pending,
	 * wait for it to complete in the slowpath
	 */
	if (!crtc_state->base.active || needs_modeset(&crtc_state->base) ||
	    crtc_state->update_pipe)
		goto slow;

	old_plane_state = plane->state;
	/*
	 * Don't do an async update if there is an outstanding commit modifying
	 * the plane.  This prevents our async update's changes from getting
	 * overridden by a previous synchronous update's state.
	 */
	if (old_plane_state->commit &&
	    !try_wait_for_completion(&old_plane_state->commit->hw_done))
		goto slow;

	/*
	 * If any parameters change that may affect watermarks,
	 * take the slowpath. Only changing fb or position should be
	 * in the fastpath.
	 */
	if (old_plane_state->crtc != crtc ||
	    old_plane_state->src_w != src_w ||
	    old_plane_state->src_h != src_h ||
	    old_plane_state->crtc_w != crtc_w ||
	    old_plane_state->crtc_h != crtc_h ||
	    !old_plane_state->fb != !fb)
		goto slow;

	new_plane_state = intel_plane_duplicate_state(plane);
	if (!new_plane_state)
		return -ENOMEM;

	new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc));
	if (!new_crtc_state) {
		ret = -ENOMEM;
		goto out_free;
	}

	drm_atomic_set_fb_for_plane(new_plane_state, fb);

	new_plane_state->src_x = src_x;
	new_plane_state->src_y = src_y;
	new_plane_state->src_w = src_w;
	new_plane_state->src_h = src_h;
	new_plane_state->crtc_x = crtc_x;
	new_plane_state->crtc_y = crtc_y;
	new_plane_state->crtc_w = crtc_w;
	new_plane_state->crtc_h = crtc_h;

	ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
						  to_intel_plane_state(old_plane_state),
						  to_intel_plane_state(new_plane_state));
	if (ret)
		goto out_free;

	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
	if (ret)
		goto out_free;

	ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state));
	if (ret)
		goto out_unlock;

	intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_FLIP);

	/* Move frontbuffer tracking bits over to the new fb. */
	old_fb = old_plane_state->fb;
	i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
			  intel_plane->frontbuffer_bit);

	/* Swap plane state */
	plane->state = new_plane_state;

	/*
	 * We cannot swap crtc_state as it may be in use by an atomic commit or
	 * page flip that's running simultaneously. If we swap crtc_state and
	 * destroy the old state, we will cause a use-after-free there.
	 *
	 * Only update active_planes, which is needed for our internal
	 * bookkeeping. Either value will do the right thing when updating
	 * planes atomically. If the cursor was part of the atomic update then
	 * we would have taken the slowpath.
	 */
	crtc_state->active_planes = new_crtc_state->active_planes;

	if (plane->state->visible) {
		trace_intel_update_plane(plane, to_intel_crtc(crtc));
		intel_plane->update_plane(intel_plane, crtc_state,
					  to_intel_plane_state(plane->state));
	} else {
		trace_intel_disable_plane(plane, to_intel_crtc(crtc));
		intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
	}

	intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));

out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);
out_free:
	/*
	 * The duplicated crtc state is never installed, only consulted
	 * above, so it is always ours to free. Of the plane states we
	 * own the new one on failure and the old one on success.
	 */
	if (new_crtc_state)
		intel_crtc_destroy_state(crtc, &new_crtc_state->base);
	if (ret)
		intel_plane_destroy_state(plane, new_plane_state);
	else
		intel_plane_destroy_state(plane, old_plane_state);
	return ret;

slow:
	return drm_atomic_helper_update_plane(plane, crtc, fb,
					      crtc_x, crtc_y, crtc_w, crtc_h,
					      src_x, src_y, src_w, src_h, ctx);
}
13653
/*
 * drm_plane_funcs for the cursor plane. Unlike the other planes,
 * .update_plane points at intel_legacy_cursor_update so legacy cursor
 * ioctls can take a fastpath that bypasses the full atomic commit
 * machinery (falling back to drm_atomic_helper_update_plane when the
 * fastpath conditions aren't met).
 */
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
	.update_plane = intel_legacy_cursor_update,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = intel_cursor_format_mod_supported,
};
13664
13665 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
13666                                enum i9xx_plane_id i9xx_plane)
13667 {
13668         if (!HAS_FBC(dev_priv))
13669                 return false;
13670
13671         if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
13672                 return i9xx_plane == PLANE_A; /* tied to pipe A */
13673         else if (IS_IVYBRIDGE(dev_priv))
13674                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
13675                         i9xx_plane == PLANE_C;
13676         else if (INTEL_GEN(dev_priv) >= 4)
13677                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
13678         else
13679                 return i9xx_plane == PLANE_A;
13680 }
13681
13682 static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
13683                               enum pipe pipe, enum plane_id plane_id)
13684 {
13685         if (!HAS_FBC(dev_priv))
13686                 return false;
13687
13688         return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
13689 }
13690
13691 bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
13692                           enum pipe pipe, enum plane_id plane_id)
13693 {
13694         /*
13695          * FIXME: ICL requires two hardware planes for scanning out NV12
13696          * framebuffers. Do not advertize support until this is implemented.
13697          */
13698         if (INTEL_GEN(dev_priv) >= 11)
13699                 return false;
13700
13701         if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
13702                 return false;
13703
13704         if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
13705                 return false;
13706
13707         if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)
13708                 return false;
13709
13710         return true;
13711 }
13712
/*
 * Allocate and register the primary plane for @pipe.
 *
 * Picks the plane->pipe mapping, the supported pixel formats/modifiers,
 * and the hardware vfuncs based on platform generation, then registers
 * the plane with the DRM core together with its rotation and (gen9+)
 * color properties.
 *
 * Returns the new intel_plane, or an ERR_PTR on failure.
 */
static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *primary = NULL;
	struct intel_plane_state *state = NULL;
	const struct drm_plane_funcs *plane_funcs;
	const uint32_t *intel_primary_formats;
	unsigned int supported_rotations;
	unsigned int num_formats;
	const uint64_t *modifiers;
	int ret;

	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
	if (!primary) {
		ret = -ENOMEM;
		goto fail;
	}

	state = intel_create_plane_state(&primary->base);
	if (!state) {
		ret = -ENOMEM;
		goto fail;
	}

	primary->base.state = &state->base;

	/* scaler_id == -1 means no scaler assigned (gen9+ only). */
	if (INTEL_GEN(dev_priv) >= 9)
		state->scaler_id = -1;
	primary->pipe = pipe;
	/*
	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
	 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
		primary->i9xx_plane = (enum i9xx_plane_id) !pipe;
	else
		primary->i9xx_plane = (enum i9xx_plane_id) pipe;
	primary->id = PLANE_PRIMARY;
	primary->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, primary->id);

	if (INTEL_GEN(dev_priv) >= 9)
		primary->has_fbc = skl_plane_has_fbc(dev_priv,
						     primary->pipe,
						     primary->id);
	else
		primary->has_fbc = i9xx_plane_has_fbc(dev_priv,
						      primary->i9xx_plane);

	/* Let the FBC code know this plane is a compression candidate. */
	if (primary->has_fbc) {
		struct intel_fbc *fbc = &dev_priv->fbc;

		fbc->possible_framebuffer_bits |= primary->frontbuffer_bit;
	}

	/* Platform-specific format/modifier lists and plane vfuncs. */
	if (INTEL_GEN(dev_priv) >= 9) {
		primary->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
						     PLANE_PRIMARY);

		/* NV12-capable planes get the extended (planar) format list. */
		if (skl_plane_has_planar(dev_priv, pipe, PLANE_PRIMARY)) {
			intel_primary_formats = skl_pri_planar_formats;
			num_formats = ARRAY_SIZE(skl_pri_planar_formats);
		} else {
			intel_primary_formats = skl_primary_formats;
			num_formats = ARRAY_SIZE(skl_primary_formats);
		}

		if (primary->has_ccs)
			modifiers = skl_format_modifiers_ccs;
		else
			modifiers = skl_format_modifiers_noccs;

		primary->max_stride = skl_plane_max_stride;
		primary->update_plane = skl_update_plane;
		primary->disable_plane = skl_disable_plane;
		primary->get_hw_state = skl_plane_get_hw_state;
		primary->check_plane = skl_plane_check;

		plane_funcs = &skl_plane_funcs;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		intel_primary_formats = i965_primary_formats;
		num_formats = ARRAY_SIZE(i965_primary_formats);
		modifiers = i9xx_format_modifiers;

		primary->max_stride = i9xx_plane_max_stride;
		primary->update_plane = i9xx_update_plane;
		primary->disable_plane = i9xx_disable_plane;
		primary->get_hw_state = i9xx_plane_get_hw_state;
		primary->check_plane = i9xx_plane_check;

		plane_funcs = &i965_plane_funcs;
	} else {
		intel_primary_formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);
		modifiers = i9xx_format_modifiers;

		primary->max_stride = i9xx_plane_max_stride;
		primary->update_plane = i9xx_update_plane;
		primary->disable_plane = i9xx_disable_plane;
		primary->get_hw_state = i9xx_plane_get_hw_state;
		primary->check_plane = i9xx_plane_check;

		plane_funcs = &i8xx_plane_funcs;
	}

	/*
	 * Register the plane. The debug name convention varies with the
	 * generation: "plane 1A" on gen9+, "primary A" on ilk+/g4x, and
	 * the raw hw plane name ("plane A") before that.
	 */
	if (INTEL_GEN(dev_priv) >= 9)
		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
					       0, plane_funcs,
					       intel_primary_formats, num_formats,
					       modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane 1%c", pipe_name(pipe));
	else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
					       0, plane_funcs,
					       intel_primary_formats, num_formats,
					       modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "primary %c", pipe_name(pipe));
	else
		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
					       0, plane_funcs,
					       intel_primary_formats, num_formats,
					       modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane %c",
					       plane_name(primary->i9xx_plane));
	if (ret)
		goto fail;

	/* Rotation/reflection capabilities grow with the hardware gen. */
	if (INTEL_GEN(dev_priv) >= 10) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
			DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270 |
			DRM_MODE_REFLECT_X;
	} else if (INTEL_GEN(dev_priv) >= 9) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
			DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
	} else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
			DRM_MODE_REFLECT_X;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
	} else {
		supported_rotations = DRM_MODE_ROTATE_0;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&primary->base,
						   DRM_MODE_ROTATE_0,
						   supported_rotations);

	/* gen9+ planes support YCbCr encoding/range selection. */
	if (INTEL_GEN(dev_priv) >= 9)
		drm_plane_create_color_properties(&primary->base,
						  BIT(DRM_COLOR_YCBCR_BT601) |
						  BIT(DRM_COLOR_YCBCR_BT709),
						  BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
						  BIT(DRM_COLOR_YCBCR_FULL_RANGE),
						  DRM_COLOR_YCBCR_BT709,
						  DRM_COLOR_YCBCR_LIMITED_RANGE);

	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);

	return primary;

fail:
	kfree(state);
	kfree(primary);

	return ERR_PTR(ret);
}
13886
/*
 * Allocate and register the cursor plane for @pipe.
 *
 * i845/i865 use their own cursor programming model; everything else
 * uses the i9xx-style per-pipe cursor. Returns the new intel_plane,
 * or an ERR_PTR on failure.
 */
static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
			  enum pipe pipe)
{
	struct intel_plane *cursor = NULL;
	struct intel_plane_state *state = NULL;
	int ret;

	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
	if (!cursor) {
		ret = -ENOMEM;
		goto fail;
	}

	state = intel_create_plane_state(&cursor->base);
	if (!state) {
		ret = -ENOMEM;
		goto fail;
	}

	cursor->base.state = &state->base;

	cursor->pipe = pipe;
	cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
	cursor->id = PLANE_CURSOR;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);

	/* Pick the hardware vfuncs matching the cursor programming model. */
	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		cursor->max_stride = i845_cursor_max_stride;
		cursor->update_plane = i845_update_cursor;
		cursor->disable_plane = i845_disable_cursor;
		cursor->get_hw_state = i845_cursor_get_hw_state;
		cursor->check_plane = i845_check_cursor;
	} else {
		cursor->max_stride = i9xx_cursor_max_stride;
		cursor->update_plane = i9xx_update_cursor;
		cursor->disable_plane = i9xx_disable_cursor;
		cursor->get_hw_state = i9xx_cursor_get_hw_state;
		cursor->check_plane = i9xx_check_cursor;
	}

	/*
	 * ~0 sentinels for the cached register values — presumably so the
	 * first cursor update never matches the cache and always programs
	 * the hardware (NOTE(review): confirm against the update vfuncs).
	 */
	cursor->cursor.base = ~0;
	cursor->cursor.cntl = ~0;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
		cursor->cursor.size = ~0;

	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
				       0, &intel_cursor_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       cursor_format_modifiers,
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor %c", pipe_name(pipe));
	if (ret)
		goto fail;

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&cursor->base,
						   DRM_MODE_ROTATE_0,
						   DRM_MODE_ROTATE_0 |
						   DRM_MODE_ROTATE_180);

	/* scaler_id == -1 means no scaler assigned (gen9+ only). */
	if (INTEL_GEN(dev_priv) >= 9)
		state->scaler_id = -1;

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return cursor;

fail:
	kfree(state);
	kfree(cursor);

	return ERR_PTR(ret);
}
13963
13964 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
13965                                     struct intel_crtc_state *crtc_state)
13966 {
13967         struct intel_crtc_scaler_state *scaler_state =
13968                 &crtc_state->scaler_state;
13969         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13970         int i;
13971
13972         crtc->num_scalers = dev_priv->info.num_scalers[crtc->pipe];
13973         if (!crtc->num_scalers)
13974                 return;
13975
13976         for (i = 0; i < crtc->num_scalers; i++) {
13977                 struct intel_scaler *scaler = &scaler_state->scalers[i];
13978
13979                 scaler->in_use = 0;
13980                 scaler->mode = 0;
13981         }
13982
13983         scaler_state->scaler_id = -1;
13984 }
13985
/*
 * Allocate an intel_crtc for @pipe, create its primary, sprite and
 * cursor planes, register the CRTC with the DRM core and populate the
 * pipe/plane -> crtc lookup tables.
 *
 * Returns 0 on success or a negative errno. On failure only the
 * crtc/crtc_state allocations are freed here; any planes already
 * registered are cleaned up later by drm_mode_config_cleanup().
 */
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state = NULL;
	struct intel_plane *primary = NULL;
	struct intel_plane *cursor = NULL;
	int sprite, ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (!intel_crtc)
		return -ENOMEM;

	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state) {
		ret = -ENOMEM;
		goto fail;
	}
	/* Link the crtc and its initial state in both directions. */
	intel_crtc->config = crtc_state;
	intel_crtc->base.state = &crtc_state->base;
	crtc_state->base.crtc = &intel_crtc->base;

	primary = intel_primary_plane_create(dev_priv, pipe);
	if (IS_ERR(primary)) {
		ret = PTR_ERR(primary);
		goto fail;
	}
	intel_crtc->plane_ids_mask |= BIT(primary->id);

	/* Create whatever sprite planes the platform has for this pipe. */
	for_each_sprite(dev_priv, pipe, sprite) {
		struct intel_plane *plane;

		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			goto fail;
		}
		intel_crtc->plane_ids_mask |= BIT(plane->id);
	}

	cursor = intel_cursor_plane_create(dev_priv, pipe);
	if (IS_ERR(cursor)) {
		ret = PTR_ERR(cursor);
		goto fail;
	}
	intel_crtc->plane_ids_mask |= BIT(cursor->id);

	ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
					&primary->base, &cursor->base,
					&intel_crtc_funcs,
					"pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	intel_crtc->pipe = pipe;

	/* initialize shared scalers */
	intel_crtc_init_scalers(intel_crtc, crtc_state);

	/* Each pipe maps to exactly one crtc; never register it twice. */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
	       dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
	dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;

	/* Pre-gen9 also keeps a fixed hw-plane -> crtc mapping. */
	if (INTEL_GEN(dev_priv) < 9) {
		enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;

		BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
		       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
		dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
	}

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_color_init(&intel_crtc->base);

	/* i915 assumes drm_crtc_index() == pipe for its lookups. */
	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);

	return 0;

fail:
	/*
	 * drm_mode_config_cleanup() will free up any
	 * crtcs/planes already initialized.
	 */
	kfree(crtc_state);
	kfree(intel_crtc);

	return ret;
}
14074
14075 enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
14076 {
14077         struct drm_device *dev = connector->base.dev;
14078
14079         WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
14080
14081         if (!connector->base.state->crtc)
14082                 return INVALID_PIPE;
14083
14084         return to_intel_crtc(connector->base.state->crtc)->pipe;
14085 }
14086
14087 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
14088                                       struct drm_file *file)
14089 {
14090         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
14091         struct drm_crtc *drmmode_crtc;
14092         struct intel_crtc *crtc;
14093
14094         drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
14095         if (!drmmode_crtc)
14096                 return -ENOENT;
14097
14098         crtc = to_intel_crtc(drmmode_crtc);
14099         pipe_from_crtc_id->pipe = crtc->pipe;
14100
14101         return 0;
14102 }
14103
14104 static int intel_encoder_clones(struct intel_encoder *encoder)
14105 {
14106         struct drm_device *dev = encoder->base.dev;
14107         struct intel_encoder *source_encoder;
14108         int index_mask = 0;
14109         int entry = 0;
14110
14111         for_each_intel_encoder(dev, source_encoder) {
14112                 if (encoders_cloneable(encoder, source_encoder))
14113                         index_mask |= (1 << entry);
14114
14115                 entry++;
14116         }
14117
14118         return index_mask;
14119 }
14120
14121 static bool has_edp_a(struct drm_i915_private *dev_priv)
14122 {
14123         if (!IS_MOBILE(dev_priv))
14124                 return false;
14125
14126         if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14127                 return false;
14128
14129         if (IS_GEN5(dev_priv) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
14130                 return false;
14131
14132         return true;
14133 }
14134
/*
 * Whether this platform/SKU can have an integrated analog CRT (VGA)
 * output. Checked before registering the CRT encoder/connector.
 */
static bool intel_crt_present(struct drm_i915_private *dev_priv)
{
	/* No analog port on gen9+. */
	if (INTEL_GEN(dev_priv) >= 9)
		return false;

	/* HSW/BDW ULT SKUs don't have the CRT output. */
	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
		return false;

	if (IS_CHERRYVIEW(dev_priv))
		return false;

	/* LPT-H can fuse off the CRT output via SFUSE_STRAP. */
	if (HAS_PCH_LPT_H(dev_priv) &&
	    I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
		return false;

	/* DDI E can't be used if DDI A requires 4 lanes */
	if (HAS_DDI(dev_priv) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
		return false;

	/* Finally, respect the VBT's opinion on integrated CRT support. */
	if (!dev_priv->vbt.int_crt_support)
		return false;

	return true;
}
14159
14160 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
14161 {
14162         int pps_num;
14163         int pps_idx;
14164
14165         if (HAS_DDI(dev_priv))
14166                 return;
14167         /*
14168          * This w/a is needed at least on CPT/PPT, but to be sure apply it
14169          * everywhere where registers can be write protected.
14170          */
14171         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
14172                 pps_num = 2;
14173         else
14174                 pps_num = 1;
14175
14176         for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
14177                 u32 val = I915_READ(PP_CONTROL(pps_idx));
14178
14179                 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
14180                 I915_WRITE(PP_CONTROL(pps_idx), val);
14181         }
14182 }
14183
14184 static void intel_pps_init(struct drm_i915_private *dev_priv)
14185 {
14186         if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
14187                 dev_priv->pps_mmio_base = PCH_PPS_BASE;
14188         else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
14189                 dev_priv->pps_mmio_base = VLV_PPS_BASE;
14190         else
14191                 dev_priv->pps_mmio_base = PPS_BASE;
14192
14193         intel_pps_unlock_regs_wa(dev_priv);
14194 }
14195
14196 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
14197 {
14198         struct intel_encoder *encoder;
14199         bool dpd_is_edp = false;
14200
14201         intel_pps_init(dev_priv);
14202
14203         if (INTEL_INFO(dev_priv)->num_pipes == 0)
14204                 return;
14205
14206         /*
14207          * intel_edp_init_connector() depends on this completing first, to
14208          * prevent the registeration of both eDP and LVDS and the incorrect
14209          * sharing of the PPS.
14210          */
14211         intel_lvds_init(dev_priv);
14212
14213         if (intel_crt_present(dev_priv))
14214                 intel_crt_init(dev_priv);
14215
14216         if (IS_ICELAKE(dev_priv)) {
14217                 intel_ddi_init(dev_priv, PORT_A);
14218                 intel_ddi_init(dev_priv, PORT_B);
14219                 intel_ddi_init(dev_priv, PORT_C);
14220                 intel_ddi_init(dev_priv, PORT_D);
14221                 intel_ddi_init(dev_priv, PORT_E);
14222                 intel_ddi_init(dev_priv, PORT_F);
14223         } else if (IS_GEN9_LP(dev_priv)) {
14224                 /*
14225                  * FIXME: Broxton doesn't support port detection via the
14226                  * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
14227                  * detect the ports.
14228                  */
14229                 intel_ddi_init(dev_priv, PORT_A);
14230                 intel_ddi_init(dev_priv, PORT_B);
14231                 intel_ddi_init(dev_priv, PORT_C);
14232
14233                 vlv_dsi_init(dev_priv);
14234         } else if (HAS_DDI(dev_priv)) {
14235                 int found;
14236
14237                 /*
14238                  * Haswell uses DDI functions to detect digital outputs.
14239                  * On SKL pre-D0 the strap isn't connected, so we assume
14240                  * it's there.
14241                  */
14242                 found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
14243                 /* WaIgnoreDDIAStrap: skl */
14244                 if (found || IS_GEN9_BC(dev_priv))
14245                         intel_ddi_init(dev_priv, PORT_A);
14246
14247                 /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
14248                  * register */
14249                 found = I915_READ(SFUSE_STRAP);
14250
14251                 if (found & SFUSE_STRAP_DDIB_DETECTED)
14252                         intel_ddi_init(dev_priv, PORT_B);
14253                 if (found & SFUSE_STRAP_DDIC_DETECTED)
14254                         intel_ddi_init(dev_priv, PORT_C);
14255                 if (found & SFUSE_STRAP_DDID_DETECTED)
14256                         intel_ddi_init(dev_priv, PORT_D);
14257                 if (found & SFUSE_STRAP_DDIF_DETECTED)
14258                         intel_ddi_init(dev_priv, PORT_F);
14259                 /*
14260                  * On SKL we don't have a way to detect DDI-E so we rely on VBT.
14261                  */
14262                 if (IS_GEN9_BC(dev_priv) &&
14263                     (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
14264                      dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
14265                      dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
14266                         intel_ddi_init(dev_priv, PORT_E);
14267
14268         } else if (HAS_PCH_SPLIT(dev_priv)) {
14269                 int found;
14270                 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
14271
14272                 if (has_edp_a(dev_priv))
14273                         intel_dp_init(dev_priv, DP_A, PORT_A);
14274
14275                 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
14276                         /* PCH SDVOB multiplex with HDMIB */
14277                         found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
14278                         if (!found)
14279                                 intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
14280                         if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
14281                                 intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
14282                 }
14283
14284                 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
14285                         intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
14286
14287                 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
14288                         intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
14289
14290                 if (I915_READ(PCH_DP_C) & DP_DETECTED)
14291                         intel_dp_init(dev_priv, PCH_DP_C, PORT_C);
14292
14293                 if (I915_READ(PCH_DP_D) & DP_DETECTED)
14294                         intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
14295         } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
14296                 bool has_edp, has_port;
14297
14298                 /*
14299                  * The DP_DETECTED bit is the latched state of the DDC
14300                  * SDA pin at boot. However since eDP doesn't require DDC
14301                  * (no way to plug in a DP->HDMI dongle) the DDC pins for
14302                  * eDP ports may have been muxed to an alternate function.
14303                  * Thus we can't rely on the DP_DETECTED bit alone to detect
14304                  * eDP ports. Consult the VBT as well as DP_DETECTED to
14305                  * detect eDP ports.
14306                  *
14307                  * Sadly the straps seem to be missing sometimes even for HDMI
14308                  * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
14309                  * and VBT for the presence of the port. Additionally we can't
14310                  * trust the port type the VBT declares as we've seen at least
14311                  * HDMI ports that the VBT claim are DP or eDP.
14312                  */
14313                 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
14314                 has_port = intel_bios_is_port_present(dev_priv, PORT_B);
14315                 if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
14316                         has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
14317                 if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
14318                         intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
14319
14320                 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
14321                 has_port = intel_bios_is_port_present(dev_priv, PORT_C);
14322                 if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
14323                         has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
14324                 if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
14325                         intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
14326
14327                 if (IS_CHERRYVIEW(dev_priv)) {
14328                         /*
14329                          * eDP not supported on port D,
14330                          * so no need to worry about it
14331                          */
14332                         has_port = intel_bios_is_port_present(dev_priv, PORT_D);
14333                         if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
14334                                 intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
14335                         if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
14336                                 intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
14337                 }
14338
14339                 vlv_dsi_init(dev_priv);
14340         } else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) {
14341                 bool found = false;
14342
14343                 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
14344                         DRM_DEBUG_KMS("probing SDVOB\n");
14345                         found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
14346                         if (!found && IS_G4X(dev_priv)) {
14347                                 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
14348                                 intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
14349                         }
14350
14351                         if (!found && IS_G4X(dev_priv))
14352                                 intel_dp_init(dev_priv, DP_B, PORT_B);
14353                 }
14354
14355                 /* Before G4X SDVOC doesn't have its own detect register */
14356
14357                 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
14358                         DRM_DEBUG_KMS("probing SDVOC\n");
14359                         found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
14360                 }
14361
14362                 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
14363
14364                         if (IS_G4X(dev_priv)) {
14365                                 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
14366                                 intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
14367                         }
14368                         if (IS_G4X(dev_priv))
14369                                 intel_dp_init(dev_priv, DP_C, PORT_C);
14370                 }
14371
14372                 if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
14373                         intel_dp_init(dev_priv, DP_D, PORT_D);
14374         } else if (IS_GEN2(dev_priv))
14375                 intel_dvo_init(dev_priv);
14376
14377         if (SUPPORTS_TV(dev_priv))
14378                 intel_tv_init(dev_priv);
14379
14380         intel_psr_init(dev_priv);
14381
14382         for_each_intel_encoder(&dev_priv->drm, encoder) {
14383                 encoder->base.possible_crtcs = encoder->crtc_mask;
14384                 encoder->base.possible_clones =
14385                         intel_encoder_clones(encoder);
14386         }
14387
14388         intel_init_pch_refclk(dev_priv);
14389
14390         drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
14391 }
14392
/*
 * Destroy callback for i915 framebuffers: unregister the fb from KMS,
 * drop the framebuffer reference on the backing GEM object taken by
 * intel_framebuffer_init(), and free the wrapper.
 */
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);

	drm_framebuffer_cleanup(fb);

	/*
	 * Balance the obj->framebuffer_references++ done in
	 * intel_framebuffer_init(); underflow indicates an unbalanced put.
	 */
	i915_gem_object_lock(obj);
	WARN_ON(!obj->framebuffer_references--);
	i915_gem_object_unlock(obj);

	i915_gem_object_put(obj);

	kfree(intel_fb);
}
14408
14409 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
14410                                                 struct drm_file *file,
14411                                                 unsigned int *handle)
14412 {
14413         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14414
14415         if (obj->userptr.mm) {
14416                 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
14417                 return -EINVAL;
14418         }
14419
14420         return drm_gem_handle_create(file, &obj->base, handle);
14421 }
14422
/*
 * DIRTYFB ioctl handler: flush pending CPU writes so the display engine
 * observes coherent data, then notify frontbuffer tracking. The clip
 * rectangles are ignored; the whole object is flushed.
 */
static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
					struct drm_file *file,
					unsigned flags, unsigned color,
					struct drm_clip_rect *clips,
					unsigned num_clips)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);

	i915_gem_object_flush_if_display(obj);
	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);

	return 0;
}
14436
/* Framebuffer vtable shared by all i915 user-created framebuffers. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
14442
14443 static
14444 u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
14445                          uint64_t fb_modifier, uint32_t pixel_format)
14446 {
14447         struct intel_crtc *crtc;
14448         struct intel_plane *plane;
14449
14450         /*
14451          * We assume the primary plane for pipe A has
14452          * the highest stride limits of them all.
14453          */
14454         crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
14455         plane = to_intel_plane(crtc->base.primary);
14456
14457         return plane->max_stride(plane, pixel_format, fb_modifier,
14458                                  DRM_MODE_ROTATE_0);
14459 }
14460
14461 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
14462                                   struct drm_i915_gem_object *obj,
14463                                   struct drm_mode_fb_cmd2 *mode_cmd)
14464 {
14465         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
14466         struct drm_framebuffer *fb = &intel_fb->base;
14467         struct drm_format_name_buf format_name;
14468         u32 pitch_limit;
14469         unsigned int tiling, stride;
14470         int ret = -EINVAL;
14471         int i;
14472
14473         i915_gem_object_lock(obj);
14474         obj->framebuffer_references++;
14475         tiling = i915_gem_object_get_tiling(obj);
14476         stride = i915_gem_object_get_stride(obj);
14477         i915_gem_object_unlock(obj);
14478
14479         if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
14480                 /*
14481                  * If there's a fence, enforce that
14482                  * the fb modifier and tiling mode match.
14483                  */
14484                 if (tiling != I915_TILING_NONE &&
14485                     tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
14486                         DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
14487                         goto err;
14488                 }
14489         } else {
14490                 if (tiling == I915_TILING_X) {
14491                         mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
14492                 } else if (tiling == I915_TILING_Y) {
14493                         DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
14494                         goto err;
14495                 }
14496         }
14497
14498         /* Passed in modifier sanity checking. */
14499         switch (mode_cmd->modifier[0]) {
14500         case I915_FORMAT_MOD_Y_TILED_CCS:
14501         case I915_FORMAT_MOD_Yf_TILED_CCS:
14502                 switch (mode_cmd->pixel_format) {
14503                 case DRM_FORMAT_XBGR8888:
14504                 case DRM_FORMAT_ABGR8888:
14505                 case DRM_FORMAT_XRGB8888:
14506                 case DRM_FORMAT_ARGB8888:
14507                         break;
14508                 default:
14509                         DRM_DEBUG_KMS("RC supported only with RGB8888 formats\n");
14510                         goto err;
14511                 }
14512                 /* fall through */
14513         case I915_FORMAT_MOD_Y_TILED:
14514         case I915_FORMAT_MOD_Yf_TILED:
14515                 if (INTEL_GEN(dev_priv) < 9) {
14516                         DRM_DEBUG_KMS("Unsupported tiling 0x%llx!\n",
14517                                       mode_cmd->modifier[0]);
14518                         goto err;
14519                 }
14520         case DRM_FORMAT_MOD_LINEAR:
14521         case I915_FORMAT_MOD_X_TILED:
14522                 break;
14523         default:
14524                 DRM_DEBUG_KMS("Unsupported fb modifier 0x%llx!\n",
14525                               mode_cmd->modifier[0]);
14526                 goto err;
14527         }
14528
14529         /*
14530          * gen2/3 display engine uses the fence if present,
14531          * so the tiling mode must match the fb modifier exactly.
14532          */
14533         if (INTEL_GEN(dev_priv) < 4 &&
14534             tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
14535                 DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
14536                 goto err;
14537         }
14538
14539         pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0],
14540                                            mode_cmd->pixel_format);
14541         if (mode_cmd->pitches[0] > pitch_limit) {
14542                 DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
14543                               mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
14544                               "tiled" : "linear",
14545                               mode_cmd->pitches[0], pitch_limit);
14546                 goto err;
14547         }
14548
14549         /*
14550          * If there's a fence, enforce that
14551          * the fb pitch and fence stride match.
14552          */
14553         if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
14554                 DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
14555                               mode_cmd->pitches[0], stride);
14556                 goto err;
14557         }
14558
14559         /* Reject formats not supported by any plane early. */
14560         switch (mode_cmd->pixel_format) {
14561         case DRM_FORMAT_C8:
14562         case DRM_FORMAT_RGB565:
14563         case DRM_FORMAT_XRGB8888:
14564         case DRM_FORMAT_ARGB8888:
14565                 break;
14566         case DRM_FORMAT_XRGB1555:
14567                 if (INTEL_GEN(dev_priv) > 3) {
14568                         DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14569                                       drm_get_format_name(mode_cmd->pixel_format, &format_name));
14570                         goto err;
14571                 }
14572                 break;
14573         case DRM_FORMAT_ABGR8888:
14574                 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
14575                     INTEL_GEN(dev_priv) < 9) {
14576                         DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14577                                       drm_get_format_name(mode_cmd->pixel_format, &format_name));
14578                         goto err;
14579                 }
14580                 break;
14581         case DRM_FORMAT_XBGR8888:
14582         case DRM_FORMAT_XRGB2101010:
14583         case DRM_FORMAT_XBGR2101010:
14584                 if (INTEL_GEN(dev_priv) < 4) {
14585                         DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14586                                       drm_get_format_name(mode_cmd->pixel_format, &format_name));
14587                         goto err;
14588                 }
14589                 break;
14590         case DRM_FORMAT_ABGR2101010:
14591                 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
14592                         DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14593                                       drm_get_format_name(mode_cmd->pixel_format, &format_name));
14594                         goto err;
14595                 }
14596                 break;
14597         case DRM_FORMAT_YUYV:
14598         case DRM_FORMAT_UYVY:
14599         case DRM_FORMAT_YVYU:
14600         case DRM_FORMAT_VYUY:
14601                 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
14602                         DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14603                                       drm_get_format_name(mode_cmd->pixel_format, &format_name));
14604                         goto err;
14605                 }
14606                 break;
14607         case DRM_FORMAT_NV12:
14608                 if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) ||
14609                     IS_BROXTON(dev_priv) || INTEL_GEN(dev_priv) >= 11) {
14610                         DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14611                                       drm_get_format_name(mode_cmd->pixel_format,
14612                                                           &format_name));
14613                         goto err;
14614                 }
14615                 break;
14616         default:
14617                 DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14618                               drm_get_format_name(mode_cmd->pixel_format, &format_name));
14619                 goto err;
14620         }
14621
14622         /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
14623         if (mode_cmd->offsets[0] != 0)
14624                 goto err;
14625
14626         drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
14627
14628         if (fb->format->format == DRM_FORMAT_NV12 &&
14629             (fb->width < SKL_MIN_YUV_420_SRC_W ||
14630              fb->height < SKL_MIN_YUV_420_SRC_H ||
14631              (fb->width % 4) != 0 || (fb->height % 4) != 0)) {
14632                 DRM_DEBUG_KMS("src dimensions not correct for NV12\n");
14633                 return -EINVAL;
14634         }
14635
14636         for (i = 0; i < fb->format->num_planes; i++) {
14637                 u32 stride_alignment;
14638
14639                 if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
14640                         DRM_DEBUG_KMS("bad plane %d handle\n", i);
14641                         goto err;
14642                 }
14643
14644                 stride_alignment = intel_fb_stride_alignment(fb, i);
14645
14646                 /*
14647                  * Display WA #0531: skl,bxt,kbl,glk
14648                  *
14649                  * Render decompression and plane width > 3840
14650                  * combined with horizontal panning requires the
14651                  * plane stride to be a multiple of 4. We'll just
14652                  * require the entire fb to accommodate that to avoid
14653                  * potential runtime errors at plane configuration time.
14654                  */
14655                 if (IS_GEN9(dev_priv) && i == 0 && fb->width > 3840 &&
14656                     is_ccs_modifier(fb->modifier))
14657                         stride_alignment *= 4;
14658
14659                 if (fb->pitches[i] & (stride_alignment - 1)) {
14660                         DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
14661                                       i, fb->pitches[i], stride_alignment);
14662                         goto err;
14663                 }
14664
14665                 fb->obj[i] = &obj->base;
14666         }
14667
14668         ret = intel_fill_fb_info(dev_priv, fb);
14669         if (ret)
14670                 goto err;
14671
14672         ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
14673         if (ret) {
14674                 DRM_ERROR("framebuffer init failed %d\n", ret);
14675                 goto err;
14676         }
14677
14678         return 0;
14679
14680 err:
14681         i915_gem_object_lock(obj);
14682         obj->framebuffer_references--;
14683         i915_gem_object_unlock(obj);
14684         return ret;
14685 }
14686
14687 static struct drm_framebuffer *
14688 intel_user_framebuffer_create(struct drm_device *dev,
14689                               struct drm_file *filp,
14690                               const struct drm_mode_fb_cmd2 *user_mode_cmd)
14691 {
14692         struct drm_framebuffer *fb;
14693         struct drm_i915_gem_object *obj;
14694         struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
14695
14696         obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
14697         if (!obj)
14698                 return ERR_PTR(-ENOENT);
14699
14700         fb = intel_framebuffer_create(obj, &mode_cmd);
14701         if (IS_ERR(fb))
14702                 i915_gem_object_put(obj);
14703
14704         return fb;
14705 }
14706
/*
 * Free an intel_atomic_state: release the core atomic state, tear down
 * the commit_ready fence embedded in the i915 wrapper, then free the
 * allocation itself (order matters; the fence lives inside the state).
 */
static void intel_atomic_state_free(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);

	drm_atomic_state_default_release(state);

	i915_sw_fence_fini(&intel_state->commit_ready);

	kfree(state);
}
14717
14718 static enum drm_mode_status
14719 intel_mode_valid(struct drm_device *dev,
14720                  const struct drm_display_mode *mode)
14721 {
14722         struct drm_i915_private *dev_priv = to_i915(dev);
14723         int hdisplay_max, htotal_max;
14724         int vdisplay_max, vtotal_max;
14725
14726         /*
14727          * Can't reject DBLSCAN here because Xorg ddxen can add piles
14728          * of DBLSCAN modes to the output's mode list when they detect
14729          * the scaling mode property on the connector. And they don't
14730          * ask the kernel to validate those modes in any way until
14731          * modeset time at which point the client gets a protocol error.
14732          * So in order to not upset those clients we silently ignore the
14733          * DBLSCAN flag on such connectors. For other connectors we will
14734          * reject modes with the DBLSCAN flag in encoder->compute_config().
14735          * And we always reject DBLSCAN modes in connector->mode_valid()
14736          * as we never want such modes on the connector's mode list.
14737          */
14738
14739         if (mode->vscan > 1)
14740                 return MODE_NO_VSCAN;
14741
14742         if (mode->flags & DRM_MODE_FLAG_HSKEW)
14743                 return MODE_H_ILLEGAL;
14744
14745         if (mode->flags & (DRM_MODE_FLAG_CSYNC |
14746                            DRM_MODE_FLAG_NCSYNC |
14747                            DRM_MODE_FLAG_PCSYNC))
14748                 return MODE_HSYNC;
14749
14750         if (mode->flags & (DRM_MODE_FLAG_BCAST |
14751                            DRM_MODE_FLAG_PIXMUX |
14752                            DRM_MODE_FLAG_CLKDIV2))
14753                 return MODE_BAD;
14754
14755         if (INTEL_GEN(dev_priv) >= 9 ||
14756             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
14757                 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
14758                 vdisplay_max = 4096;
14759                 htotal_max = 8192;
14760                 vtotal_max = 8192;
14761         } else if (INTEL_GEN(dev_priv) >= 3) {
14762                 hdisplay_max = 4096;
14763                 vdisplay_max = 4096;
14764                 htotal_max = 8192;
14765                 vtotal_max = 8192;
14766         } else {
14767                 hdisplay_max = 2048;
14768                 vdisplay_max = 2048;
14769                 htotal_max = 4096;
14770                 vtotal_max = 4096;
14771         }
14772
14773         if (mode->hdisplay > hdisplay_max ||
14774             mode->hsync_start > htotal_max ||
14775             mode->hsync_end > htotal_max ||
14776             mode->htotal > htotal_max)
14777                 return MODE_H_ILLEGAL;
14778
14779         if (mode->vdisplay > vdisplay_max ||
14780             mode->vsync_start > vtotal_max ||
14781             mode->vsync_end > vtotal_max ||
14782             mode->vtotal > vtotal_max)
14783                 return MODE_V_ILLEGAL;
14784
14785         return MODE_OK;
14786 }
14787
/* Mode config vtable wiring the i915 atomic/fb hooks into the DRM core. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
14799
14800 /**
14801  * intel_init_display_hooks - initialize the display modesetting hooks
14802  * @dev_priv: device private
14803  */
14804 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
14805 {
14806         intel_init_cdclk_hooks(dev_priv);
14807
14808         if (INTEL_GEN(dev_priv) >= 9) {
14809                 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
14810                 dev_priv->display.get_initial_plane_config =
14811                         skylake_get_initial_plane_config;
14812                 dev_priv->display.crtc_compute_clock =
14813                         haswell_crtc_compute_clock;
14814                 dev_priv->display.crtc_enable = haswell_crtc_enable;
14815                 dev_priv->display.crtc_disable = haswell_crtc_disable;
14816         } else if (HAS_DDI(dev_priv)) {
14817                 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
14818                 dev_priv->display.get_initial_plane_config =
14819                         i9xx_get_initial_plane_config;
14820                 dev_priv->display.crtc_compute_clock =
14821                         haswell_crtc_compute_clock;
14822                 dev_priv->display.crtc_enable = haswell_crtc_enable;
14823                 dev_priv->display.crtc_disable = haswell_crtc_disable;
14824         } else if (HAS_PCH_SPLIT(dev_priv)) {
14825                 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
14826                 dev_priv->display.get_initial_plane_config =
14827                         i9xx_get_initial_plane_config;
14828                 dev_priv->display.crtc_compute_clock =
14829                         ironlake_crtc_compute_clock;
14830                 dev_priv->display.crtc_enable = ironlake_crtc_enable;
14831                 dev_priv->display.crtc_disable = ironlake_crtc_disable;
14832         } else if (IS_CHERRYVIEW(dev_priv)) {
14833                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14834                 dev_priv->display.get_initial_plane_config =
14835                         i9xx_get_initial_plane_config;
14836                 dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
14837                 dev_priv->display.crtc_enable = valleyview_crtc_enable;
14838                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14839         } else if (IS_VALLEYVIEW(dev_priv)) {
14840                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14841                 dev_priv->display.get_initial_plane_config =
14842                         i9xx_get_initial_plane_config;
14843                 dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
14844                 dev_priv->display.crtc_enable = valleyview_crtc_enable;
14845                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14846         } else if (IS_G4X(dev_priv)) {
14847                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14848                 dev_priv->display.get_initial_plane_config =
14849                         i9xx_get_initial_plane_config;
14850                 dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
14851                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
14852                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14853         } else if (IS_PINEVIEW(dev_priv)) {
14854                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14855                 dev_priv->display.get_initial_plane_config =
14856                         i9xx_get_initial_plane_config;
14857                 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
14858                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
14859                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14860         } else if (!IS_GEN2(dev_priv)) {
14861                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14862                 dev_priv->display.get_initial_plane_config =
14863                         i9xx_get_initial_plane_config;
14864                 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
14865                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
14866                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14867         } else {
14868                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14869                 dev_priv->display.get_initial_plane_config =
14870                         i9xx_get_initial_plane_config;
14871                 dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
14872                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
14873                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14874         }
14875
14876         if (IS_GEN5(dev_priv)) {
14877                 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
14878         } else if (IS_GEN6(dev_priv)) {
14879                 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
14880         } else if (IS_IVYBRIDGE(dev_priv)) {
14881                 /* FIXME: detect B0+ stepping and use auto training */
14882                 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
14883         } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
14884                 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
14885         }
14886
14887         if (INTEL_GEN(dev_priv) >= 9)
14888                 dev_priv->display.update_crtcs = skl_update_crtcs;
14889         else
14890                 dev_priv->display.update_crtcs = intel_update_crtcs;
14891 }
14892
14893 /*
14894  * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
14895  */
14896 static void quirk_ssc_force_disable(struct drm_device *dev)
14897 {
14898         struct drm_i915_private *dev_priv = to_i915(dev);
14899         dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
14900         DRM_INFO("applying lvds SSC disable quirk\n");
14901 }
14902
14903 /*
14904  * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
14905  * brightness value
14906  */
14907 static void quirk_invert_brightness(struct drm_device *dev)
14908 {
14909         struct drm_i915_private *dev_priv = to_i915(dev);
14910         dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
14911         DRM_INFO("applying inverted panel brightness quirk\n");
14912 }
14913
14914 /* Some VBT's incorrectly indicate no backlight is present */
14915 static void quirk_backlight_present(struct drm_device *dev)
14916 {
14917         struct drm_i915_private *dev_priv = to_i915(dev);
14918         dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
14919         DRM_INFO("applying backlight present quirk\n");
14920 }
14921
/*
 * Toshiba Satellite P50-C-18C requires T12 delay to be min 800ms
 * which is 300 ms greater than eDP spec T12 min.
 */
static void quirk_increase_t12_delay(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	dev_priv->quirks |= QUIRK_INCREASE_T12_DELAY;
	DRM_INFO("Applying T12 delay quirk\n");
}
14932
14933 /*
14934  * GeminiLake NUC HDMI outputs require additional off time
14935  * this allows the onboard retimer to correctly sync to signal
14936  */
14937 static void quirk_increase_ddi_disabled_time(struct drm_device *dev)
14938 {
14939         struct drm_i915_private *dev_priv = to_i915(dev);
14940
14941         dev_priv->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
14942         DRM_INFO("Applying Increase DDI Disabled quirk\n");
14943 }
14944
/* PCI ID keyed quirk: match on device + subsystem IDs, then run hook. */
struct intel_quirk {
	int device;             /* PCI device ID */
	int subsystem_vendor;   /* PCI subsystem vendor ID, or PCI_ANY_ID */
	int subsystem_device;   /* PCI subsystem device ID, or PCI_ANY_ID */
	void (*hook)(struct drm_device *dev);
};
14951
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);
	/* NULL-terminated list of DMI matches selecting affected systems */
	const struct dmi_system_id (*dmi_id_list)[];
};
14957
/* DMI match callback: just log which system triggered the quirk. */
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
	return 1;
}
14963
/* DMI-matched quirks, applied by intel_init_quirks(). */
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};
14979
/*
 * PCI ID keyed quirk table, matched against the GPU's device ID and the
 * board's subsystem vendor/device IDs in intel_init_quirks().
 */
static struct intel_quirk intel_quirks[] = {
	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },

	/* Acer Aspire 5336 */
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },

	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },

	/* Acer C720 Chromebook (Core i3 4005U) */
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },

	/* Apple Macbook 2,1 (Core 2 T7400) */
	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },

	/* Apple Macbook 4,1 */
	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },

	/* Toshiba CB35 Chromebook (Celeron 2955U) */
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },

	/* HP Chromebook 14 (Celeron 2955U) */
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },

	/* Dell Chromebook 11 */
	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },

	/* Dell Chromebook 11 (2015 version) */
	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },

	/* Toshiba Satellite P50-C-18C */
	{ 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },

	/* GeminiLake NUC */
	{ 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
	{ 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
	/* ASRock ITX*/
	{ 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
	{ 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
};
15039
15040 static void intel_init_quirks(struct drm_device *dev)
15041 {
15042         struct pci_dev *d = dev->pdev;
15043         int i;
15044
15045         for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
15046                 struct intel_quirk *q = &intel_quirks[i];
15047
15048                 if (d->device == q->device &&
15049                     (d->subsystem_vendor == q->subsystem_vendor ||
15050                      q->subsystem_vendor == PCI_ANY_ID) &&
15051                     (d->subsystem_device == q->subsystem_device ||
15052                      q->subsystem_device == PCI_ANY_ID))
15053                         q->hook(dev);
15054         }
15055         for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
15056                 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
15057                         intel_dmi_quirks[i].hook(dev);
15058         }
15059 }
15060
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	u8 sr1;
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
	outb(SR01, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	/* Set bit 5 of sequencer register SR01 (VGA screen off) first */
	outb(sr1 | 1<<5, VGA_SR_DATA);
	vga_put(pdev, VGA_RSRC_LEGACY_IO);
	/* Let the screen-off take effect before disabling the plane */
	udelay(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}
15079
15080 void intel_modeset_init_hw(struct drm_device *dev)
15081 {
15082         struct drm_i915_private *dev_priv = to_i915(dev);
15083
15084         intel_update_cdclk(dev_priv);
15085         intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
15086         dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
15087 }
15088
15089 /*
15090  * Calculate what we think the watermarks should be for the state we've read
15091  * out of the hardware and then immediately program those watermarks so that
15092  * we ensure the hardware settings match our internal state.
15093  *
15094  * We can calculate what we think WM's should be by creating a duplicate of the
15095  * current state (which was constructed during hardware readout) and running it
15096  * through the atomic check code to calculate new watermark values in the
15097  * state object.
15098  */
static void sanitize_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.optimize_watermarks)
		return;

	/*
	 * We need to hold connection_mutex before calling duplicate_state so
	 * that the connector loop is protected.
	 */
	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		/* Deadlock with a concurrent locker: back off and retry */
		drm_modeset_backoff(&ctx);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto fail;
	}

	/* Duplicate the committed state so we can run atomic check on it */
	state = drm_atomic_helper_duplicate_state(dev, &ctx);
	if (WARN_ON(IS_ERR(state)))
		goto fail;

	intel_state = to_intel_atomic_state(state);

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH_DISPLAY(dev_priv))
		intel_state->skip_intermediate_wm = true;

	ret = intel_atomic_check(dev, state);
	if (ret) {
		/*
		 * If we fail here, it means that the hardware appears to be
		 * programmed in a way that shouldn't be possible, given our
		 * understanding of watermark requirements.  This might mean a
		 * mistake in the hardware readout code or a mistake in the
		 * watermark calculations for a given platform.  Raise a WARN
		 * so that this is noticeable.
		 *
		 * If this actually happens, we'll have to just leave the
		 * BIOS-programmed watermarks untouched and hope for the best.
		 */
		WARN(true, "Could not determine valid watermarks for inherited state\n");
		goto put_state;
	}

	/* Write calculated watermark values back */
	for_each_new_crtc_in_state(state, crtc, cstate, i) {
		struct intel_crtc_state *cs = to_intel_crtc_state(cstate);

		cs->wm.need_postvbl_update = true;
		dev_priv->display.optimize_watermarks(intel_state, cs);

		/* Propagate the computed wm into the committed crtc state */
		to_intel_crtc_state(crtc->state)->wm = cs->wm;
	}

put_state:
	drm_atomic_state_put(state);
fail:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
15175
15176 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
15177 {
15178         if (IS_GEN5(dev_priv)) {
15179                 u32 fdi_pll_clk =
15180                         I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
15181
15182                 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
15183         } else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
15184                 dev_priv->fdi_pll_freq = 270000;
15185         } else {
15186                 return;
15187         }
15188
15189         DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
15190 }
15191
/*
 * Commit the state read out of the hardware back through the atomic
 * machinery, pulling in all planes on active crtcs, so that every
 * intel_plane_state is fully computed before the first real modeset.
 * Returns 0 on success or a negative error code.
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	/* Must be re-set after drm_atomic_state_clear() on a retry */
	state->acquire_ctx = &ctx;

	drm_for_each_crtc(crtc, dev) {
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->active) {
			ret = drm_atomic_add_affected_planes(state, crtc);
			if (ret)
				goto out;
		}
	}

	ret = drm_atomic_commit(state);

out:
	/* Standard atomic deadlock dance: clear state, back off, retry */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
15239
15240 int intel_modeset_init(struct drm_device *dev)
15241 {
15242         struct drm_i915_private *dev_priv = to_i915(dev);
15243         struct i915_ggtt *ggtt = &dev_priv->ggtt;
15244         enum pipe pipe;
15245         struct intel_crtc *crtc;
15246         int ret;
15247
15248         dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
15249
15250         drm_mode_config_init(dev);
15251
15252         dev->mode_config.min_width = 0;
15253         dev->mode_config.min_height = 0;
15254
15255         dev->mode_config.preferred_depth = 24;
15256         dev->mode_config.prefer_shadow = 1;
15257
15258         dev->mode_config.allow_fb_modifiers = true;
15259
15260         dev->mode_config.funcs = &intel_mode_funcs;
15261
15262         init_llist_head(&dev_priv->atomic_helper.free_list);
15263         INIT_WORK(&dev_priv->atomic_helper.free_work,
15264                   intel_atomic_helper_free_state_worker);
15265
15266         intel_init_quirks(dev);
15267
15268         intel_init_pm(dev_priv);
15269
15270         /*
15271          * There may be no VBT; and if the BIOS enabled SSC we can
15272          * just keep using it to avoid unnecessary flicker.  Whereas if the
15273          * BIOS isn't using it, don't assume it will work even if the VBT
15274          * indicates as much.
15275          */
15276         if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
15277                 bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
15278                                             DREF_SSC1_ENABLE);
15279
15280                 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
15281                         DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
15282                                      bios_lvds_use_ssc ? "en" : "dis",
15283                                      dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
15284                         dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
15285                 }
15286         }
15287
15288         /* maximum framebuffer dimensions */
15289         if (IS_GEN2(dev_priv)) {
15290                 dev->mode_config.max_width = 2048;
15291                 dev->mode_config.max_height = 2048;
15292         } else if (IS_GEN3(dev_priv)) {
15293                 dev->mode_config.max_width = 4096;
15294                 dev->mode_config.max_height = 4096;
15295         } else {
15296                 dev->mode_config.max_width = 8192;
15297                 dev->mode_config.max_height = 8192;
15298         }
15299
15300         if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
15301                 dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
15302                 dev->mode_config.cursor_height = 1023;
15303         } else if (IS_GEN2(dev_priv)) {
15304                 dev->mode_config.cursor_width = 64;
15305                 dev->mode_config.cursor_height = 64;
15306         } else {
15307                 dev->mode_config.cursor_width = 256;
15308                 dev->mode_config.cursor_height = 256;
15309         }
15310
15311         dev->mode_config.fb_base = ggtt->gmadr.start;
15312
15313         DRM_DEBUG_KMS("%d display pipe%s available.\n",
15314                       INTEL_INFO(dev_priv)->num_pipes,
15315                       INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");
15316
15317         for_each_pipe(dev_priv, pipe) {
15318                 ret = intel_crtc_init(dev_priv, pipe);
15319                 if (ret) {
15320                         drm_mode_config_cleanup(dev);
15321                         return ret;
15322                 }
15323         }
15324
15325         intel_shared_dpll_init(dev);
15326         intel_update_fdi_pll_freq(dev_priv);
15327
15328         intel_update_czclk(dev_priv);
15329         intel_modeset_init_hw(dev);
15330
15331         if (dev_priv->max_cdclk_freq == 0)
15332                 intel_update_max_cdclk(dev_priv);
15333
15334         /* Just disable it once at startup */
15335         i915_disable_vga(dev_priv);
15336         intel_setup_outputs(dev_priv);
15337
15338         drm_modeset_lock_all(dev);
15339         intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
15340         drm_modeset_unlock_all(dev);
15341
15342         for_each_intel_crtc(dev, crtc) {
15343                 struct intel_initial_plane_config plane_config = {};
15344
15345                 if (!crtc->active)
15346                         continue;
15347
15348                 /*
15349                  * Note that reserving the BIOS fb up front prevents us
15350                  * from stuffing other stolen allocations like the ring
15351                  * on top.  This prevents some ugliness at boot time, and
15352                  * can even allow for smooth boot transitions if the BIOS
15353                  * fb is large enough for the active pipe configuration.
15354                  */
15355                 dev_priv->display.get_initial_plane_config(crtc,
15356                                                            &plane_config);
15357
15358                 /*
15359                  * If the fb is shared between multiple heads, we'll
15360                  * just get the first one.
15361                  */
15362                 intel_find_initial_plane_obj(crtc, &plane_config);
15363         }
15364
15365         /*
15366          * Make sure hardware watermarks really match the state we read out.
15367          * Note that we need to do this after reconstructing the BIOS fb's
15368          * since the watermark calculation done here will use pstate->fb.
15369          */
15370         if (!HAS_GMCH_DISPLAY(dev_priv))
15371                 sanitize_watermarks(dev);
15372
15373         /*
15374          * Force all active planes to recompute their states. So that on
15375          * mode_setcrtc after probe, all the intel_plane_state variables
15376          * are already calculated and there is no assert_plane warnings
15377          * during bootup.
15378          */
15379         ret = intel_initial_commit(dev);
15380         if (ret)
15381                 DRM_DEBUG_KMS("Initial commit in probe failed.\n");
15382
15383         return 0;
15384 }
15385
/*
 * Force-enable a pipe on i830 with a fixed 640x480@60 mode. Used by the
 * quirk that requires a pipe to be running (see the "force quirk" debug
 * message); programs the DPLL, timing and PIPECONF registers directly.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* Sanity-check that the divisors above yield the expected dotclock */
	WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);

	DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		      pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	/* Preserve only the DVO 2x bit from the current DPLL value */
	dpll = (I915_READ(DPLL(pipe)) & DPLL_DVO_2X_MODE) |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	I915_WRITE(FP0(pipe), fp);
	I915_WRITE(FP1(pipe), fp);

	/* 640x480 active, standard VGA blank/sync/total timings */
	I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		I915_WRITE(DPLL(pipe), dpll);
		POSTING_READ(DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	POSTING_READ(PIPECONF(pipe));

	/* Confirm the pipe is actually running before returning */
	intel_wait_for_pipe_scanline_moving(crtc);
}
15455
/*
 * Counterpart to i830_enable_pipe(): shut down a pipe that was
 * force-enabled by the quirk, then disable its DPLL (keeping VGA mode
 * disabled).
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
		      pipe_name(pipe));

	/* All planes and cursors are expected to be off at this point */
	WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
	WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);

	I915_WRITE(PIPECONF(pipe), 0);
	POSTING_READ(PIPECONF(pipe));

	/* Wait for the pipe to actually stop before killing the DPLL */
	intel_wait_for_pipe_scanline_stopped(crtc);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
15477
15478 static bool intel_plane_mapping_ok(struct intel_crtc *crtc,
15479                                    struct intel_plane *plane)
15480 {
15481         enum pipe pipe;
15482
15483         if (!plane->get_hw_state(plane, &pipe))
15484                 return true;
15485
15486         return pipe == crtc->pipe;
15487 }
15488
15489 static void
15490 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
15491 {
15492         struct intel_crtc *crtc;
15493
15494         if (INTEL_GEN(dev_priv) >= 4)
15495                 return;
15496
15497         for_each_intel_crtc(&dev_priv->drm, crtc) {
15498                 struct intel_plane *plane =
15499                         to_intel_plane(crtc->base.primary);
15500
15501                 if (intel_plane_mapping_ok(crtc, plane))
15502                         continue;
15503
15504                 DRM_DEBUG_KMS("%s attached to the wrong pipe, disabling plane\n",
15505                               plane->base.name);
15506                 intel_plane_disable_noatomic(crtc, plane);
15507         }
15508 }
15509
15510 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
15511 {
15512         struct drm_device *dev = crtc->base.dev;
15513         struct intel_encoder *encoder;
15514
15515         for_each_encoder_on_crtc(dev, &crtc->base, encoder)
15516                 return true;
15517
15518         return false;
15519 }
15520
15521 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
15522 {
15523         struct drm_device *dev = encoder->base.dev;
15524         struct intel_connector *connector;
15525
15526         for_each_connector_on_encoder(dev, &encoder->base, connector)
15527                 return connector;
15528
15529         return NULL;
15530 }
15531
15532 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
15533                               enum pipe pch_transcoder)
15534 {
15535         return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
15536                 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
15537 }
15538
/*
 * Bring a crtc's software and hardware state into agreement after the
 * initial hardware readout: clear BIOS debug leftovers, fix up vblank
 * and plane state, disable the pipe if it has no encoders, and mark
 * fifo underrun reporting as initially disabled.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	/* Clear any frame start delays used for debugging left by the BIOS */
	if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		I915_WRITE(reg,
			   I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
	}

	/* restore vblank interrupts to correct state */
	drm_crtc_vblank_reset(&crtc->base);
	if (crtc->active) {
		struct intel_plane *plane;

		drm_crtc_vblank_on(&crtc->base);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->base.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc->active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base, ctx);

	if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH trancoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
15605
/*
 * Sanitize an encoder after hardware readout: if it has an active
 * connector but no active pipe behind it, manually run the encoder's
 * disable hooks and clamp the connector state to off.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct intel_connector *connector;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (encoder->base.crtc) {
			struct drm_crtc_state *crtc_state = encoder->base.crtc->state;

			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);
			encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
			/* post_disable is optional; only call it if present */
			if (encoder->post_disable)
				encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
}
15649
15650 void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
15651 {
15652         i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
15653
15654         if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
15655                 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
15656                 i915_disable_vga(dev_priv);
15657         }
15658 }
15659
void i915_redisable_vga(struct drm_i915_private *dev_priv)
{
	/* This function can be called both from intel_modeset_setup_hw_state or
	 * at a very early point in our resume sequence, where the power well
	 * structures are not yet restored. Since this function is at a very
	 * paranoid "someone might have enabled VGA while we were not looking"
	 * level, just check if the power well is enabled instead of trying to
	 * follow the "don't touch the power well if we don't need it" policy
	 * the rest of the driver uses. */
	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
		return;

	i915_redisable_vga_power_on(dev_priv);

	/* Balance the conditional power reference taken above */
	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
}
15676
15677 /* FIXME read out full plane state for all planes */
15678 static void readout_plane_state(struct intel_crtc *crtc)
15679 {
15680         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
15681         struct intel_crtc_state *crtc_state =
15682                 to_intel_crtc_state(crtc->base.state);
15683         struct intel_plane *plane;
15684
15685         for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
15686                 struct intel_plane_state *plane_state =
15687                         to_intel_plane_state(plane->base.state);
15688                 enum pipe pipe;
15689                 bool visible;
15690
15691                 visible = plane->get_hw_state(plane, &pipe);
15692
15693                 intel_set_plane_visible(crtc_state, plane_state, visible);
15694         }
15695 }
15696
15697 static void intel_modeset_readout_hw_state(struct drm_device *dev)
15698 {
15699         struct drm_i915_private *dev_priv = to_i915(dev);
15700         enum pipe pipe;
15701         struct intel_crtc *crtc;
15702         struct intel_encoder *encoder;
15703         struct intel_connector *connector;
15704         struct drm_connector_list_iter conn_iter;
15705         int i;
15706
15707         dev_priv->active_crtcs = 0;
15708
15709         for_each_intel_crtc(dev, crtc) {
15710                 struct intel_crtc_state *crtc_state =
15711                         to_intel_crtc_state(crtc->base.state);
15712
15713                 __drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
15714                 memset(crtc_state, 0, sizeof(*crtc_state));
15715                 crtc_state->base.crtc = &crtc->base;
15716
15717                 crtc_state->base.active = crtc_state->base.enable =
15718                         dev_priv->display.get_pipe_config(crtc, crtc_state);
15719
15720                 crtc->base.enabled = crtc_state->base.enable;
15721                 crtc->active = crtc_state->base.active;
15722
15723                 if (crtc_state->base.active)
15724                         dev_priv->active_crtcs |= 1 << crtc->pipe;
15725
15726                 readout_plane_state(crtc);
15727
15728                 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
15729                               crtc->base.base.id, crtc->base.name,
15730                               enableddisabled(crtc_state->base.active));
15731         }
15732
15733         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
15734                 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
15735
15736                 pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
15737                                                         &pll->state.hw_state);
15738                 pll->state.crtc_mask = 0;
15739                 for_each_intel_crtc(dev, crtc) {
15740                         struct intel_crtc_state *crtc_state =
15741                                 to_intel_crtc_state(crtc->base.state);
15742
15743                         if (crtc_state->base.active &&
15744                             crtc_state->shared_dpll == pll)
15745                                 pll->state.crtc_mask |= 1 << crtc->pipe;
15746                 }
15747                 pll->active_mask = pll->state.crtc_mask;
15748
15749                 DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
15750                               pll->info->name, pll->state.crtc_mask, pll->on);
15751         }
15752
15753         for_each_intel_encoder(dev, encoder) {
15754                 pipe = 0;
15755
15756                 if (encoder->get_hw_state(encoder, &pipe)) {
15757                         struct intel_crtc_state *crtc_state;
15758
15759                         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
15760                         crtc_state = to_intel_crtc_state(crtc->base.state);
15761
15762                         encoder->base.crtc = &crtc->base;
15763                         encoder->get_config(encoder, crtc_state);
15764                 } else {
15765                         encoder->base.crtc = NULL;
15766                 }
15767
15768                 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
15769                               encoder->base.base.id, encoder->base.name,
15770                               enableddisabled(encoder->base.crtc),
15771                               pipe_name(pipe));
15772         }
15773
15774         drm_connector_list_iter_begin(dev, &conn_iter);
15775         for_each_intel_connector_iter(connector, &conn_iter) {
15776                 if (connector->get_hw_state(connector)) {
15777                         connector->base.dpms = DRM_MODE_DPMS_ON;
15778
15779                         encoder = connector->encoder;
15780                         connector->base.encoder = &encoder->base;
15781
15782                         if (encoder->base.crtc &&
15783                             encoder->base.crtc->state->active) {
15784                                 /*
15785                                  * This has to be done during hardware readout
15786                                  * because anything calling .crtc_disable may
15787                                  * rely on the connector_mask being accurate.
15788                                  */
15789                                 encoder->base.crtc->state->connector_mask |=
15790                                         drm_connector_mask(&connector->base);
15791                                 encoder->base.crtc->state->encoder_mask |=
15792                                         drm_encoder_mask(&encoder->base);
15793                         }
15794
15795                 } else {
15796                         connector->base.dpms = DRM_MODE_DPMS_OFF;
15797                         connector->base.encoder = NULL;
15798                 }
15799                 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
15800                               connector->base.base.id, connector->base.name,
15801                               enableddisabled(connector->base.encoder));
15802         }
15803         drm_connector_list_iter_end(&conn_iter);
15804
15805         for_each_intel_crtc(dev, crtc) {
15806                 struct intel_crtc_state *crtc_state =
15807                         to_intel_crtc_state(crtc->base.state);
15808                 int min_cdclk = 0;
15809
15810                 memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
15811                 if (crtc_state->base.active) {
15812                         intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
15813                         crtc->base.mode.hdisplay = crtc_state->pipe_src_w;
15814                         crtc->base.mode.vdisplay = crtc_state->pipe_src_h;
15815                         intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
15816                         WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
15817
15818                         /*
15819                          * The initial mode needs to be set in order to keep
15820                          * the atomic core happy. It wants a valid mode if the
15821                          * crtc's enabled, so we do the above call.
15822                          *
15823                          * But we don't set all the derived state fully, hence
15824                          * set a flag to indicate that a full recalculation is
15825                          * needed on the next commit.
15826                          */
15827                         crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;
15828
15829                         intel_crtc_compute_pixel_rate(crtc_state);
15830
15831                         if (dev_priv->display.modeset_calc_cdclk) {
15832                                 min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
15833                                 if (WARN_ON(min_cdclk < 0))
15834                                         min_cdclk = 0;
15835                         }
15836
15837                         drm_calc_timestamping_constants(&crtc->base,
15838                                                         &crtc_state->base.adjusted_mode);
15839                         update_scanline_offset(crtc);
15840                 }
15841
15842                 dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
15843                 dev_priv->min_voltage_level[crtc->pipe] =
15844                         crtc_state->min_voltage_level;
15845
15846                 intel_pipe_config_sanity_check(dev_priv, crtc_state);
15847         }
15848 }
15849
15850 static void
15851 get_encoder_power_domains(struct drm_i915_private *dev_priv)
15852 {
15853         struct intel_encoder *encoder;
15854
15855         for_each_intel_encoder(&dev_priv->drm, encoder) {
15856                 u64 get_domains;
15857                 enum intel_display_power_domain domain;
15858                 struct intel_crtc_state *crtc_state;
15859
15860                 if (!encoder->get_power_domains)
15861                         continue;
15862
15863                 /*
15864                  * MST-primary and inactive encoders don't have a crtc state
15865                  * and neither of these require any power domain references.
15866                  */
15867                 if (!encoder->base.crtc)
15868                         continue;
15869
15870                 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
15871                 get_domains = encoder->get_power_domains(encoder, crtc_state);
15872                 for_each_power_domain(domain, get_domains)
15873                         intel_display_power_get(dev_priv, domain);
15874         }
15875 }
15876
15877 static void intel_early_display_was(struct drm_i915_private *dev_priv)
15878 {
15879         /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
15880         if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
15881                 I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
15882                            DARBF_GATING_DIS);
15883
15884         if (IS_HASWELL(dev_priv)) {
15885                 /*
15886                  * WaRsPkgCStateDisplayPMReq:hsw
15887                  * System hang if this isn't done before disabling all planes!
15888                  */
15889                 I915_WRITE(CHICKEN_PAR1_1,
15890                            I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
15891         }
15892 }
15893
/*
 * Scan out the current hw modeset state and sanitize it to a state the
 * driver can work with: take over power domain references, fix up
 * plane/encoder/crtc inconsistencies, disable unused DPLLs and read out
 * the watermark state. The ordering of the steps below is load-bearing.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
                             struct drm_modeset_acquire_ctx *ctx)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe;
        struct intel_crtc *crtc;
        struct intel_encoder *encoder;
        int i;

        /* Keep everything powered while we poke at hw state. */
        intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

        intel_early_display_was(dev_priv);
        intel_modeset_readout_hw_state(dev);

        /* HW state is read out, now we need to sanitize this mess. */
        get_encoder_power_domains(dev_priv);

        intel_sanitize_plane_mapping(dev_priv);

        /* Encoders before crtcs: .crtc_disable may be invoked from here on. */
        for_each_intel_encoder(dev, encoder) {
                intel_sanitize_encoder(encoder);
        }

        for_each_pipe(dev_priv, pipe) {
                crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

                intel_sanitize_crtc(crtc, ctx);
                intel_dump_pipe_config(crtc, crtc->config,
                                       "[setup_hw_state]");
        }

        intel_modeset_update_connector_atomic_state(dev);

        /* Turn off any shared DPLL the BIOS left enabled with no user. */
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

                if (!pll->on || pll->active_mask)
                        continue;

                DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
                              pll->info->name);

                pll->info->funcs->disable(dev_priv, pll);
                pll->on = false;
        }

        /* Platform-specific watermark readout (and sanitize where needed). */
        if (IS_G4X(dev_priv)) {
                g4x_wm_get_hw_state(dev);
                g4x_wm_sanitize(dev_priv);
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                vlv_wm_get_hw_state(dev);
                vlv_wm_sanitize(dev_priv);
        } else if (INTEL_GEN(dev_priv) >= 9) {
                skl_wm_get_hw_state(dev);
        } else if (HAS_PCH_SPLIT(dev_priv)) {
                ilk_wm_get_hw_state(dev);
        }

        /*
         * Acquire the per-crtc power domains; any domains still flagged
         * as held here are stale references we need to drop.
         */
        for_each_intel_crtc(dev, crtc) {
                u64 put_domains;

                put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
                if (WARN_ON(put_domains))
                        modeset_put_power_domains(dev_priv, put_domains);
        }

        intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

        intel_fbc_init_pipe_state(dev_priv);
}
15968
15969 void intel_display_resume(struct drm_device *dev)
15970 {
15971         struct drm_i915_private *dev_priv = to_i915(dev);
15972         struct drm_atomic_state *state = dev_priv->modeset_restore_state;
15973         struct drm_modeset_acquire_ctx ctx;
15974         int ret;
15975
15976         dev_priv->modeset_restore_state = NULL;
15977         if (state)
15978                 state->acquire_ctx = &ctx;
15979
15980         drm_modeset_acquire_init(&ctx, 0);
15981
15982         while (1) {
15983                 ret = drm_modeset_lock_all_ctx(dev, &ctx);
15984                 if (ret != -EDEADLK)
15985                         break;
15986
15987                 drm_modeset_backoff(&ctx);
15988         }
15989
15990         if (!ret)
15991                 ret = __intel_display_resume(dev, state, &ctx);
15992
15993         intel_enable_ipc(dev_priv);
15994         drm_modeset_drop_locks(&ctx);
15995         drm_modeset_acquire_fini(&ctx);
15996
15997         if (ret)
15998                 DRM_ERROR("Restoring old state failed with %i\n", ret);
15999         if (state)
16000                 drm_atomic_state_put(state);
16001 }
16002
/*
 * Late connector registration hook: register the backlight device (if
 * any) for this connector.
 *
 * Returns 0 on success or the negative error code from the backlight
 * registration. The old goto-err-return-ret indirection added nothing
 * over propagating the return value directly.
 */
int intel_connector_register(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	return intel_backlight_device_register(intel_connector);
}
16017
/* Connector unregistration hook: tear down the backlight state. */
void intel_connector_unregister(struct drm_connector *connector)
{
	intel_backlight_device_unregister(to_intel_connector(connector));
	intel_panel_destroy_backlight(connector);
}
16025
16026 static void intel_hpd_poll_fini(struct drm_device *dev)
16027 {
16028         struct intel_connector *connector;
16029         struct drm_connector_list_iter conn_iter;
16030
16031         /* Kill all the work that may have been queued by hpd. */
16032         drm_connector_list_iter_begin(dev, &conn_iter);
16033         for_each_intel_connector_iter(connector, &conn_iter) {
16034                 if (connector->modeset_retry_work.func)
16035                         cancel_work_sync(&connector->modeset_retry_work);
16036                 if (connector->hdcp_shim) {
16037                         cancel_delayed_work_sync(&connector->hdcp_check_work);
16038                         cancel_work_sync(&connector->hdcp_prop_work);
16039                 }
16040         }
16041         drm_connector_list_iter_end(&conn_iter);
16042 }
16043
/*
 * Tear down all display/modeset state. The shutdown order below is
 * load-bearing: interrupts and polling first, then fbdev, then the drm
 * core state, and only then the low-level gmbus/workqueue pieces.
 */
void intel_modeset_cleanup(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);

        /* Drain any commits still in flight on the modeset workqueue. */
        flush_workqueue(dev_priv->modeset_wq);

        /* Run deferred atomic-state frees; the list must be empty after. */
        flush_work(&dev_priv->atomic_helper.free_work);
        WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));

        /*
         * Interrupts and polling as the first thing to avoid creating havoc.
         * Too much stuff here (turning of connectors, ...) would
         * experience fancy races otherwise.
         */
        intel_irq_uninstall(dev_priv);

        /*
         * Due to the hpd irq storm handling the hotplug work can re-arm the
         * poll handlers. Hence disable polling after hpd handling is shut down.
         */
        intel_hpd_poll_fini(dev);

        /* poll work can call into fbdev, hence clean that up afterwards */
        intel_fbdev_fini(dev_priv);

        intel_unregister_dsm_handler();

        intel_fbc_global_disable(dev_priv);

        /* flush any delayed tasks or pending work */
        flush_scheduled_work();

        drm_mode_config_cleanup(dev);

        intel_cleanup_overlay(dev_priv);

        intel_teardown_gmbus(dev_priv);

        destroy_workqueue(dev_priv->modeset_wq);
}
16084
/*
 * Link an intel_connector to its intel_encoder, both in the driver's own
 * bookkeeping (connector->encoder) and in the drm core's connector ->
 * encoder mapping.
 */
void intel_connector_attach_encoder(struct intel_connector *connector,
                                    struct intel_encoder *encoder)
{
        connector->encoder = encoder;
        drm_connector_attach_encoder(&connector->base, &encoder->base);
}
16091
16092 /*
16093  * set vga decode state - true == enable VGA decode
16094  */
16095 int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
16096 {
16097         unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
16098         u16 gmch_ctrl;
16099
16100         if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
16101                 DRM_ERROR("failed to read control word\n");
16102                 return -EIO;
16103         }
16104
16105         if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
16106                 return 0;
16107
16108         if (state)
16109                 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
16110         else
16111                 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
16112
16113         if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
16114                 DRM_ERROR("failed to write control word\n");
16115                 return -EIO;
16116         }
16117
16118         return 0;
16119 }
16120
16121 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
16122
/*
 * Snapshot of display register state, filled in by
 * intel_display_capture_error_state() and dumped by
 * intel_display_print_error_state().
 */
struct intel_display_error_state {

        u32 power_well_driver;

        /* Number of valid entries in transcoder[] below. */
        int num_transcoders;

        struct intel_cursor_error_state {
                u32 control;
                u32 position;
                u32 base;
                u32 size;
        } cursor[I915_MAX_PIPES];

        struct intel_pipe_error_state {
                /* False when the pipe power domain was off; regs not read. */
                bool power_domain_on;
                u32 source;
                u32 stat;
        } pipe[I915_MAX_PIPES];

        struct intel_plane_error_state {
                u32 control;
                u32 stride;
                u32 size;
                u32 pos;
                u32 addr;
                u32 surface;
                u32 tile_offset;
        } plane[I915_MAX_PIPES];

        struct intel_transcoder_error_state {
                /* False when the transcoder power domain was off. */
                bool power_domain_on;
                enum transcoder cpu_transcoder;

                u32 conf;

                /* Timing registers for this transcoder. */
                u32 htotal;
                u32 hblank;
                u32 hsync;
                u32 vtotal;
                u32 vblank;
                u32 vsync;
        } transcoder[4];
};
16166
16167 struct intel_display_error_state *
16168 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
16169 {
16170         struct intel_display_error_state *error;
16171         int transcoders[] = {
16172                 TRANSCODER_A,
16173                 TRANSCODER_B,
16174                 TRANSCODER_C,
16175                 TRANSCODER_EDP,
16176         };
16177         int i;
16178
16179         if (INTEL_INFO(dev_priv)->num_pipes == 0)
16180                 return NULL;
16181
16182         error = kzalloc(sizeof(*error), GFP_ATOMIC);
16183         if (error == NULL)
16184                 return NULL;
16185
16186         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
16187                 error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
16188
16189         for_each_pipe(dev_priv, i) {
16190                 error->pipe[i].power_domain_on =
16191                         __intel_display_power_is_enabled(dev_priv,
16192                                                          POWER_DOMAIN_PIPE(i));
16193                 if (!error->pipe[i].power_domain_on)
16194                         continue;
16195
16196                 error->cursor[i].control = I915_READ(CURCNTR(i));
16197                 error->cursor[i].position = I915_READ(CURPOS(i));
16198                 error->cursor[i].base = I915_READ(CURBASE(i));
16199
16200                 error->plane[i].control = I915_READ(DSPCNTR(i));
16201                 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
16202                 if (INTEL_GEN(dev_priv) <= 3) {
16203                         error->plane[i].size = I915_READ(DSPSIZE(i));
16204                         error->plane[i].pos = I915_READ(DSPPOS(i));
16205                 }
16206                 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
16207                         error->plane[i].addr = I915_READ(DSPADDR(i));
16208                 if (INTEL_GEN(dev_priv) >= 4) {
16209                         error->plane[i].surface = I915_READ(DSPSURF(i));
16210                         error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
16211                 }
16212
16213                 error->pipe[i].source = I915_READ(PIPESRC(i));
16214
16215                 if (HAS_GMCH_DISPLAY(dev_priv))
16216                         error->pipe[i].stat = I915_READ(PIPESTAT(i));
16217         }
16218
16219         /* Note: this does not include DSI transcoders. */
16220         error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
16221         if (HAS_DDI(dev_priv))
16222                 error->num_transcoders++; /* Account for eDP. */
16223
16224         for (i = 0; i < error->num_transcoders; i++) {
16225                 enum transcoder cpu_transcoder = transcoders[i];
16226
16227                 error->transcoder[i].power_domain_on =
16228                         __intel_display_power_is_enabled(dev_priv,
16229                                 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
16230                 if (!error->transcoder[i].power_domain_on)
16231                         continue;
16232
16233                 error->transcoder[i].cpu_transcoder = cpu_transcoder;
16234
16235                 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
16236                 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
16237                 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
16238                 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
16239                 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
16240                 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
16241                 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
16242         }
16243
16244         return error;
16245 }
16246
16247 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
16248
/*
 * Dump a display error-state snapshot (as captured by
 * intel_display_capture_error_state()) into the error state buffer.
 * A NULL snapshot (capture failed or no pipes) prints nothing.
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
                                struct intel_display_error_state *error)
{
        struct drm_i915_private *dev_priv = m->i915;
        int i;

        if (!error)
                return;

        err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                err_printf(m, "PWR_WELL_CTL2: %08x\n",
                           error->power_well_driver);
        for_each_pipe(dev_priv, i) {
                err_printf(m, "Pipe [%d]:\n", i);
                err_printf(m, "  Power: %s\n",
                           onoff(error->pipe[i].power_domain_on));
                err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
                err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

                err_printf(m, "Plane [%d]:\n", i);
                err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
                err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
                /* Mirror the gen checks used at capture time. */
                if (INTEL_GEN(dev_priv) <= 3) {
                        err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
                        err_printf(m, "  POS: %08x\n", error->plane[i].pos);
                }
                if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
                        err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
                if (INTEL_GEN(dev_priv) >= 4) {
                        err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
                        err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
                }

                err_printf(m, "Cursor [%d]:\n", i);
                err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
                err_printf(m, "  POS: %08x\n", error->cursor[i].position);
                err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
        }

        for (i = 0; i < error->num_transcoders; i++) {
                err_printf(m, "CPU transcoder: %s\n",
                           transcoder_name(error->transcoder[i].cpu_transcoder));
                err_printf(m, "  Power: %s\n",
                           onoff(error->transcoder[i].power_domain_on));
                err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
                err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
                err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
                err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
                err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
                err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
                err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
        }
}
16304
16305 #endif