drm/i915: Update DRIVER_DATE to 20161024
[platform/kernel/linux-rpi.git] / drivers / gpu / drm / i915 / intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/dmi.h>
28 #include <linux/module.h>
29 #include <linux/input.h>
30 #include <linux/i2c.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/vgaarb.h>
34 #include <drm/drm_edid.h>
35 #include <drm/drmP.h>
36 #include "intel_drv.h"
37 #include "intel_frontbuffer.h"
38 #include <drm/i915_drm.h>
39 #include "i915_drv.h"
40 #include "i915_gem_dmabuf.h"
41 #include "intel_dsi.h"
42 #include "i915_trace.h"
43 #include <drm/drm_atomic.h>
44 #include <drm/drm_atomic_helper.h>
45 #include <drm/drm_dp_helper.h>
46 #include <drm/drm_crtc_helper.h>
47 #include <drm/drm_plane_helper.h>
48 #include <drm/drm_rect.h>
49 #include <linux/dma_remapping.h>
50 #include <linux/reservation.h>
51
52 static bool is_mmio_work(struct intel_flip_work *work)
53 {
54         return work->mmio_work.func;
55 }
56
/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
        DRM_FORMAT_C8,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_XRGB1555,
        DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 (pre-SKL) */
static const uint32_t i965_primary_formats[] = {
        DRM_FORMAT_C8,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_XBGR8888,
        DRM_FORMAT_XRGB2101010,
        DRM_FORMAT_XBGR2101010,
};

/* Primary plane formats for SKL+; adds alpha and packed YUV variants. */
static const uint32_t skl_primary_formats[] = {
        DRM_FORMAT_C8,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_XBGR8888,
        DRM_FORMAT_ARGB8888,
        DRM_FORMAT_ABGR8888,
        DRM_FORMAT_XRGB2101010,
        DRM_FORMAT_XBGR2101010,
        DRM_FORMAT_YUYV,
        DRM_FORMAT_YVYU,
        DRM_FORMAT_UYVY,
        DRM_FORMAT_VYUY,
};

/* Cursor formats: the cursor plane only takes 32bpp ARGB. */
static const uint32_t intel_cursor_formats[] = {
        DRM_FORMAT_ARGB8888,
};
94
/* Forward declarations for helpers defined later in this file. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
                                struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
                                   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct drm_device *dev,
                                  struct intel_framebuffer *ifb,
                                  struct drm_mode_fb_cmd2 *mode_cmd,
                                  struct drm_i915_gem_object *obj);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
                                         struct intel_link_m_n *m_n,
                                         struct intel_link_m_n *m2_n2);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipemisc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
        struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
static int ilk_max_pixel_rate(struct drm_atomic_state *state);
static int bxt_calc_cdclk(int max_pixclk);
128
/*
 * Per-platform DPLL divisor limits.  Each pair gives the allowed
 * [min, max] range for the corresponding clock (kHz) or divider.
 * For p2, p2_slow is used for target dot clocks below dot_limit and
 * p2_fast at/above it; LVDS instead selects p2 by single vs dual
 * channel (see i9xx_select_p2_div()).
 */
struct intel_limit {
        struct {
                int min, max;
        } dot, vco, n, m, m1, m2, p, p1;

        struct {
                int dot_limit;
                int p2_slow, p2_fast;
        } p2;
};
139
/* returns HPLL frequency in kHz */
static int valleyview_get_vco(struct drm_i915_private *dev_priv)
{
        int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

        /* Obtain SKU information */
        mutex_lock(&dev_priv->sb_lock);
        hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
                CCK_FUSE_HPLL_FREQ_MASK;
        mutex_unlock(&dev_priv->sb_lock);

        /* Fuse field indexes the fixed VCO table (MHz); convert to kHz. */
        return vco_freq[hpll_freq] * 1000;
}
153
/*
 * Read a CCK clock divider register and derive the resulting clock
 * rate in kHz: rate = ref_freq * 2 / (divider + 1), rounded to nearest.
 * @name is only used in the warning message below.
 */
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
                      const char *name, u32 reg, int ref_freq)
{
        u32 val;
        int divider;

        /* Sideband (CCK) access is serialized by sb_lock. */
        mutex_lock(&dev_priv->sb_lock);
        val = vlv_cck_read(dev_priv, reg);
        mutex_unlock(&dev_priv->sb_lock);

        divider = val & CCK_FREQUENCY_VALUES;

        /* The status field should mirror the programmed divider. */
        WARN((val & CCK_FREQUENCY_STATUS) !=
             (divider << CCK_FREQUENCY_STATUS_SHIFT),
             "%s change in progress\n", name);

        return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
172
173 static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
174                                   const char *name, u32 reg)
175 {
176         if (dev_priv->hpll_freq == 0)
177                 dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
178
179         return vlv_get_cck_clock(dev_priv, name, reg,
180                                  dev_priv->hpll_freq);
181 }
182
183 static int
184 intel_pch_rawclk(struct drm_i915_private *dev_priv)
185 {
186         return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
187 }
188
/* Raw clock (hrawclk) on VLV/CHV, in kHz, derived from the CCK display
 * reference clock control register. */
static int
intel_vlv_hrawclk(struct drm_i915_private *dev_priv)
{
        /* RAWCLK_FREQ_VLV register updated from power well code */
        return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
                                      CCK_DISPLAY_REF_CLOCK_CONTROL);
}
196
/*
 * Raw clock (hrawclk) on g4x-class hardware, in kHz, derived from the
 * FSB frequency encoded in CLKCFG.
 */
static int
intel_g4x_hrawclk(struct drm_i915_private *dev_priv)
{
        uint32_t clkcfg;

        /* hrawclock is 1/4 the FSB frequency */
        clkcfg = I915_READ(CLKCFG);
        switch (clkcfg & CLKCFG_FSB_MASK) {
        case CLKCFG_FSB_400:
                return 100000;
        case CLKCFG_FSB_533:
                return 133333;
        case CLKCFG_FSB_667:
                return 166667;
        case CLKCFG_FSB_800:
                return 200000;
        case CLKCFG_FSB_1067:
                return 266667;
        case CLKCFG_FSB_1333:
                return 333333;
        /* these two are just a guess; one of them might be right */
        case CLKCFG_FSB_1600:
        case CLKCFG_FSB_1600_ALT:
                return 400000;
        default:
                return 133333;
        }
}
225
/*
 * Cache the platform's raw clock frequency (kHz) in
 * dev_priv->rawclk_freq.  Platforms without a (needed) rawclk leave the
 * cached value untouched.
 */
void intel_update_rawclk(struct drm_i915_private *dev_priv)
{
        if (HAS_PCH_SPLIT(dev_priv))
                dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv);
        else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                dev_priv->rawclk_freq = intel_vlv_hrawclk(dev_priv);
        else if (IS_G4X(dev_priv) || IS_PINEVIEW(dev_priv))
                dev_priv->rawclk_freq = intel_g4x_hrawclk(dev_priv);
        else
                return; /* no rawclk on other platforms, or no need to know it */

        DRM_DEBUG_DRIVER("rawclk rate: %d kHz\n", dev_priv->rawclk_freq);
}
239
240 static void intel_update_czclk(struct drm_i915_private *dev_priv)
241 {
242         if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
243                 return;
244
245         dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
246                                                       CCK_CZ_CLOCK_CONTROL);
247
248         DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
249 }
250
/*
 * FDI link frequency for the given pipe config.
 * NOTE(review): the "units of 100MHz" comment below looks inconsistent
 * with the 270000 constant and port_clock (kHz elsewhere) — confirm the
 * intended unit against the callers.
 */
static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_i915_private *dev_priv,
                    const struct intel_crtc_state *pipe_config)
{
        if (HAS_DDI(dev_priv))
                return pipe_config->port_clock; /* SPLL */
        else if (IS_GEN5(dev_priv))
                return ((I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2) * 10000;
        else
                return 270000;
}
262
/* Gen2 (i8xx) DPLL limits for DAC output. */
static const struct intel_limit intel_limits_i8xx_dac = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 2, .max = 33 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 4, .p2_fast = 2 },
};

/* Gen2 (i8xx) DPLL limits for DVO output; only p2_fast differs from DAC. */
static const struct intel_limit intel_limits_i8xx_dvo = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 2, .max = 33 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 4, .p2_fast = 4 },
};

/* Gen2 (i8xx) DPLL limits for LVDS output. */
static const struct intel_limit intel_limits_i8xx_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 1, .max = 6 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 14, .p2_fast = 7 },
};

/* Gen3/4 (i9xx) DPLL limits for SDVO/DAC output. */
static const struct intel_limit intel_limits_i9xx_sdvo = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
        .m = { .min = 70, .max = 120 },
        .m1 = { .min = 8, .max = 18 },
        .m2 = { .min = 3, .max = 7 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 200000,
                .p2_slow = 10, .p2_fast = 5 },
};

/* Gen3/4 (i9xx) DPLL limits for LVDS output. */
static const struct intel_limit intel_limits_i9xx_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
        .m = { .min = 70, .max = 120 },
        .m1 = { .min = 8, .max = 18 },
        .m2 = { .min = 3, .max = 7 },
        .p = { .min = 7, .max = 98 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 112000,
                .p2_slow = 14, .p2_fast = 7 },
};
327
328
/* G4x DPLL limits for SDVO output. */
static const struct intel_limit intel_limits_g4x_sdvo = {
        .dot = { .min = 25000, .max = 270000 },
        .vco = { .min = 1750000, .max = 3500000},
        .n = { .min = 1, .max = 4 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 10, .max = 30 },
        .p1 = { .min = 1, .max = 3},
        .p2 = { .dot_limit = 270000,
                .p2_slow = 10,
                .p2_fast = 10
        },
};

/* G4x DPLL limits for HDMI output. */
static const struct intel_limit intel_limits_g4x_hdmi = {
        .dot = { .min = 22000, .max = 400000 },
        .vco = { .min = 1750000, .max = 3500000},
        .n = { .min = 1, .max = 4 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 16, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8},
        .p2 = { .dot_limit = 165000,
                .p2_slow = 10, .p2_fast = 5 },
};

/* G4x DPLL limits for single-channel LVDS (p2 fixed: dot_limit 0). */
static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
        .dot = { .min = 20000, .max = 115000 },
        .vco = { .min = 1750000, .max = 3500000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 0,
                .p2_slow = 14, .p2_fast = 14
        },
};

/* G4x DPLL limits for dual-channel LVDS (p2 fixed: dot_limit 0). */
static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
        .dot = { .min = 80000, .max = 224000 },
        .vco = { .min = 1750000, .max = 3500000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 14, .max = 42 },
        .p1 = { .min = 2, .max = 6 },
        .p2 = { .dot_limit = 0,
                .p2_slow = 7, .p2_fast = 7
        },
};

/* Pineview DPLL limits for SDVO output. */
static const struct intel_limit intel_limits_pineview_sdvo = {
        .dot = { .min = 20000, .max = 400000},
        .vco = { .min = 1700000, .max = 3500000 },
        /* Pineview's Ncounter is a ring counter */
        .n = { .min = 3, .max = 6 },
        .m = { .min = 2, .max = 256 },
        /* Pineview only has one combined m divider, which we treat as m2. */
        .m1 = { .min = 0, .max = 0 },
        .m2 = { .min = 0, .max = 254 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 200000,
                .p2_slow = 10, .p2_fast = 5 },
};

/* Pineview DPLL limits for LVDS output. */
static const struct intel_limit intel_limits_pineview_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1700000, .max = 3500000 },
        .n = { .min = 3, .max = 6 },
        .m = { .min = 2, .max = 256 },
        .m1 = { .min = 0, .max = 0 },
        .m2 = { .min = 0, .max = 254 },
        .p = { .min = 7, .max = 112 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 112000,
                .p2_slow = 14, .p2_fast = 14 },
};
412
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 5 },
        .m = { .min = 79, .max = 127 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 10, .p2_fast = 5 },
};

/* Ironlake single-channel LVDS limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 118 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake dual-channel LVDS limits. */
static const struct intel_limit intel_limits_ironlake_dual_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 127 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 14, .max = 56 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 2 },
        .m = { .min = 79, .max = 126 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 14, .p2_fast = 14 },
};

/* Dual-channel LVDS limits with a 100MHz reference clock. */
static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 126 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 14, .max = 42 },
        .p1 = { .min = 2, .max = 6 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 7, .p2_fast = 7 },
};
483
/* VLV limits; note: no .m/.p ranges — intel_PLL_is_valid() skips those here. */
static const struct intel_limit intel_limits_vlv = {
         /*
          * These are the data rate limits (measured in fast clocks)
          * since those are the strictest limits we have. The fast
          * clock and actual rate limits are more relaxed, so checking
          * them would make no difference.
          */
        .dot = { .min = 25000 * 5, .max = 270000 * 5 },
        .vco = { .min = 4000000, .max = 6000000 },
        .n = { .min = 1, .max = 7 },
        .m1 = { .min = 2, .max = 3 },
        .m2 = { .min = 11, .max = 156 },
        .p1 = { .min = 2, .max = 3 },
        .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

/* CHV limits; m2 values carry 22 fractional bits (see chv_calc_dpll_params). */
static const struct intel_limit intel_limits_chv = {
        /*
         * These are the data rate limits (measured in fast clocks)
         * since those are the strictest limits we have.  The fast
         * clock and actual rate limits are more relaxed, so checking
         * them would make no difference.
         */
        .dot = { .min = 25000 * 5, .max = 540000 * 5},
        .vco = { .min = 4800000, .max = 6480000 },
        .n = { .min = 1, .max = 1 },
        .m1 = { .min = 2, .max = 2 },
        .m2 = { .min = 24 << 22, .max = 175 << 22 },
        .p1 = { .min = 2, .max = 4 },
        .p2 = { .p2_slow = 1, .p2_fast = 14 },
};

/* Broxton limits. */
static const struct intel_limit intel_limits_bxt = {
        /* FIXME: find real dot limits */
        .dot = { .min = 0, .max = INT_MAX },
        .vco = { .min = 4800000, .max = 6700000 },
        .n = { .min = 1, .max = 1 },
        .m1 = { .min = 2, .max = 2 },
        /* FIXME: find real m2 limits */
        .m2 = { .min = 2 << 22, .max = 255 << 22 },
        .p1 = { .min = 2, .max = 4 },
        .p2 = { .p2_slow = 1, .p2_fast = 20 },
};
527
/* Convenience wrapper: does this crtc_state require a full modeset? */
static bool
needs_modeset(struct drm_crtc_state *state)
{
        return drm_atomic_crtc_needs_modeset(state);
}
533
534 /*
535  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
536  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
537  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
538  * The helpers' return value is the rate of the clock that is fed to the
539  * display engine's pipe which can be the above fast dot clock rate or a
540  * divided-down version of it.
541  */
542 /* m1 is reserved as 0 in Pineview, n is a ring counter */
543 static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
544 {
545         clock->m = clock->m2 + 2;
546         clock->p = clock->p1 * clock->p2;
547         if (WARN_ON(clock->n == 0 || clock->p == 0))
548                 return 0;
549         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
550         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
551
552         return clock->dot;
553 }
554
555 static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
556 {
557         return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
558 }
559
560 static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
561 {
562         clock->m = i9xx_dpll_compute_m(clock);
563         clock->p = clock->p1 * clock->p2;
564         if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
565                 return 0;
566         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
567         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
568
569         return clock->dot;
570 }
571
572 static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
573 {
574         clock->m = clock->m1 * clock->m2;
575         clock->p = clock->p1 * clock->p2;
576         if (WARN_ON(clock->n == 0 || clock->p == 0))
577                 return 0;
578         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
579         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
580
581         return clock->dot / 5;
582 }
583
/*
 * CHV variant of the DPLL parameter calculation.  m2 is stored with 22
 * fractional bits (see intel_limits_chv), compensated by the "n << 22"
 * divisor; the intermediate product needs 64-bit math.  Returns the
 * pipe clock (fast dot clock / 5) in kHz, or 0 on invalid divisors.
 */
int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
        clock->m = clock->m1 * clock->m2;
        clock->p = clock->p1 * clock->p2;
        if (WARN_ON(clock->n == 0 || clock->p == 0))
                return 0;
        clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
                        clock->n << 22);
        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

        return clock->dot / 5;
}
596
/* Bail out of the enclosing validator with false; debug print disabled. */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
                               const struct intel_limit *limit,
                               const struct dpll *clock)
{
        if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
                INTELPllInvalid("n out of range\n");
        if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
                INTELPllInvalid("p1 out of range\n");
        if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
                INTELPllInvalid("m2 out of range\n");
        if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
                INTELPllInvalid("m1 out of range\n");

        /* Pineview (m1 always 0) and VLV/CHV/BXT don't require m1 > m2. */
        if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
            !IS_CHERRYVIEW(dev_priv) && !IS_BROXTON(dev_priv))
                if (clock->m1 <= clock->m2)
                        INTELPllInvalid("m1 <= m2\n");

        /* The VLV/CHV/BXT limit tables don't populate .m and .p ranges. */
        if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
            !IS_BROXTON(dev_priv)) {
                if (clock->p < limit->p.min || limit->p.max < clock->p)
                        INTELPllInvalid("p out of range\n");
                if (clock->m < limit->m.min || limit->m.max < clock->m)
                        INTELPllInvalid("m out of range\n");
        }

        if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
                INTELPllInvalid("vco out of range\n");
        /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
         * connector, etc., rather than just a single range.
         */
        if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
                INTELPllInvalid("dot out of range\n");

        return true;
}
639
640 static int
641 i9xx_select_p2_div(const struct intel_limit *limit,
642                    const struct intel_crtc_state *crtc_state,
643                    int target)
644 {
645         struct drm_device *dev = crtc_state->base.crtc->dev;
646
647         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
648                 /*
649                  * For LVDS just rely on its current settings for dual-channel.
650                  * We haven't figured out how to reliably set up different
651                  * single/dual channel state, if we even can.
652                  */
653                 if (intel_is_dual_link_lvds(dev))
654                         return limit->p2.p2_fast;
655                 else
656                         return limit->p2.p2_slow;
657         } else {
658                 if (target < limit->p2.dot_limit)
659                         return limit->p2.p2_slow;
660                 else
661                         return limit->p2.p2_fast;
662         }
663 }
664
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
                    struct intel_crtc_state *crtc_state,
                    int target, int refclk, struct dpll *match_clock,
                    struct dpll *best_clock)
{
        struct drm_device *dev = crtc_state->base.crtc->dev;
        struct dpll clock;
        int err = target; /* best error so far; start pessimal */

        memset(best_clock, 0, sizeof(*best_clock));

        clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

        /* Exhaustive search over the m1/m2/n/p1 ranges. */
        for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
             clock.m1++) {
                for (clock.m2 = limit->m2.min;
                     clock.m2 <= limit->m2.max; clock.m2++) {
                        /* these platforms require m1 > m2 (see intel_PLL_is_valid()) */
                        if (clock.m2 >= clock.m1)
                                break;
                        for (clock.n = limit->n.min;
                             clock.n <= limit->n.max; clock.n++) {
                                for (clock.p1 = limit->p1.min;
                                        clock.p1 <= limit->p1.max; clock.p1++) {
                                        int this_err;

                                        i9xx_calc_dpll_params(refclk, &clock);
                                        if (!intel_PLL_is_valid(to_i915(dev),
                                                                limit,
                                                                &clock))
                                                continue;
                                        if (match_clock &&
                                            clock.p != match_clock->p)
                                                continue;

                                        this_err = abs(clock.dot - target);
                                        if (this_err < err) {
                                                *best_clock = clock;
                                                err = this_err;
                                        }
                                }
                        }
                }
        }

        /* true iff some candidate beat the initial worst-case error */
        return (err != target);
}
722
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
                   int target, int refclk, struct dpll *match_clock,
                   struct dpll *best_clock)
{
        struct drm_device *dev = crtc_state->base.crtc->dev;
        struct dpll clock;
        int err = target; /* best error so far; start pessimal */

        memset(best_clock, 0, sizeof(*best_clock));

        clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

        /*
         * Same exhaustive search as i9xx_find_best_dpll(), but without
         * the m1 > m2 early-out (Pineview's m1 is always 0) and using
         * the Pineview parameter calculation.
         */
        for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
             clock.m1++) {
                for (clock.m2 = limit->m2.min;
                     clock.m2 <= limit->m2.max; clock.m2++) {
                        for (clock.n = limit->n.min;
                             clock.n <= limit->n.max; clock.n++) {
                                for (clock.p1 = limit->p1.min;
                                        clock.p1 <= limit->p1.max; clock.p1++) {
                                        int this_err;

                                        pnv_calc_dpll_params(refclk, &clock);
                                        if (!intel_PLL_is_valid(to_i915(dev),
                                                                limit,
                                                                &clock))
                                                continue;
                                        if (match_clock &&
                                            clock.p != match_clock->p)
                                                continue;

                                        this_err = abs(clock.dot - target);
                                        if (this_err < err) {
                                                *best_clock = clock;
                                                err = this_err;
                                        }
                                }
                        }
                }
        }

        /* true iff some candidate beat the initial worst-case error */
        return (err != target);
}
778
779 /*
780  * Returns a set of divisors for the desired target clock with the given
781  * refclk, or FALSE.  The returned values represent the clock equation:
782  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
783  *
784  * Target and reference clocks are specified in kHz.
785  *
786  * If match_clock is provided, then best_clock P divider must match the P
787  * divider from @match_clock used for LVDS downclocking.
788  */
789 static bool
790 g4x_find_best_dpll(const struct intel_limit *limit,
791                    struct intel_crtc_state *crtc_state,
792                    int target, int refclk, struct dpll *match_clock,
793                    struct dpll *best_clock)
794 {
795         struct drm_device *dev = crtc_state->base.crtc->dev;
796         struct dpll clock;
797         int max_n;
798         bool found = false;
799         /* approximately equals target * 0.00585 */
800         int err_most = (target >> 8) + (target >> 9);
801
802         memset(best_clock, 0, sizeof(*best_clock));
803
804         clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
805
806         max_n = limit->n.max;
807         /* based on hardware requirement, prefer smaller n to precision */
808         for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
809                 /* based on hardware requirement, prefere larger m1,m2 */
810                 for (clock.m1 = limit->m1.max;
811                      clock.m1 >= limit->m1.min; clock.m1--) {
812                         for (clock.m2 = limit->m2.max;
813                              clock.m2 >= limit->m2.min; clock.m2--) {
814                                 for (clock.p1 = limit->p1.max;
815                                      clock.p1 >= limit->p1.min; clock.p1--) {
816                                         int this_err;
817
818                                         i9xx_calc_dpll_params(refclk, &clock);
819                                         if (!intel_PLL_is_valid(to_i915(dev),
820                                                                 limit,
821                                                                 &clock))
822                                                 continue;
823
824                                         this_err = abs(clock.dot - target);
825                                         if (this_err < err_most) {
826                                                 *best_clock = clock;
827                                                 err_most = this_err;
828                                                 max_n = clock.n;
829                                                 found = true;
830                                         }
831                                 }
832                         }
833                 }
834         }
835         return found;
836 }
837
838 /*
839  * Check if the calculated PLL configuration is more optimal compared to the
840  * best configuration and error found so far. Return the calculated error.
841  */
842 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
843                                const struct dpll *calculated_clock,
844                                const struct dpll *best_clock,
845                                unsigned int best_error_ppm,
846                                unsigned int *error_ppm)
847 {
848         /*
849          * For CHV ignore the error and consider only the P value.
850          * Prefer a bigger P value based on HW requirements.
851          */
852         if (IS_CHERRYVIEW(to_i915(dev))) {
853                 *error_ppm = 0;
854
855                 return calculated_clock->p > best_clock->p;
856         }
857
858         if (WARN_ON_ONCE(!target_freq))
859                 return false;
860
861         *error_ppm = div_u64(1000000ULL *
862                                 abs(target_freq - calculated_clock->dot),
863                              target_freq);
864         /*
865          * Prefer a better P value over a better (smaller) error if the error
866          * is small. Ensure this preference for future configurations too by
867          * setting the error to 0.
868          */
869         if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
870                 *error_ppm = 0;
871
872                 return true;
873         }
874
875         return *error_ppm + 10 < best_error_ppm;
876 }
877
878 /*
879  * Returns a set of divisors for the desired target clock with the given
880  * refclk, or FALSE.  The returned values represent the clock equation:
881  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
882  */
883 static bool
884 vlv_find_best_dpll(const struct intel_limit *limit,
885                    struct intel_crtc_state *crtc_state,
886                    int target, int refclk, struct dpll *match_clock,
887                    struct dpll *best_clock)
888 {
889         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
890         struct drm_device *dev = crtc->base.dev;
891         struct dpll clock;
892         unsigned int bestppm = 1000000;
893         /* min update 19.2 MHz */
894         int max_n = min(limit->n.max, refclk / 19200);
895         bool found = false;
896
897         target *= 5; /* fast clock */
898
899         memset(best_clock, 0, sizeof(*best_clock));
900
901         /* based on hardware requirement, prefer smaller n to precision */
902         for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
903                 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
904                         for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
905                              clock.p2 -= clock.p2 > 10 ? 2 : 1) {
906                                 clock.p = clock.p1 * clock.p2;
907                                 /* based on hardware requirement, prefer bigger m1,m2 values */
908                                 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
909                                         unsigned int ppm;
910
911                                         clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
912                                                                      refclk * clock.m1);
913
914                                         vlv_calc_dpll_params(refclk, &clock);
915
916                                         if (!intel_PLL_is_valid(to_i915(dev),
917                                                                 limit,
918                                                                 &clock))
919                                                 continue;
920
921                                         if (!vlv_PLL_is_optimal(dev, target,
922                                                                 &clock,
923                                                                 best_clock,
924                                                                 bestppm, &ppm))
925                                                 continue;
926
927                                         *best_clock = clock;
928                                         bestppm = ppm;
929                                         found = true;
930                                 }
931                         }
932                 }
933         }
934
935         return found;
936 }
937
938 /*
939  * Returns a set of divisors for the desired target clock with the given
940  * refclk, or FALSE.  The returned values represent the clock equation:
941  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
942  */
943 static bool
944 chv_find_best_dpll(const struct intel_limit *limit,
945                    struct intel_crtc_state *crtc_state,
946                    int target, int refclk, struct dpll *match_clock,
947                    struct dpll *best_clock)
948 {
949         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
950         struct drm_device *dev = crtc->base.dev;
951         unsigned int best_error_ppm;
952         struct dpll clock;
953         uint64_t m2;
954         int found = false;
955
956         memset(best_clock, 0, sizeof(*best_clock));
957         best_error_ppm = 1000000;
958
959         /*
960          * Based on hardware doc, the n always set to 1, and m1 always
961          * set to 2.  If requires to support 200Mhz refclk, we need to
962          * revisit this because n may not 1 anymore.
963          */
964         clock.n = 1, clock.m1 = 2;
965         target *= 5;    /* fast clock */
966
967         for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
968                 for (clock.p2 = limit->p2.p2_fast;
969                                 clock.p2 >= limit->p2.p2_slow;
970                                 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
971                         unsigned int error_ppm;
972
973                         clock.p = clock.p1 * clock.p2;
974
975                         m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
976                                         clock.n) << 22, refclk * clock.m1);
977
978                         if (m2 > INT_MAX/clock.m1)
979                                 continue;
980
981                         clock.m2 = m2;
982
983                         chv_calc_dpll_params(refclk, &clock);
984
985                         if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
986                                 continue;
987
988                         if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
989                                                 best_error_ppm, &error_ppm))
990                                 continue;
991
992                         *best_clock = clock;
993                         best_error_ppm = error_ppm;
994                         found = true;
995                 }
996         }
997
998         return found;
999 }
1000
1001 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
1002                         struct dpll *best_clock)
1003 {
1004         int refclk = 100000;
1005         const struct intel_limit *limit = &intel_limits_bxt;
1006
1007         return chv_find_best_dpll(limit, crtc_state,
1008                                   target_clock, refclk, NULL, best_clock);
1009 }
1010
1011 bool intel_crtc_active(struct drm_crtc *crtc)
1012 {
1013         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1014
1015         /* Be paranoid as we can arrive here with only partial
1016          * state retrieved from the hardware during setup.
1017          *
1018          * We can ditch the adjusted_mode.crtc_clock check as soon
1019          * as Haswell has gained clock readout/fastboot support.
1020          *
1021          * We can ditch the crtc->primary->fb check as soon as we can
1022          * properly reconstruct framebuffers.
1023          *
1024          * FIXME: The intel_crtc->active here should be switched to
1025          * crtc->state->active once we have proper CRTC states wired up
1026          * for atomic.
1027          */
1028         return intel_crtc->active && crtc->primary->state->fb &&
1029                 intel_crtc->config->base.adjusted_mode.crtc_clock;
1030 }
1031
1032 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1033                                              enum pipe pipe)
1034 {
1035         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1036         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1037
1038         return intel_crtc->config->cpu_transcoder;
1039 }
1040
1041 static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
1042 {
1043         struct drm_i915_private *dev_priv = to_i915(dev);
1044         i915_reg_t reg = PIPEDSL(pipe);
1045         u32 line1, line2;
1046         u32 line_mask;
1047
1048         if (IS_GEN2(dev_priv))
1049                 line_mask = DSL_LINEMASK_GEN2;
1050         else
1051                 line_mask = DSL_LINEMASK_GEN3;
1052
1053         line1 = I915_READ(reg) & line_mask;
1054         msleep(5);
1055         line2 = I915_READ(reg) & line_mask;
1056
1057         return line1 == line2;
1058 }
1059
1060 /*
1061  * intel_wait_for_pipe_off - wait for pipe to turn off
1062  * @crtc: crtc whose pipe to wait for
1063  *
1064  * After disabling a pipe, we can't wait for vblank in the usual way,
1065  * spinning on the vblank interrupt status bit, since we won't actually
1066  * see an interrupt when the pipe is disabled.
1067  *
1068  * On Gen4 and above:
1069  *   wait for the pipe register state bit to turn off
1070  *
1071  * Otherwise:
1072  *   wait for the display line value to settle (it usually
1073  *   ends up stopping at the start of the next frame).
1074  *
1075  */
1076 static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
1077 {
1078         struct drm_device *dev = crtc->base.dev;
1079         struct drm_i915_private *dev_priv = to_i915(dev);
1080         enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
1081         enum pipe pipe = crtc->pipe;
1082
1083         if (INTEL_INFO(dev)->gen >= 4) {
1084                 i915_reg_t reg = PIPECONF(cpu_transcoder);
1085
1086                 /* Wait for the Pipe State to go off */
1087                 if (intel_wait_for_register(dev_priv,
1088                                             reg, I965_PIPECONF_ACTIVE, 0,
1089                                             100))
1090                         WARN(1, "pipe_off wait timed out\n");
1091         } else {
1092                 /* Wait for the display line to settle */
1093                 if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
1094                         WARN(1, "pipe_off wait timed out\n");
1095         }
1096 }
1097
1098 /* Only for pre-ILK configs */
1099 void assert_pll(struct drm_i915_private *dev_priv,
1100                 enum pipe pipe, bool state)
1101 {
1102         u32 val;
1103         bool cur_state;
1104
1105         val = I915_READ(DPLL(pipe));
1106         cur_state = !!(val & DPLL_VCO_ENABLE);
1107         I915_STATE_WARN(cur_state != state,
1108              "PLL state assertion failure (expected %s, current %s)\n",
1109                         onoff(state), onoff(cur_state));
1110 }
1111
1112 /* XXX: the dsi pll is shared between MIPI DSI ports */
1113 void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
1114 {
1115         u32 val;
1116         bool cur_state;
1117
1118         mutex_lock(&dev_priv->sb_lock);
1119         val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
1120         mutex_unlock(&dev_priv->sb_lock);
1121
1122         cur_state = val & DSI_PLL_VCO_EN;
1123         I915_STATE_WARN(cur_state != state,
1124              "DSI PLL state assertion failure (expected %s, current %s)\n",
1125                         onoff(state), onoff(cur_state));
1126 }
1127
1128 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1129                           enum pipe pipe, bool state)
1130 {
1131         bool cur_state;
1132         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1133                                                                       pipe);
1134
1135         if (HAS_DDI(dev_priv)) {
1136                 /* DDI does not have a specific FDI_TX register */
1137                 u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
1138                 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1139         } else {
1140                 u32 val = I915_READ(FDI_TX_CTL(pipe));
1141                 cur_state = !!(val & FDI_TX_ENABLE);
1142         }
1143         I915_STATE_WARN(cur_state != state,
1144              "FDI TX state assertion failure (expected %s, current %s)\n",
1145                         onoff(state), onoff(cur_state));
1146 }
1147 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1148 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1149
1150 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1151                           enum pipe pipe, bool state)
1152 {
1153         u32 val;
1154         bool cur_state;
1155
1156         val = I915_READ(FDI_RX_CTL(pipe));
1157         cur_state = !!(val & FDI_RX_ENABLE);
1158         I915_STATE_WARN(cur_state != state,
1159              "FDI RX state assertion failure (expected %s, current %s)\n",
1160                         onoff(state), onoff(cur_state));
1161 }
1162 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1163 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1164
1165 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1166                                       enum pipe pipe)
1167 {
1168         u32 val;
1169
1170         /* ILK FDI PLL is always enabled */
1171         if (IS_GEN5(dev_priv))
1172                 return;
1173
1174         /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1175         if (HAS_DDI(dev_priv))
1176                 return;
1177
1178         val = I915_READ(FDI_TX_CTL(pipe));
1179         I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1180 }
1181
1182 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1183                        enum pipe pipe, bool state)
1184 {
1185         u32 val;
1186         bool cur_state;
1187
1188         val = I915_READ(FDI_RX_CTL(pipe));
1189         cur_state = !!(val & FDI_RX_PLL_ENABLE);
1190         I915_STATE_WARN(cur_state != state,
1191              "FDI RX PLL assertion failure (expected %s, current %s)\n",
1192                         onoff(state), onoff(cur_state));
1193 }
1194
/*
 * Warn if the panel-power-sequencer registers for the panel on @pipe are
 * still locked. The PP register location and the pipe the panel is attached
 * to are determined differently per platform.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        i915_reg_t pp_reg;
        u32 val;
        enum pipe panel_pipe = PIPE_A;
        bool locked = true;

        /* DDI platforms are not handled by this helper */
        if (WARN_ON(HAS_DDI(dev_priv)))
                return;

        if (HAS_PCH_SPLIT(dev_priv)) {
                u32 port_sel;

                pp_reg = PP_CONTROL(0);
                port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

                /* Infer the panel's pipe from the LVDS port routing */
                if (port_sel == PANEL_PORT_SELECT_LVDS &&
                    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
                        panel_pipe = PIPE_B;
                /* XXX: else fix for eDP */
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                /* presumably write lock depends on pipe, not port select */
                pp_reg = PP_CONTROL(pipe);
                panel_pipe = pipe;
        } else {
                pp_reg = PP_CONTROL(0);
                if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
                        panel_pipe = PIPE_B;
        }

        /* Regs count as unlocked when power is off or the unlock key is set */
        val = I915_READ(pp_reg);
        if (!(val & PANEL_POWER_ON) ||
            ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
                locked = false;

        I915_STATE_WARN(panel_pipe == pipe && locked,
             "panel assertion failure, pipe %c regs locked\n",
             pipe_name(pipe));
}
1234
1235 static void assert_cursor(struct drm_i915_private *dev_priv,
1236                           enum pipe pipe, bool state)
1237 {
1238         bool cur_state;
1239
1240         if (IS_845G(dev_priv) || IS_I865G(dev_priv))
1241                 cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
1242         else
1243                 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
1244
1245         I915_STATE_WARN(cur_state != state,
1246              "cursor on pipe %c assertion failure (expected %s, current %s)\n",
1247                         pipe_name(pipe), onoff(state), onoff(cur_state));
1248 }
1249 #define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
1250 #define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1251
/*
 * Warn if the pipe's enable state differs from @state. Reads PIPECONF only
 * while holding a reference on the transcoder's power domain; if the domain
 * is powered down the pipe is reported as off.
 */
void assert_pipe(struct drm_i915_private *dev_priv,
                 enum pipe pipe, bool state)
{
        bool cur_state;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);
        enum intel_display_power_domain power_domain;

        /* if we need the pipe quirk it must be always on */
        if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
            (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
                state = true;

        power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
        if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
                u32 val = I915_READ(PIPECONF(cpu_transcoder));
                cur_state = !!(val & PIPECONF_ENABLE);

                intel_display_power_put(dev_priv, power_domain);
        } else {
                cur_state = false;
        }

        I915_STATE_WARN(cur_state != state,
             "pipe %c assertion failure (expected %s, current %s)\n",
                        pipe_name(pipe), onoff(state), onoff(cur_state));
}
1279
1280 static void assert_plane(struct drm_i915_private *dev_priv,
1281                          enum plane plane, bool state)
1282 {
1283         u32 val;
1284         bool cur_state;
1285
1286         val = I915_READ(DSPCNTR(plane));
1287         cur_state = !!(val & DISPLAY_PLANE_ENABLE);
1288         I915_STATE_WARN(cur_state != state,
1289              "plane %c assertion failure (expected %s, current %s)\n",
1290                         plane_name(plane), onoff(state), onoff(cur_state));
1291 }
1292
1293 #define assert_plane_enabled(d, p) assert_plane(d, p, true)
1294 #define assert_plane_disabled(d, p) assert_plane(d, p, false)
1295
/* Warn if any primary plane that could scan out @pipe is still enabled. */
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
                                   enum pipe pipe)
{
        struct drm_device *dev = &dev_priv->drm;
        int i;

        /* Primary planes are fixed to pipes on gen4+ */
        if (INTEL_INFO(dev)->gen >= 4) {
                u32 val = I915_READ(DSPCNTR(pipe));
                I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
                     "plane %c assertion failure, should be disabled but not\n",
                     plane_name(pipe));
                return;
        }

        /* Need to check both planes against the pipe */
        for_each_pipe(dev_priv, i) {
                u32 val = I915_READ(DSPCNTR(i));
                /* pre-gen4 planes carry a pipe-select field in DSPCNTR */
                enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
                        DISPPLANE_SEL_PIPE_SHIFT;
                I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
                     "plane %c assertion failure, should be off on pipe %c but is still active\n",
                     plane_name(i), pipe_name(pipe));
        }
}
1321
/*
 * Warn if any sprite plane on @pipe is still enabled. The sprite register
 * layout differs per generation, hence the cascade of platform checks.
 */
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
                                    enum pipe pipe)
{
        struct drm_device *dev = &dev_priv->drm;
        int sprite;

        if (INTEL_INFO(dev)->gen >= 9) {
                /* gen9+: universal planes via PLANE_CTL */
                for_each_sprite(dev_priv, pipe, sprite) {
                        u32 val = I915_READ(PLANE_CTL(pipe, sprite));
                        I915_STATE_WARN(val & PLANE_CTL_ENABLE,
                             "plane %d assertion failure, should be off on pipe %c but is still active\n",
                             sprite, pipe_name(pipe));
                }
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                /* VLV/CHV: per-pipe sprite registers (SPCNTR) */
                for_each_sprite(dev_priv, pipe, sprite) {
                        u32 val = I915_READ(SPCNTR(pipe, sprite));
                        I915_STATE_WARN(val & SP_ENABLE,
                             "sprite %c assertion failure, should be off on pipe %c but is still active\n",
                             sprite_name(pipe, sprite), pipe_name(pipe));
                }
        } else if (INTEL_INFO(dev)->gen >= 7) {
                /* IVB/HSW: one sprite per pipe (SPRCTL) */
                u32 val = I915_READ(SPRCTL(pipe));
                I915_STATE_WARN(val & SPRITE_ENABLE,
                     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
                     plane_name(pipe), pipe_name(pipe));
        } else if (INTEL_INFO(dev)->gen >= 5) {
                /* ILK/SNB: one sprite per pipe (DVSCNTR) */
                u32 val = I915_READ(DVSCNTR(pipe));
                I915_STATE_WARN(val & DVS_ENABLE,
                     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
                     plane_name(pipe), pipe_name(pipe));
        }
}
1354
/*
 * Warn if vblank interrupts can still be taken on @crtc: if
 * drm_crtc_vblank_get() succeeds (returns 0) vblanks are not disabled,
 * so drop the reference we just acquired and emit the state warning.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
        if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
                drm_crtc_vblank_put(crtc);
}
1360
1361 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1362                                     enum pipe pipe)
1363 {
1364         u32 val;
1365         bool enabled;
1366
1367         val = I915_READ(PCH_TRANSCONF(pipe));
1368         enabled = !!(val & TRANS_ENABLE);
1369         I915_STATE_WARN(enabled,
1370              "transcoder assertion failed, should be off on pipe %c but is still active\n",
1371              pipe_name(pipe));
1372 }
1373
1374 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1375                             enum pipe pipe, u32 port_sel, u32 val)
1376 {
1377         if ((val & DP_PORT_EN) == 0)
1378                 return false;
1379
1380         if (HAS_PCH_CPT(dev_priv)) {
1381                 u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
1382                 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1383                         return false;
1384         } else if (IS_CHERRYVIEW(dev_priv)) {
1385                 if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
1386                         return false;
1387         } else {
1388                 if ((val & DP_PIPE_MASK) != (pipe << 30))
1389                         return false;
1390         }
1391         return true;
1392 }
1393
1394 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1395                               enum pipe pipe, u32 val)
1396 {
1397         if ((val & SDVO_ENABLE) == 0)
1398                 return false;
1399
1400         if (HAS_PCH_CPT(dev_priv)) {
1401                 if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
1402                         return false;
1403         } else if (IS_CHERRYVIEW(dev_priv)) {
1404                 if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
1405                         return false;
1406         } else {
1407                 if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
1408                         return false;
1409         }
1410         return true;
1411 }
1412
1413 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1414                               enum pipe pipe, u32 val)
1415 {
1416         if ((val & LVDS_PORT_EN) == 0)
1417                 return false;
1418
1419         if (HAS_PCH_CPT(dev_priv)) {
1420                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1421                         return false;
1422         } else {
1423                 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1424                         return false;
1425         }
1426         return true;
1427 }
1428
1429 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1430                               enum pipe pipe, u32 val)
1431 {
1432         if ((val & ADPA_DAC_ENABLE) == 0)
1433                 return false;
1434         if (HAS_PCH_CPT(dev_priv)) {
1435                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1436                         return false;
1437         } else {
1438                 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1439                         return false;
1440         }
1441         return true;
1442 }
1443
/* Warn if the PCH DP port at @reg is enabled on @pipe. */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
                                   enum pipe pipe, i915_reg_t reg,
                                   u32 port_sel)
{
        u32 val = I915_READ(reg);
        I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
             "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
             i915_mmio_reg_offset(reg), pipe_name(pipe));

        /* on IBX even a disabled port must not keep transcoder B selected */
        I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0
             && (val & DP_PIPEB_SELECT),
             "IBX PCH dp port still using transcoder B\n");
}
1457
/* Warn if the PCH HDMI port at @reg is enabled on @pipe. */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
                                     enum pipe pipe, i915_reg_t reg)
{
        u32 val = I915_READ(reg);
        I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
             "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
             i915_mmio_reg_offset(reg), pipe_name(pipe));

        /* on IBX even a disabled port must not keep transcoder B selected */
        I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0
             && (val & SDVO_PIPE_B_SELECT),
             "IBX PCH hdmi port still using transcoder B\n");
}
1470
/* Warn if any PCH output port (DP, VGA, LVDS, HDMI) is enabled on @pipe. */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
                                      enum pipe pipe)
{
        u32 val;

        assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
        assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
        assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

        val = I915_READ(PCH_ADPA);
        I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
             "PCH VGA enabled on transcoder %c, should be disabled\n",
             pipe_name(pipe));

        val = I915_READ(PCH_LVDS);
        I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
             "PCH LVDS enabled on transcoder %c, should be disabled\n",
             pipe_name(pipe));

        assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
        assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
        assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}
1494
/* Program the precomputed DPLL value and wait for the VLV PLL to lock. */
static void _vlv_enable_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
        POSTING_READ(DPLL(pipe));
        /* give the PLL time to spin up before polling the lock bit */
        udelay(150);

        if (intel_wait_for_register(dev_priv,
                                    DPLL(pipe),
                                    DPLL_LOCK_VLV,
                                    DPLL_LOCK_VLV,
                                    1))
                DRM_ERROR("DPLL %d failed to lock\n", pipe);
}
1512
/*
 * Enable the VLV DPLL for @crtc from the precomputed state in @pipe_config.
 * The VCO is only spun up when the state actually has DPLL_VCO_ENABLE set;
 * DPLL_MD is written unconditionally.
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
                           const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        assert_pipe_disabled(dev_priv, pipe);

        /* PLL is protected by panel, make sure we can write it */
        assert_panel_unlocked(dev_priv, pipe);

        if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
                _vlv_enable_pll(crtc, pipe_config);

        I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
        POSTING_READ(DPLL_MD(pipe));
}
1530
1531
/*
 * Program the CHV DPLL and wait for it to lock.
 *
 * First re-enables the 10-bit clock to the display controller through
 * the sideband (DPIO) interface, then enables the PLL proper and polls
 * for DPLL_LOCK_VLV (1 ms timeout).
 */
static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	/* sb_lock serializes all sideband (DPIO) accesses */
	mutex_lock(&dev_priv->sb_lock);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_wait_for_register(dev_priv,
				    DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}
1563
/*
 * Enable the DPLL for a CHV pipe and program the pixel multiplier,
 * applying the WaPixelRepeatModeFixForC0 workaround for pipes B/C
 * (their own DPLL_MD register is not usable, so the value is routed
 * through DPLL_MD(PIPE_B) via the CBR4 chicken bits).
 */
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, pipe == PIPE_B ? CBR_DPLLBMD_PIPE_B : CBR_DPLLBMD_PIPE_C);
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		/* Cache the value since the register can't be read back */
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}
1600
1601 static int intel_num_dvo_pipes(struct drm_device *dev)
1602 {
1603         struct intel_crtc *crtc;
1604         int count = 0;
1605
1606         for_each_intel_crtc(dev, crtc) {
1607                 count += crtc->base.state->active &&
1608                         intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
1609         }
1610
1611         return count;
1612 }
1613
/*
 * Enable the pipe DPLL on pre-ILK (gen2-4) hardware.
 *
 * Follows the documented bring-up dance: optionally enable the DVO 2x
 * clock on the *other* PLL first (i830), write the DPLL with VGA mode
 * before the final value so the P1/P2 dividers take, program the pixel
 * multiplier, and finally rewrite the register three times with warmup
 * delays as the traditional stabilization ritual.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc->config->dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/* Enable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev) > 0) {
		/*
		 * It appears to be important that we don't enable this
		 * for the current pipe before otherwise configuring the
		 * PLL. No idea how this should be handled if multiple
		 * DVO outputs are enabled simultaneously.
		 */
		dpll |= DPLL_DVO_2X_MODE;
		I915_WRITE(DPLL(!crtc->pipe),
			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
	}

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, 0);

	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* gen4+ has a dedicated register for the pixel multiplier */
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc->config->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}
1676
/**
 * i9xx_disable_pll - disable a PLL
 * @crtc: crtc whose pipe's PLL should be disabled
 *
 * Disable the PLL for @crtc's pipe, making sure the pipe is off first.
 *
 * Note!  This is for pre-ILK only.
 */
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	/* Disable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev_priv) &&
	    intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
	    !intel_num_dvo_pipes(dev)) {
		I915_WRITE(DPLL(PIPE_B),
			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
		I915_WRITE(DPLL(PIPE_A),
			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
	}

	/* Don't disable pipe or pipe PLLs if needed */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/* Leave only VGA-mode-disable set; everything else off */
	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
1713
/*
 * Disable the DPLL for a VLV pipe.
 *
 * The PLL is not shut down entirely: the written value keeps the
 * reference clock running and VGA mode disabled, and on pipes other
 * than A additionally keeps the integrated CRI clock bit set.
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}
1729
/*
 * Disable the DPLL for a CHV pipe.
 *
 * Like vlv_disable_pll() this keeps the reference clock (and the CRI
 * clock bit on pipes other than A) running, then additionally gates
 * the 10-bit clock to the display controller through sideband.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	/* sb_lock serializes all sideband (DPIO) accesses */
	mutex_lock(&dev_priv->sb_lock);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	mutex_unlock(&dev_priv->sb_lock);
}
1755
/*
 * Wait (up to 1 ms) for a VLV/CHV PHY port to report ready.
 *
 * Ports B and C share DPLL(0) for their status bits; port D's live in
 * DPIO_PHY_STATUS. Warns (rather than errors) on timeout, dumping the
 * observed vs. expected status bits.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		/* port C's ready field sits 4 bits above port B's */
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_wait_for_register(dev_priv,
				    dpll_reg, port_mask, expected_mask,
				    1000))
		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
		     port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
}
1787
1788 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1789                                            enum pipe pipe)
1790 {
1791         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1792         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1793         i915_reg_t reg;
1794         uint32_t val, pipeconf_val;
1795
1796         /* Make sure PCH DPLL is enabled */
1797         assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);
1798
1799         /* FDI must be feeding us bits for PCH ports */
1800         assert_fdi_tx_enabled(dev_priv, pipe);
1801         assert_fdi_rx_enabled(dev_priv, pipe);
1802
1803         if (HAS_PCH_CPT(dev_priv)) {
1804                 /* Workaround: Set the timing override bit before enabling the
1805                  * pch transcoder. */
1806                 reg = TRANS_CHICKEN2(pipe);
1807                 val = I915_READ(reg);
1808                 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1809                 I915_WRITE(reg, val);
1810         }
1811
1812         reg = PCH_TRANSCONF(pipe);
1813         val = I915_READ(reg);
1814         pipeconf_val = I915_READ(PIPECONF(pipe));
1815
1816         if (HAS_PCH_IBX(dev_priv)) {
1817                 /*
1818                  * Make the BPC in transcoder be consistent with
1819                  * that in pipeconf reg. For HDMI we must use 8bpc
1820                  * here for both 8bpc and 12bpc.
1821                  */
1822                 val &= ~PIPECONF_BPC_MASK;
1823                 if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI))
1824                         val |= PIPECONF_8BPC;
1825                 else
1826                         val |= pipeconf_val & PIPECONF_BPC_MASK;
1827         }
1828
1829         val &= ~TRANS_INTERLACE_MASK;
1830         if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1831                 if (HAS_PCH_IBX(dev_priv) &&
1832                     intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
1833                         val |= TRANS_LEGACY_INTERLACED_ILK;
1834                 else
1835                         val |= TRANS_INTERLACED;
1836         else
1837                 val |= TRANS_PROGRESSIVE;
1838
1839         I915_WRITE(reg, val | TRANS_ENABLE);
1840         if (intel_wait_for_register(dev_priv,
1841                                     reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
1842                                     100))
1843                 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1844 }
1845
/*
 * Enable the single LPT PCH transcoder for @cpu_transcoder.
 *
 * LPT only has transcoder A on the PCH side; FDI RX is therefore always
 * checked on transcoder A. Sets the timing-override workaround bit,
 * copies the interlace mode from the CPU transcoder's PIPECONF, then
 * enables the transcoder and waits (100 ms) for the enabled state.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (intel_wait_for_register(dev_priv,
				    LPT_TRANSCONF,
				    TRANS_STATE_ENABLE,
				    TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}
1877
/*
 * Disable the PCH transcoder for @pipe.
 *
 * FDI and the PCH ports must already be off. Clears TRANS_ENABLE,
 * waits (50 ms) for the transcoder to report disabled, and on CPT
 * clears the timing-override chicken bit set at enable time.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	i915_reg_t reg;
	uint32_t val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(dev_priv,
				    reg, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}
1909
/*
 * Disable the single LPT PCH transcoder.
 *
 * Clears TRANS_ENABLE, waits (50 ms) for the transcoder to report
 * disabled, then clears the timing-override workaround bit that
 * lpt_enable_pch_transcoder() set.
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(dev_priv,
				    LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
1928
1929 enum transcoder intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1930 {
1931         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1932
1933         WARN_ON(!crtc->config->has_pch_encoder);
1934
1935         if (HAS_PCH_LPT(dev_priv))
1936                 return TRANSCODER_A;
1937         else
1938                 return (enum transcoder) crtc->pipe;
1939 }
1940
/**
 * intel_enable_pipe - enable a pipe, asserting requirements
 * @crtc: crtc responsible for the pipe
 *
 * Enable @crtc's pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
 */
static void intel_enable_pipe(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	/* Nothing should be feeding the pipe before it's running */
	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv)) {
		if (intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (crtc->config->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  (enum pipe) intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		/* Only the "force pipe on" quirks may legitimately leave
		 * the pipe already enabled at this point. */
		WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
			  (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);

	/*
	 * Until the pipe starts DSL will read as 0, which would cause
	 * an apparent vblank timestamp jump, which messes up also the
	 * frame count when it's derived from the timestamps. So let's
	 * wait for the pipe to start properly before we call
	 * drm_crtc_vblank_on()
	 */
	if (dev->max_vblank_count == 0 &&
	    wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50))
		DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe));
}
2006
/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @crtc: crtc whose pipes is to be disabled
 *
 * Disable the pipe of @crtc, making sure that various hardware
 * specific requirements are met, if applicable, e.g. plane
 * disabled, panel fitter off, etc.
 *
 * Will wait until the pipe has shut down before returning.
 */
static void intel_disable_pipe(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (crtc->config->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
	    !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	/* On quirk pipes PIPECONF_ENABLE stays set, so no wait happens */
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(crtc);
}
2056
/* GTT tile size in bytes: 2 KiB on gen2, 4 KiB everywhere else. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	if (IS_GEN2(dev_priv))
		return 2048;

	return 4096;
}
2061
2062 static unsigned int intel_tile_width_bytes(const struct drm_i915_private *dev_priv,
2063                                            uint64_t fb_modifier, unsigned int cpp)
2064 {
2065         switch (fb_modifier) {
2066         case DRM_FORMAT_MOD_NONE:
2067                 return cpp;
2068         case I915_FORMAT_MOD_X_TILED:
2069                 if (IS_GEN2(dev_priv))
2070                         return 128;
2071                 else
2072                         return 512;
2073         case I915_FORMAT_MOD_Y_TILED:
2074                 if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
2075                         return 128;
2076                 else
2077                         return 512;
2078         case I915_FORMAT_MOD_Yf_TILED:
2079                 switch (cpp) {
2080                 case 1:
2081                         return 64;
2082                 case 2:
2083                 case 4:
2084                         return 128;
2085                 case 8:
2086                 case 16:
2087                         return 256;
2088                 default:
2089                         MISSING_CASE(cpp);
2090                         return cpp;
2091                 }
2092                 break;
2093         default:
2094                 MISSING_CASE(fb_modifier);
2095                 return cpp;
2096         }
2097 }
2098
2099 unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
2100                                uint64_t fb_modifier, unsigned int cpp)
2101 {
2102         if (fb_modifier == DRM_FORMAT_MOD_NONE)
2103                 return 1;
2104         else
2105                 return intel_tile_size(dev_priv) /
2106                         intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
2107 }
2108
/* Return the tile dimensions in pixel units */
static void intel_tile_dims(const struct drm_i915_private *dev_priv,
			    unsigned int *tile_width,
			    unsigned int *tile_height,
			    uint64_t fb_modifier,
			    unsigned int cpp)
{
	unsigned int width_bytes =
		intel_tile_width_bytes(dev_priv, fb_modifier, cpp);

	/* height = tile size / row stride; width = row stride / pixel size */
	*tile_height = intel_tile_size(dev_priv) / width_bytes;
	*tile_width = width_bytes / cpp;
}
2122
/* Round @height up to a whole number of tile rows for the modifier. */
unsigned int
intel_fb_align_height(struct drm_device *dev, unsigned int height,
		      uint32_t pixel_format, uint64_t fb_modifier)
{
	unsigned int cpp, tile_height;

	cpp = drm_format_plane_cpp(pixel_format, 0);
	tile_height = intel_tile_height(to_i915(dev), fb_modifier, cpp);

	return ALIGN(height, tile_height);
}
2132
2133 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
2134 {
2135         unsigned int size = 0;
2136         int i;
2137
2138         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
2139                 size += rot_info->plane[i].width * rot_info->plane[i].height;
2140
2141         return size;
2142 }
2143
2144 static void
2145 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2146                         const struct drm_framebuffer *fb,
2147                         unsigned int rotation)
2148 {
2149         if (intel_rotation_90_or_270(rotation)) {
2150                 *view = i915_ggtt_view_rotated;
2151                 view->params.rotated = to_intel_framebuffer(fb)->rot_info;
2152         } else {
2153                 *view = i915_ggtt_view_normal;
2154         }
2155 }
2156
2157 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
2158 {
2159         if (INTEL_INFO(dev_priv)->gen >= 9)
2160                 return 256 * 1024;
2161         else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
2162                  IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2163                 return 128 * 1024;
2164         else if (INTEL_INFO(dev_priv)->gen >= 4)
2165                 return 4 * 1024;
2166         else
2167                 return 0;
2168 }
2169
2170 static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv,
2171                                          uint64_t fb_modifier)
2172 {
2173         switch (fb_modifier) {
2174         case DRM_FORMAT_MOD_NONE:
2175                 return intel_linear_alignment(dev_priv);
2176         case I915_FORMAT_MOD_X_TILED:
2177                 if (INTEL_INFO(dev_priv)->gen >= 9)
2178                         return 256 * 1024;
2179                 return 0;
2180         case I915_FORMAT_MOD_Y_TILED:
2181         case I915_FORMAT_MOD_Yf_TILED:
2182                 return 1 * 1024 * 1024;
2183         default:
2184                 MISSING_CASE(fb_modifier);
2185                 return 0;
2186         }
2187 }
2188
/*
 * Pin @fb's backing object into the global GTT for scanout (using the
 * view implied by @rotation) and, when the mapping is map-and-fenceable,
 * install a fence register for it.
 *
 * Caller must hold dev->struct_mutex. Returns the pinned vma on
 * success, or an ERR_PTR on failure (the err label deliberately
 * returns the IS_ERR vma unchanged).
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;
	struct i915_vma *vma;
	u32 alignment;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);

	intel_fill_fb_ggtt_view(&view, fb, rotation);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	intel_runtime_pm_get(dev_priv);

	vma = i915_gem_object_pin_to_display_plane(obj, alignment, &view);
	if (IS_ERR(vma))
		goto err;

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression.  For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		if (i915_vma_get_fence(vma) == 0)
			i915_vma_pin_fence(vma);
	}

err:
	intel_runtime_pm_put(dev_priv);
	return vma;
}
2251
/*
 * Undo intel_pin_and_fence_fb_obj(): release the fence (if any) and
 * unpin @fb's object from the display plane, looking up the vma via
 * the same rotation-derived GGTT view used at pin time.
 *
 * Caller must hold struct_mutex.
 */
void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;
	struct i915_vma *vma;

	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));

	intel_fill_fb_ggtt_view(&view, fb, rotation);
	vma = i915_gem_object_to_ggtt(obj, &view);

	i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
}
2266
2267 static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
2268                           unsigned int rotation)
2269 {
2270         if (intel_rotation_90_or_270(rotation))
2271                 return to_intel_framebuffer(fb)->rotated[plane].pitch;
2272         else
2273                 return fb->pitches[plane];
2274 }
2275
2276 /*
2277  * Convert the x/y offsets into a linear offset.
2278  * Only valid with 0/180 degree rotation, which is fine since linear
2279  * offset is only used with linear buffers on pre-hsw and tiled buffers
2280  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2281  */
2282 u32 intel_fb_xy_to_linear(int x, int y,
2283                           const struct intel_plane_state *state,
2284                           int plane)
2285 {
2286         const struct drm_framebuffer *fb = state->base.fb;
2287         unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
2288         unsigned int pitch = fb->pitches[plane];
2289
2290         return y * pitch + x * cpp;
2291 }
2292
2293 /*
2294  * Add the x/y offsets derived from fb->offsets[] to the user
2295  * specified plane src x/y offsets. The resulting x/y offsets
2296  * specify the start of scanout from the beginning of the gtt mapping.
2297  */
2298 void intel_add_fb_offsets(int *x, int *y,
2299                           const struct intel_plane_state *state,
2300                           int plane)
2301
2302 {
2303         const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
2304         unsigned int rotation = state->base.rotation;
2305
2306         if (intel_rotation_90_or_270(rotation)) {
2307                 *x += intel_fb->rotated[plane].x;
2308                 *y += intel_fb->rotated[plane].y;
2309         } else {
2310                 *x += intel_fb->normal[plane].x;
2311                 *y += intel_fb->normal[plane].y;
2312         }
2313 }
2314
2315 /*
2316  * Input tile dimensions and pitch must already be
2317  * rotated to match x and y, and in pixel units.
2318  */
2319 static u32 _intel_adjust_tile_offset(int *x, int *y,
2320                                      unsigned int tile_width,
2321                                      unsigned int tile_height,
2322                                      unsigned int tile_size,
2323                                      unsigned int pitch_tiles,
2324                                      u32 old_offset,
2325                                      u32 new_offset)
2326 {
2327         unsigned int pitch_pixels = pitch_tiles * tile_width;
2328         unsigned int tiles;
2329
2330         WARN_ON(old_offset & (tile_size - 1));
2331         WARN_ON(new_offset & (tile_size - 1));
2332         WARN_ON(new_offset > old_offset);
2333
2334         tiles = (old_offset - new_offset) / tile_size;
2335
2336         *y += tiles / pitch_tiles * tile_height;
2337         *x += tiles % pitch_tiles * tile_width;
2338
2339         /* minimize x in case it got needlessly big */
2340         *y += *x / pitch_pixels * tile_height;
2341         *x %= pitch_pixels;
2342
2343         return new_offset;
2344 }
2345
2346 /*
2347  * Adjust the tile offset by moving the difference into
2348  * the x/y offsets.
2349  */
static u32 intel_adjust_tile_offset(int *x, int *y,
                                    const struct intel_plane_state *state, int plane,
                                    u32 old_offset, u32 new_offset)
{
        const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev);
        const struct drm_framebuffer *fb = state->base.fb;
        unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
        unsigned int rotation = state->base.rotation;
        unsigned int pitch = intel_fb_pitch(fb, plane, rotation);

        /* offsets only ever move downwards; the delta goes into x/y */
        WARN_ON(new_offset > old_offset);

        if (fb->modifier[plane] != DRM_FORMAT_MOD_NONE) {
                unsigned int tile_size, tile_width, tile_height;
                unsigned int pitch_tiles;

                tile_size = intel_tile_size(dev_priv);
                intel_tile_dims(dev_priv, &tile_width, &tile_height,
                                fb->modifier[plane], cpp);

                if (intel_rotation_90_or_270(rotation)) {
                        /* pitch is already in the rotated orientation here */
                        pitch_tiles = pitch / tile_height;
                        swap(tile_width, tile_height);
                } else {
                        pitch_tiles = pitch / (tile_width * cpp);
                }

                _intel_adjust_tile_offset(x, y, tile_width, tile_height,
                                          tile_size, pitch_tiles,
                                          old_offset, new_offset);
        } else {
                /* linear: fold the byte delta straight back into x/y */
                old_offset += *y * pitch + *x * cpp;

                *y = (old_offset - new_offset) / pitch;
                *x = ((old_offset - new_offset) - *y * pitch) / cpp;
        }

        return new_offset;
}
2389
2390 /*
2391  * Computes the linear offset to the base tile and adjusts
2392  * x, y. bytes per pixel is assumed to be a power-of-two.
2393  *
2394  * In the 90/270 rotated case, x and y are assumed
2395  * to be already rotated to match the rotated GTT view, and
2396  * pitch is the tile_height aligned framebuffer height.
2397  *
2398  * This function is used when computing the derived information
2399  * under intel_framebuffer, so using any of that information
2400  * here is not allowed. Anything under drm_framebuffer can be
2401  * used. This is why the user has to pass in the pitch since it
2402  * is specified in the rotated orientation.
2403  */
static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
                                      int *x, int *y,
                                      const struct drm_framebuffer *fb, int plane,
                                      unsigned int pitch,
                                      unsigned int rotation,
                                      u32 alignment)
{
        uint64_t fb_modifier = fb->modifier[plane];
        unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
        u32 offset, offset_aligned;

        /* turn the power-of-two alignment into a mask (0 stays "no alignment") */
        if (alignment)
                alignment--;

        if (fb_modifier != DRM_FORMAT_MOD_NONE) {
                unsigned int tile_size, tile_width, tile_height;
                unsigned int tile_rows, tiles, pitch_tiles;

                tile_size = intel_tile_size(dev_priv);
                intel_tile_dims(dev_priv, &tile_width, &tile_height,
                                fb_modifier, cpp);

                if (intel_rotation_90_or_270(rotation)) {
                        /* caller passed pitch in the rotated orientation */
                        pitch_tiles = pitch / tile_height;
                        swap(tile_width, tile_height);
                } else {
                        pitch_tiles = pitch / (tile_width * cpp);
                }

                /* reduce x/y to intra-tile coordinates, counting whole tiles */
                tile_rows = *y / tile_height;
                *y %= tile_height;

                tiles = *x / tile_width;
                *x %= tile_width;

                /* byte offset of the containing tile, then align it down */
                offset = (tile_rows * pitch_tiles + tiles) * tile_size;
                offset_aligned = offset & ~alignment;

                /* push the alignment remainder back into the x/y offsets */
                _intel_adjust_tile_offset(x, y, tile_width, tile_height,
                                          tile_size, pitch_tiles,
                                          offset, offset_aligned);
        } else {
                offset = *y * pitch + *x * cpp;
                offset_aligned = offset & ~alignment;

                /* express the sub-alignment remainder as x/y offsets */
                *y = (offset & alignment) / pitch;
                *x = ((offset & alignment) - *y * pitch) / cpp;
        }

        return offset_aligned;
}
2455
2456 u32 intel_compute_tile_offset(int *x, int *y,
2457                               const struct intel_plane_state *state,
2458                               int plane)
2459 {
2460         const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev);
2461         const struct drm_framebuffer *fb = state->base.fb;
2462         unsigned int rotation = state->base.rotation;
2463         int pitch = intel_fb_pitch(fb, plane, rotation);
2464         u32 alignment;
2465
2466         /* AUX_DIST needs only 4K alignment */
2467         if (fb->pixel_format == DRM_FORMAT_NV12 && plane == 1)
2468                 alignment = 4096;
2469         else
2470                 alignment = intel_surf_alignment(dev_priv, fb->modifier[plane]);
2471
2472         return _intel_compute_tile_offset(dev_priv, x, y, fb, plane, pitch,
2473                                           rotation, alignment);
2474 }
2475
2476 /* Convert the fb->offset[] linear offset into x/y offsets */
2477 static void intel_fb_offset_to_xy(int *x, int *y,
2478                                   const struct drm_framebuffer *fb, int plane)
2479 {
2480         unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
2481         unsigned int pitch = fb->pitches[plane];
2482         u32 linear_offset = fb->offsets[plane];
2483
2484         *y = linear_offset / pitch;
2485         *x = linear_offset % pitch / cpp;
2486 }
2487
2488 static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier)
2489 {
2490         switch (fb_modifier) {
2491         case I915_FORMAT_MOD_X_TILED:
2492                 return I915_TILING_X;
2493         case I915_FORMAT_MOD_Y_TILED:
2494                 return I915_TILING_Y;
2495         default:
2496                 return I915_TILING_NONE;
2497         }
2498 }
2499
2500 static int
2501 intel_fill_fb_info(struct drm_i915_private *dev_priv,
2502                    struct drm_framebuffer *fb)
2503 {
2504         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2505         struct intel_rotation_info *rot_info = &intel_fb->rot_info;
2506         u32 gtt_offset_rotated = 0;
2507         unsigned int max_size = 0;
2508         uint32_t format = fb->pixel_format;
2509         int i, num_planes = drm_format_num_planes(format);
2510         unsigned int tile_size = intel_tile_size(dev_priv);
2511
2512         for (i = 0; i < num_planes; i++) {
2513                 unsigned int width, height;
2514                 unsigned int cpp, size;
2515                 u32 offset;
2516                 int x, y;
2517
2518                 cpp = drm_format_plane_cpp(format, i);
2519                 width = drm_format_plane_width(fb->width, format, i);
2520                 height = drm_format_plane_height(fb->height, format, i);
2521
2522                 intel_fb_offset_to_xy(&x, &y, fb, i);
2523
2524                 /*
2525                  * The fence (if used) is aligned to the start of the object
2526                  * so having the framebuffer wrap around across the edge of the
2527                  * fenced region doesn't really work. We have no API to configure
2528                  * the fence start offset within the object (nor could we probably
2529                  * on gen2/3). So it's just easier if we just require that the
2530                  * fb layout agrees with the fence layout. We already check that the
2531                  * fb stride matches the fence stride elsewhere.
2532                  */
2533                 if (i915_gem_object_is_tiled(intel_fb->obj) &&
2534                     (x + width) * cpp > fb->pitches[i]) {
2535                         DRM_DEBUG("bad fb plane %d offset: 0x%x\n",
2536                                   i, fb->offsets[i]);
2537                         return -EINVAL;
2538                 }
2539
2540                 /*
2541                  * First pixel of the framebuffer from
2542                  * the start of the normal gtt mapping.
2543                  */
2544                 intel_fb->normal[i].x = x;
2545                 intel_fb->normal[i].y = y;
2546
2547                 offset = _intel_compute_tile_offset(dev_priv, &x, &y,
2548                                                     fb, 0, fb->pitches[i],
2549                                                     DRM_ROTATE_0, tile_size);
2550                 offset /= tile_size;
2551
2552                 if (fb->modifier[i] != DRM_FORMAT_MOD_NONE) {
2553                         unsigned int tile_width, tile_height;
2554                         unsigned int pitch_tiles;
2555                         struct drm_rect r;
2556
2557                         intel_tile_dims(dev_priv, &tile_width, &tile_height,
2558                                         fb->modifier[i], cpp);
2559
2560                         rot_info->plane[i].offset = offset;
2561                         rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
2562                         rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
2563                         rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
2564
2565                         intel_fb->rotated[i].pitch =
2566                                 rot_info->plane[i].height * tile_height;
2567
2568                         /* how many tiles does this plane need */
2569                         size = rot_info->plane[i].stride * rot_info->plane[i].height;
2570                         /*
2571                          * If the plane isn't horizontally tile aligned,
2572                          * we need one more tile.
2573                          */
2574                         if (x != 0)
2575                                 size++;
2576
2577                         /* rotate the x/y offsets to match the GTT view */
2578                         r.x1 = x;
2579                         r.y1 = y;
2580                         r.x2 = x + width;
2581                         r.y2 = y + height;
2582                         drm_rect_rotate(&r,
2583                                         rot_info->plane[i].width * tile_width,
2584                                         rot_info->plane[i].height * tile_height,
2585                                         DRM_ROTATE_270);
2586                         x = r.x1;
2587                         y = r.y1;
2588
2589                         /* rotate the tile dimensions to match the GTT view */
2590                         pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
2591                         swap(tile_width, tile_height);
2592
2593                         /*
2594                          * We only keep the x/y offsets, so push all of the
2595                          * gtt offset into the x/y offsets.
2596                          */
2597                         _intel_adjust_tile_offset(&x, &y, tile_size,
2598                                                   tile_width, tile_height, pitch_tiles,
2599                                                   gtt_offset_rotated * tile_size, 0);
2600
2601                         gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
2602
2603                         /*
2604                          * First pixel of the framebuffer from
2605                          * the start of the rotated gtt mapping.
2606                          */
2607                         intel_fb->rotated[i].x = x;
2608                         intel_fb->rotated[i].y = y;
2609                 } else {
2610                         size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
2611                                             x * cpp, tile_size);
2612                 }
2613
2614                 /* how many tiles in total needed in the bo */
2615                 max_size = max(max_size, offset + size);
2616         }
2617
2618         if (max_size * tile_size > to_intel_framebuffer(fb)->obj->base.size) {
2619                 DRM_DEBUG("fb too big for bo (need %u bytes, have %zu bytes)\n",
2620                           max_size * tile_size, to_intel_framebuffer(fb)->obj->base.size);
2621                 return -EINVAL;
2622         }
2623
2624         return 0;
2625 }
2626
2627 static int i9xx_format_to_fourcc(int format)
2628 {
2629         switch (format) {
2630         case DISPPLANE_8BPP:
2631                 return DRM_FORMAT_C8;
2632         case DISPPLANE_BGRX555:
2633                 return DRM_FORMAT_XRGB1555;
2634         case DISPPLANE_BGRX565:
2635                 return DRM_FORMAT_RGB565;
2636         default:
2637         case DISPPLANE_BGRX888:
2638                 return DRM_FORMAT_XRGB8888;
2639         case DISPPLANE_RGBX888:
2640                 return DRM_FORMAT_XBGR8888;
2641         case DISPPLANE_BGRX101010:
2642                 return DRM_FORMAT_XRGB2101010;
2643         case DISPPLANE_RGBX101010:
2644                 return DRM_FORMAT_XBGR2101010;
2645         }
2646 }
2647
2648 static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2649 {
2650         switch (format) {
2651         case PLANE_CTL_FORMAT_RGB_565:
2652                 return DRM_FORMAT_RGB565;
2653         default:
2654         case PLANE_CTL_FORMAT_XRGB_8888:
2655                 if (rgb_order) {
2656                         if (alpha)
2657                                 return DRM_FORMAT_ABGR8888;
2658                         else
2659                                 return DRM_FORMAT_XBGR8888;
2660                 } else {
2661                         if (alpha)
2662                                 return DRM_FORMAT_ARGB8888;
2663                         else
2664                                 return DRM_FORMAT_XRGB8888;
2665                 }
2666         case PLANE_CTL_FORMAT_XRGB_2101010:
2667                 if (rgb_order)
2668                         return DRM_FORMAT_XBGR2101010;
2669                 else
2670                         return DRM_FORMAT_XRGB2101010;
2671         }
2672 }
2673
/*
 * Try to wrap the BIOS-programmed (pre-allocated stolen memory) scanout
 * range described by @plane_config in a GEM object and initialize the
 * intel_framebuffer in @plane_config->fb with it.
 *
 * Returns true on success; false if the range is empty, too big for
 * stolen memory, or framebuffer initialization failed.
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
                              struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct drm_i915_gem_object *obj = NULL;
        struct drm_mode_fb_cmd2 mode_cmd = { 0 };
        struct drm_framebuffer *fb = &plane_config->fb->base;
        u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
        u32 size_aligned = round_up(plane_config->base + plane_config->size,
                                    PAGE_SIZE);

        size_aligned -= base_aligned;

        if (plane_config->size == 0)
                return false;

        /* If the FB is too big, just don't use it since fbdev is not very
         * important and we should probably use that space with FBC or other
         * features. */
        if (size_aligned * 2 > ggtt->stolen_usable_size)
                return false;

        mutex_lock(&dev->struct_mutex);

        obj = i915_gem_object_create_stolen_for_preallocated(dev,
                                                             base_aligned,
                                                             base_aligned,
                                                             size_aligned);
        if (!obj) {
                mutex_unlock(&dev->struct_mutex);
                return false;
        }

        /* carry the BIOS tiling mode over onto the new object */
        if (plane_config->tiling == I915_TILING_X)
                obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X;

        /* rebuild a mode_cmd matching the BIOS fb parameters */
        mode_cmd.pixel_format = fb->pixel_format;
        mode_cmd.width = fb->width;
        mode_cmd.height = fb->height;
        mode_cmd.pitches[0] = fb->pitches[0];
        mode_cmd.modifier[0] = fb->modifier[0];
        mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

        if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
                                   &mode_cmd, obj)) {
                DRM_DEBUG_KMS("intel fb init failed\n");
                goto out_unref_obj;
        }

        mutex_unlock(&dev->struct_mutex);

        DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
        return true;

out_unref_obj:
        i915_gem_object_put(obj);
        mutex_unlock(&dev->struct_mutex);
        return false;
}
2736
2737 /* Update plane->state->fb to match plane->fb after driver-internal updates */
2738 static void
2739 update_state_fb(struct drm_plane *plane)
2740 {
2741         if (plane->fb == plane->state->fb)
2742                 return;
2743
2744         if (plane->state->fb)
2745                 drm_framebuffer_unreference(plane->state->fb);
2746         plane->state->fb = plane->fb;
2747         if (plane->state->fb)
2748                 drm_framebuffer_reference(plane->state->fb);
2749 }
2750
/*
 * Take over the BIOS-programmed primary plane configuration: wrap the
 * BIOS fb memory in a new framebuffer, or share a framebuffer already
 * reconstructed for another CRTC scanning out the same GTT address.
 * If neither works, disable the primary plane so we don't carry a
 * visible-plane-with-NULL-fb state forward.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
                             struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *c;
        struct intel_crtc *i;
        struct drm_i915_gem_object *obj;
        struct drm_plane *primary = intel_crtc->base.primary;
        struct drm_plane_state *plane_state = primary->state;
        struct drm_crtc_state *crtc_state = intel_crtc->base.state;
        struct intel_plane *intel_plane = to_intel_plane(primary);
        struct intel_plane_state *intel_state =
                to_intel_plane_state(plane_state);
        struct drm_framebuffer *fb;

        if (!plane_config->fb)
                return;

        if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
                fb = &plane_config->fb->base;
                goto valid_fb;
        }

        kfree(plane_config->fb);

        /*
         * Failed to alloc the obj, check to see if we should share
         * an fb with another CRTC instead
         */
        for_each_crtc(dev, c) {
                i = to_intel_crtc(c);

                if (c == &intel_crtc->base)
                        continue;

                if (!i->active)
                        continue;

                fb = c->primary->fb;
                if (!fb)
                        continue;

                obj = intel_fb_obj(fb);
                /* same GTT address as the BIOS base => same scanout memory */
                if (i915_gem_object_ggtt_offset(obj, NULL) == plane_config->base) {
                        drm_framebuffer_reference(fb);
                        goto valid_fb;
                }
        }

        /*
         * We've failed to reconstruct the BIOS FB.  Current display state
         * indicates that the primary plane is visible, but has a NULL FB,
         * which will lead to problems later if we don't fix it up.  The
         * simplest solution is to just disable the primary plane now and
         * pretend the BIOS never had it enabled.
         */
        to_intel_plane_state(plane_state)->base.visible = false;
        crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
        intel_pre_disable_primary_noatomic(&intel_crtc->base);
        intel_plane->disable_plane(primary, &intel_crtc->base);

        return;

valid_fb:
        /* full-plane src/dst rectangles covering the whole fb */
        plane_state->src_x = 0;
        plane_state->src_y = 0;
        plane_state->src_w = fb->width << 16;
        plane_state->src_h = fb->height << 16;

        plane_state->crtc_x = 0;
        plane_state->crtc_y = 0;
        plane_state->crtc_w = fb->width;
        plane_state->crtc_h = fb->height;

        intel_state->base.src.x1 = plane_state->src_x;
        intel_state->base.src.y1 = plane_state->src_y;
        intel_state->base.src.x2 = plane_state->src_x + plane_state->src_w;
        intel_state->base.src.y2 = plane_state->src_y + plane_state->src_h;
        intel_state->base.dst.x1 = plane_state->crtc_x;
        intel_state->base.dst.y1 = plane_state->crtc_y;
        intel_state->base.dst.x2 = plane_state->crtc_x + plane_state->crtc_w;
        intel_state->base.dst.y2 = plane_state->crtc_y + plane_state->crtc_h;

        obj = intel_fb_obj(fb);
        /* a tiled BIOS fb means we must keep the BIOS swizzle settings */
        if (i915_gem_object_is_tiled(obj))
                dev_priv->preserve_bios_swizzle = true;

        drm_framebuffer_reference(fb);
        primary->fb = primary->state->fb = fb;
        primary->crtc = primary->state->crtc = &intel_crtc->base;
        intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
        atomic_or(to_intel_plane(primary)->frontbuffer_bit,
                  &obj->frontbuffer_bits);
}
2847
2848 static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane,
2849                                unsigned int rotation)
2850 {
2851         int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
2852
2853         switch (fb->modifier[plane]) {
2854         case DRM_FORMAT_MOD_NONE:
2855         case I915_FORMAT_MOD_X_TILED:
2856                 switch (cpp) {
2857                 case 8:
2858                         return 4096;
2859                 case 4:
2860                 case 2:
2861                 case 1:
2862                         return 8192;
2863                 default:
2864                         MISSING_CASE(cpp);
2865                         break;
2866                 }
2867                 break;
2868         case I915_FORMAT_MOD_Y_TILED:
2869         case I915_FORMAT_MOD_Yf_TILED:
2870                 switch (cpp) {
2871                 case 8:
2872                         return 2048;
2873                 case 4:
2874                         return 4096;
2875                 case 2:
2876                 case 1:
2877                         return 8192;
2878                 default:
2879                         MISSING_CASE(cpp);
2880                         break;
2881                 }
2882                 break;
2883         default:
2884                 MISSING_CASE(fb->modifier[plane]);
2885         }
2886
2887         return 2048;
2888 }
2889
/*
 * Validate and compute the main (Y/RGB) surface offset and x/y for a
 * SKL plane.  Results are stored in plane_state->main.  Depends on
 * plane_state->aux.offset having been set up first.
 */
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
        const struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
        const struct drm_framebuffer *fb = plane_state->base.fb;
        unsigned int rotation = plane_state->base.rotation;
        /* src coordinates are 16.16 fixed point */
        int x = plane_state->base.src.x1 >> 16;
        int y = plane_state->base.src.y1 >> 16;
        int w = drm_rect_width(&plane_state->base.src) >> 16;
        int h = drm_rect_height(&plane_state->base.src) >> 16;
        int max_width = skl_max_plane_width(fb, 0, rotation);
        int max_height = 4096;
        u32 alignment, offset, aux_offset = plane_state->aux.offset;

        if (w > max_width || h > max_height) {
                DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
                              w, h, max_width, max_height);
                return -EINVAL;
        }

        intel_add_fb_offsets(&x, &y, plane_state, 0);
        offset = intel_compute_tile_offset(&x, &y, plane_state, 0);

        alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);

        /*
         * AUX surface offset is specified as the distance from the
         * main surface offset, and it must be non-negative. Make
         * sure that is what we will get.
         */
        if (offset > aux_offset)
                offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
                                                  offset, aux_offset & ~(alignment - 1));

        /*
         * When using an X-tiled surface, the plane blows up
         * if the x offset + width exceed the stride.
         *
         * TODO: linear and Y-tiled seem fine, Yf untested,
         */
        if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) {
                int cpp = drm_format_plane_cpp(fb->pixel_format, 0);

                /* walk the offset down (pushing the delta into x) until x+w fits */
                while ((x + w) * cpp > fb->pitches[0]) {
                        if (offset == 0) {
                                DRM_DEBUG_KMS("Unable to find suitable display surface offset\n");
                                return -EINVAL;
                        }

                        offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
                                                          offset, offset - alignment);
                }
        }

        plane_state->main.offset = offset;
        plane_state->main.x = x;
        plane_state->main.y = y;

        return 0;
}
2949
/*
 * Compute the AUX (CbCr) surface offset and x/y for an NV12 plane.
 * Results are stored in plane_state->aux.
 */
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
        const struct drm_framebuffer *fb = plane_state->base.fb;
        unsigned int rotation = plane_state->base.rotation;
        int max_width = skl_max_plane_width(fb, 1, rotation);
        int max_height = 4096;
        /*
         * src coordinates are 16.16 fixed point; the extra shift
         * (>> 17 rather than >> 16) halves them for the 2x2
         * subsampled CbCr plane.
         */
        int x = plane_state->base.src.x1 >> 17;
        int y = plane_state->base.src.y1 >> 17;
        int w = drm_rect_width(&plane_state->base.src) >> 17;
        int h = drm_rect_height(&plane_state->base.src) >> 17;
        u32 offset;

        intel_add_fb_offsets(&x, &y, plane_state, 1);
        offset = intel_compute_tile_offset(&x, &y, plane_state, 1);

        /* FIXME not quite sure how/if these apply to the chroma plane */
        if (w > max_width || h > max_height) {
                DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
                              w, h, max_width, max_height);
                return -EINVAL;
        }

        plane_state->aux.offset = offset;
        plane_state->aux.x = x;
        plane_state->aux.y = y;

        return 0;
}
2978
/*
 * Compute the surface offsets (AUX first, then main) for a SKL plane.
 * Returns 0 on success or a negative error code from the surface checks.
 */
int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
        const struct drm_framebuffer *fb = plane_state->base.fb;
        unsigned int rotation = plane_state->base.rotation;
        int ret;

        /* Rotate src coordinates to match rotated GTT view */
        if (intel_rotation_90_or_270(rotation))
                drm_rect_rotate(&plane_state->base.src,
                                fb->width, fb->height, DRM_ROTATE_270);

        /*
         * Handle the AUX surface first since
         * the main surface setup depends on it.
         */
        if (fb->pixel_format == DRM_FORMAT_NV12) {
                ret = skl_check_nv12_aux_surface(plane_state);
                if (ret)
                        return ret;
        } else {
                /*
                 * No AUX surface: park aux.offset at a huge sentinel so the
                 * "offset > aux_offset" clamp in skl_check_main_surface()
                 * never triggers.
                 */
                plane_state->aux.offset = ~0xfff;
                plane_state->aux.x = 0;
                plane_state->aux.y = 0;
        }

        ret = skl_check_main_surface(plane_state);
        if (ret)
                return ret;

        return 0;
}
3010
/*
 * i9xx_update_primary_plane - program the gen2-gen4 (and VLV/CHV) primary plane
 * @primary: primary plane being updated
 * @crtc_state: committed crtc state (provides the pipe source size)
 * @plane_state: committed plane state (provides fb, rotation, src coordinates)
 *
 * Computes the scanout offsets for the new framebuffer and writes the
 * DSPCNTR/DSPSTRIDE/DSPSURF (DSPADDR on gen < 4) registers.  The fb is
 * expected to already be pinned into the GGTT.
 */
static void i9xx_update_primary_plane(struct drm_plane *primary,
				      const struct intel_crtc_state *crtc_state,
				      const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = primary->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int plane = intel_crtc->plane;
	u32 linear_offset;
	u32 dspcntr;
	i915_reg_t reg = DSPCNTR(plane);
	unsigned int rotation = plane_state->base.rotation;
	/* src coordinates are 16.16 fixed point */
	int x = plane_state->base.src.x1 >> 16;
	int y = plane_state->base.src.y1 >> 16;

	dspcntr = DISPPLANE_GAMMA_ENABLE;

	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (INTEL_INFO(dev)->gen < 4) {
		/* gen2/3 select the pipe the plane feeds via DSPCNTR */
		if (intel_crtc->pipe == PIPE_B)
			dspcntr |= DISPPLANE_SEL_PIPE_B;

		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE(DSPSIZE(plane),
			   ((crtc_state->pipe_src_h - 1) << 16) |
			   (crtc_state->pipe_src_w - 1));
		I915_WRITE(DSPPOS(plane), 0);
	} else if (IS_CHERRYVIEW(dev_priv) && plane == PLANE_B) {
		/* CHV pipe B primary has dedicated size/pos/const-alpha regs */
		I915_WRITE(PRIMSIZE(plane),
			   ((crtc_state->pipe_src_h - 1) << 16) |
			   (crtc_state->pipe_src_w - 1));
		I915_WRITE(PRIMPOS(plane), 0);
		I915_WRITE(PRIMCNSTALPHA(plane), 0);
	}

	/* translate the drm fourcc into the hardware pixel format field */
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* unsupported formats must be rejected earlier at check time */
		BUG();
	}

	if (INTEL_GEN(dev_priv) >= 4 &&
	    fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
		dspcntr |= DISPPLANE_TILED;

	if (IS_G4X(dev_priv))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	intel_add_fb_offsets(&x, &y, plane_state, 0);

	/* gen4+ scans out from DSPSURF plus a tile-aligned offset */
	if (INTEL_INFO(dev)->gen >= 4)
		intel_crtc->dspaddr_offset =
			intel_compute_tile_offset(&x, &y, plane_state, 0);

	if (rotation == DRM_ROTATE_180) {
		dspcntr |= DISPPLANE_ROTATE_180;

		/* with 180 degree rotation the scanout starting point moves
		 * to the opposite corner of the pipe source area */
		x += (crtc_state->pipe_src_w - 1);
		y += (crtc_state->pipe_src_h - 1);
	}

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	/* gen2/3 have no DSPSURF; the linear offset is the scanout offset */
	if (INTEL_INFO(dev)->gen < 4)
		intel_crtc->dspaddr_offset = linear_offset;

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		/* DSPSURF write latches the whole update on gen4+ */
		I915_WRITE(DSPSURF(plane),
			   intel_fb_gtt_offset(fb, rotation) +
			   intel_crtc->dspaddr_offset);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	} else
		I915_WRITE(DSPADDR(plane), i915_gem_object_ggtt_offset(obj, NULL) + linear_offset);
	POSTING_READ(reg);
}
3118
3119 static void i9xx_disable_primary_plane(struct drm_plane *primary,
3120                                        struct drm_crtc *crtc)
3121 {
3122         struct drm_device *dev = crtc->dev;
3123         struct drm_i915_private *dev_priv = to_i915(dev);
3124         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3125         int plane = intel_crtc->plane;
3126
3127         I915_WRITE(DSPCNTR(plane), 0);
3128         if (INTEL_INFO(dev_priv)->gen >= 4)
3129                 I915_WRITE(DSPSURF(plane), 0);
3130         else
3131                 I915_WRITE(DSPADDR(plane), 0);
3132         POSTING_READ(DSPCNTR(plane));
3133 }
3134
/*
 * ironlake_update_primary_plane - program the ILK-BDW primary display plane
 * @primary: primary plane being updated
 * @crtc_state: committed crtc state (provides the pipe source size)
 * @plane_state: committed plane state (provides fb, rotation, src coordinates)
 *
 * Computes the scanout offsets and writes the DSP* registers for the new
 * framebuffer.  HSW/BDW differ from ILK/SNB/IVB in pipe CSC, trickle feed
 * and 180 degree rotation handling.
 */
static void ironlake_update_primary_plane(struct drm_plane *primary,
					  const struct intel_crtc_state *crtc_state,
					  const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = primary->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	int plane = intel_crtc->plane;
	u32 linear_offset;
	u32 dspcntr;
	i915_reg_t reg = DSPCNTR(plane);
	unsigned int rotation = plane_state->base.rotation;
	/* src coordinates are 16.16 fixed point */
	int x = plane_state->base.src.x1 >> 16;
	int y = plane_state->base.src.y1 >> 16;

	dspcntr = DISPPLANE_GAMMA_ENABLE;
	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

	/* translate the drm fourcc into the hardware pixel format field */
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* unsupported formats must be rejected earlier at check time */
		BUG();
	}

	if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
		dspcntr |= DISPPLANE_TILED;

	if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	intel_add_fb_offsets(&x, &y, plane_state, 0);

	intel_crtc->dspaddr_offset =
		intel_compute_tile_offset(&x, &y, plane_state, 0);

	if (rotation == DRM_ROTATE_180) {
		dspcntr |= DISPPLANE_ROTATE_180;

		/* HSW/BDW rotate around the plane offset, the others around
		 * the opposite corner of the pipe source area */
		if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
			x += (crtc_state->pipe_src_w - 1);
			y += (crtc_state->pipe_src_h - 1);
		}
	}

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	/* DSPSURF write latches the whole update */
	I915_WRITE(DSPSURF(plane),
		   intel_fb_gtt_offset(fb, rotation) +
		   intel_crtc->dspaddr_offset);
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
	} else {
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	}
	POSTING_READ(reg);
}
3219
3220 u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
3221                               uint64_t fb_modifier, uint32_t pixel_format)
3222 {
3223         if (fb_modifier == DRM_FORMAT_MOD_NONE) {
3224                 return 64;
3225         } else {
3226                 int cpp = drm_format_plane_cpp(pixel_format, 0);
3227
3228                 return intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
3229         }
3230 }
3231
3232 u32 intel_fb_gtt_offset(struct drm_framebuffer *fb,
3233                         unsigned int rotation)
3234 {
3235         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
3236         struct i915_ggtt_view view;
3237         struct i915_vma *vma;
3238
3239         intel_fill_fb_ggtt_view(&view, fb, rotation);
3240
3241         vma = i915_gem_object_to_ggtt(obj, &view);
3242         if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
3243                  view.type))
3244                 return -1;
3245
3246         return i915_ggtt_offset(vma);
3247 }
3248
3249 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
3250 {
3251         struct drm_device *dev = intel_crtc->base.dev;
3252         struct drm_i915_private *dev_priv = to_i915(dev);
3253
3254         I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
3255         I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
3256         I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
3257 }
3258
3259 /*
3260  * This function detaches (aka. unbinds) unused scalers in hardware
3261  */
3262 static void skl_detach_scalers(struct intel_crtc *intel_crtc)
3263 {
3264         struct intel_crtc_scaler_state *scaler_state;
3265         int i;
3266
3267         scaler_state = &intel_crtc->config->scaler_state;
3268
3269         /* loop through and disable scalers that aren't in use */
3270         for (i = 0; i < intel_crtc->num_scalers; i++) {
3271                 if (!scaler_state->scalers[i].in_use)
3272                         skl_detach_scaler(intel_crtc, i);
3273         }
3274 }
3275
3276 u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
3277                      unsigned int rotation)
3278 {
3279         const struct drm_i915_private *dev_priv = to_i915(fb->dev);
3280         u32 stride = intel_fb_pitch(fb, plane, rotation);
3281
3282         /*
3283          * The stride is either expressed as a multiple of 64 bytes chunks for
3284          * linear buffers or in number of tiles for tiled buffers.
3285          */
3286         if (intel_rotation_90_or_270(rotation)) {
3287                 int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
3288
3289                 stride /= intel_tile_height(dev_priv, fb->modifier[0], cpp);
3290         } else {
3291                 stride /= intel_fb_stride_alignment(dev_priv, fb->modifier[0],
3292                                                     fb->pixel_format);
3293         }
3294
3295         return stride;
3296 }
3297
/*
 * skl_plane_ctl_format - translate a drm fourcc into SKL+ PLANE_CTL format bits
 *
 * Returns the PLANE_CTL_FORMAT_* value (plus byte-order and alpha bits where
 * applicable), or 0 with a MISSING_CASE warning for unhandled formats.
 */
u32 skl_plane_ctl_format(uint32_t pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	/*
	 * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
	 * to be already pre-multiplied. We need to add a knob (or a different
	 * DRM_FORMAT) for user-space to configure that.
	 */
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888 |
			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
	case DRM_FORMAT_XRGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR2101010:
		return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}
3338
/*
 * skl_plane_ctl_tiling - translate a fb modifier into SKL+ PLANE_CTL tiling bits
 *
 * Returns 0 for linear buffers, and also 0 (with a MISSING_CASE warning)
 * for unknown modifiers.
 */
u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_NONE:
		break;
	case I915_FORMAT_MOD_X_TILED:
		return PLANE_CTL_TILED_X;
	case I915_FORMAT_MOD_Y_TILED:
		return PLANE_CTL_TILED_Y;
	case I915_FORMAT_MOD_Yf_TILED:
		return PLANE_CTL_TILED_YF;
	default:
		MISSING_CASE(fb_modifier);
	}

	return 0;
}
3356
/*
 * skl_plane_ctl_rotation - translate a DRM rotation into SKL+ PLANE_CTL bits
 *
 * Returns 0 for DRM_ROTATE_0 and for unknown values (the latter with a
 * MISSING_CASE warning).
 */
u32 skl_plane_ctl_rotation(unsigned int rotation)
{
	switch (rotation) {
	case DRM_ROTATE_0:
		break;
	/*
	 * DRM_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, that's why this swapping.
	 */
	case DRM_ROTATE_90:
		return PLANE_CTL_ROTATE_270;
	case DRM_ROTATE_180:
		return PLANE_CTL_ROTATE_180;
	case DRM_ROTATE_270:
		return PLANE_CTL_ROTATE_90;
	default:
		MISSING_CASE(rotation);
	}

	return 0;
}
3378
/*
 * skylake_update_primary_plane - program SKL+ universal plane 0 for scanout
 * @plane: primary plane being updated
 * @crtc_state: committed crtc state (provides scaler configuration)
 * @plane_state: committed plane state (fb, rotation, pre-computed main surface
 *		 offset/x/y and src/dst rectangles)
 *
 * Writes the PLANE_* registers (and the pipe scaler registers when the plane
 * is scaled), updating plane watermarks first when they are dirty.  The
 * PLANE_SURF write at the end latches the whole update.
 */
static void skylake_update_primary_plane(struct drm_plane *plane,
					 const struct intel_crtc_state *crtc_state,
					 const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
	const struct skl_plane_wm *p_wm =
		&crtc_state->wm.skl.optimal.planes[0];
	int pipe = intel_crtc->pipe;
	u32 plane_ctl;
	unsigned int rotation = plane_state->base.rotation;
	u32 stride = skl_plane_stride(fb, 0, rotation);
	u32 surf_addr = plane_state->main.offset;
	int scaler_id = plane_state->scaler_id;
	int src_x = plane_state->main.x;
	int src_y = plane_state->main.y;
	/* src rect is 16.16 fixed point, dst rect is integer pixels */
	int src_w = drm_rect_width(&plane_state->base.src) >> 16;
	int src_h = drm_rect_height(&plane_state->base.src) >> 16;
	int dst_x = plane_state->base.dst.x1;
	int dst_y = plane_state->base.dst.y1;
	int dst_w = drm_rect_width(&plane_state->base.dst);
	int dst_h = drm_rect_height(&plane_state->base.dst);

	plane_ctl = PLANE_CTL_ENABLE |
		    PLANE_CTL_PIPE_GAMMA_ENABLE |
		    PLANE_CTL_PIPE_CSC_ENABLE;

	plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
	plane_ctl |= skl_plane_ctl_rotation(rotation);

	/* Sizes are 0 based */
	src_w--;
	src_h--;
	dst_w--;
	dst_h--;

	intel_crtc->dspaddr_offset = surf_addr;

	intel_crtc->adjusted_x = src_x;
	intel_crtc->adjusted_y = src_y;

	/* flush new watermarks before touching the plane registers */
	if (wm->dirty_pipes & drm_crtc_mask(&intel_crtc->base))
		skl_write_plane_wm(intel_crtc, p_wm, &wm->ddb, 0);

	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
	I915_WRITE(PLANE_OFFSET(pipe, 0), (src_y << 16) | src_x);
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
	I915_WRITE(PLANE_SIZE(pipe, 0), (src_h << 16) | src_w);

	if (scaler_id >= 0) {
		uint32_t ps_ctrl = 0;

		WARN_ON(!dst_w || !dst_h);
		/* route the pipe scaler output to plane 0 and position the
		 * plane at 0,0 inside the scaler window */
		ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) |
			crtc_state->scaler_state.scalers[scaler_id].mode;
		I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
		I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
		I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
		I915_WRITE(PLANE_POS(pipe, 0), 0);
	} else {
		I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
	}

	/* writing PLANE_SURF arms/latches the whole plane update */
	I915_WRITE(PLANE_SURF(pipe, 0),
		   intel_fb_gtt_offset(fb, rotation) + surf_addr);

	POSTING_READ(PLANE_SURF(pipe, 0));
}
3453
/*
 * skylake_disable_primary_plane - turn off SKL+ universal plane 0
 *
 * Writes zero watermarks when needed, then clears PLANE_CTL and latches
 * the update by zeroing PLANE_SURF.
 */
static void skylake_disable_primary_plane(struct drm_plane *primary,
					  struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
	const struct skl_plane_wm *p_wm = &cstate->wm.skl.optimal.planes[0];
	int pipe = intel_crtc->pipe;

	/*
	 * We only populate skl_results on watermark updates, and if the
	 * plane's visibility isn't actually changing neither is its watermarks.
	 */
	if (!crtc->primary->state->visible)
		skl_write_plane_wm(intel_crtc, p_wm,
				   &dev_priv->wm.skl_results.ddb, 0);

	I915_WRITE(PLANE_CTL(pipe, 0), 0);
	I915_WRITE(PLANE_SURF(pipe, 0), 0);
	POSTING_READ(PLANE_SURF(pipe, 0));
}
3476
/* Assume fb object is pinned & idle & fenced and just update base pointers */
/*
 * Stub kept only to satisfy the ->update_plane mode_set_atomic hook used by
 * the kgdboc panic handler path; always fails with -ENODEV.
 */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			   int x, int y, enum mode_set_atomic state)
{
	/* Support for kgdboc is disabled, this needs a major rework. */
	DRM_ERROR("legacy panic handler not supported any more.\n");

	return -ENODEV;
}
3487
/* Complete any CS-based page flip still pending on any crtc, so userspace
 * gets its flip events even when the flips themselves were lost (e.g. to
 * a GPU reset). */
static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&dev_priv->drm, crtc)
		intel_finish_page_flip_cs(dev_priv, crtc->pipe);
}
3495
3496 static void intel_update_primary_planes(struct drm_device *dev)
3497 {
3498         struct drm_crtc *crtc;
3499
3500         for_each_crtc(dev, crtc) {
3501                 struct intel_plane *plane = to_intel_plane(crtc->primary);
3502                 struct intel_plane_state *plane_state =
3503                         to_intel_plane_state(plane->base.state);
3504
3505                 if (plane_state->base.visible)
3506                         plane->update_plane(&plane->base,
3507                                             to_intel_crtc_state(crtc->state),
3508                                             plane_state);
3509         }
3510 }
3511
/*
 * __intel_display_resume - restore display state after a reset/resume
 * @dev: drm device
 * @state: duplicated atomic state to commit, or NULL to only resync hw state
 *
 * Re-reads the hardware state and re-enables VGA handling; when @state is
 * given, commits it with mode_changed forced on every crtc so the commit
 * recalculates everything instead of taking the fastset path.
 *
 * Returns 0 on success or the error from drm_atomic_commit().
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev);
	i915_redisable_vga(dev);

	if (!state)
		return 0;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_commit(state);

	/* all modeset locks were taken up front, so EDEADLK would be a bug */
	WARN_ON(ret == -EDEADLK);
	return ret;
}
3543
3544 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
3545 {
3546         return intel_has_gpu_reset(dev_priv) &&
3547                 INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv);
3548 }
3549
/*
 * intel_prepare_reset - quiesce the display before a GPU reset
 *
 * Takes mode_config.mutex and all modeset locks; both are intentionally
 * left held on return and released later by intel_finish_reset().  When
 * the reset will clobber the display (or the force-modeset test parameter
 * is set), the current atomic state is duplicated into
 * dev_priv->modeset_restore_state and all crtcs are disabled so
 * intel_finish_reset() can restore them.
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}

	/* reset doesn't touch the display, but flips might get nuked anyway, */
	if (!i915.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		state = NULL;
		DRM_ERROR("Duplicating state failed with %i\n", ret);
		goto err;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
		goto err;
	}

	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
	return;

err:
	/* reached with state == NULL when the duplication itself failed */
	drm_atomic_state_free(state);
}
3601
/*
 * intel_finish_reset - bring the display back after a GPU reset
 *
 * Counterpart to intel_prepare_reset(): completes pending flips, restores
 * the saved modeset state (or just re-programs the primary planes when the
 * reset did not clobber the display), and finally drops the modeset locks
 * that intel_prepare_reset() acquired.
 */
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	int ret;

	/*
	 * Flips in the rings will be nuked by the reset,
	 * so complete all pending flips so that user space
	 * will get its events and not get stuck.
	 */
	intel_complete_page_flips(dev_priv);

	dev_priv->modeset_restore_state = NULL;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		if (!state) {
			/*
			 * Flips in the rings have been nuked by the reset,
			 * so update the base address of all primary
			 * planes to the last fb to make sure we're
			 * showing the correct fb after a reset.
			 *
			 * FIXME: Atomic will make this obsolete since we won't schedule
			 * CS-based flips (which might get lost in gpu resets) any more.
			 */
			intel_update_primary_planes(dev);
		} else {
			ret = __intel_display_resume(dev, state);
			if (ret)
				DRM_ERROR("Restoring old state failed with %i\n", ret);
		}
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_runtime_pm_disable_interrupts(dev_priv);
		intel_runtime_pm_enable_interrupts(dev_priv);

		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev);

		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	/* release the locks taken in intel_prepare_reset() */
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);
}
3663
3664 static bool abort_flip_on_reset(struct intel_crtc *crtc)
3665 {
3666         struct i915_gpu_error *error = &to_i915(crtc->base.dev)->gpu_error;
3667
3668         if (i915_reset_in_progress(error))
3669                 return true;
3670
3671         if (crtc->reset_count != i915_reset_count(error))
3672                 return true;
3673
3674         return false;
3675 }
3676
3677 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
3678 {
3679         struct drm_device *dev = crtc->dev;
3680         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3681         bool pending;
3682
3683         if (abort_flip_on_reset(intel_crtc))
3684                 return false;
3685
3686         spin_lock_irq(&dev->event_lock);
3687         pending = to_intel_crtc(crtc)->flip_work != NULL;
3688         spin_unlock_irq(&dev->event_lock);
3689
3690         return pending;
3691 }
3692
/*
 * intel_update_pipe_config - apply a pipe source size / panel fitter change
 * @crtc: the crtc being updated
 * @old_crtc_state: the previous crtc state (for pfit disable decisions)
 *
 * Used on the fastset path where no full modeset happens: reprograms
 * PIPESRC and enables/disables the panel fitter to match the new state.
 */
static void intel_update_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);

	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
	crtc->base.mode = crtc->base.state->mode;

	DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
		      old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */

	/* PIPESRC stores the size as (w-1, h-1) */
	I915_WRITE(PIPESRC(crtc->pipe),
		   ((pipe_config->pipe_src_w - 1) << 16) |
		   (pipe_config->pipe_src_h - 1));

	/* on skylake this is done by detaching scalers */
	if (INTEL_INFO(dev)->gen >= 9) {
		skl_detach_scalers(crtc);

		if (pipe_config->pch_pfit.enabled)
			skylake_pfit_enable(crtc);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		if (pipe_config->pch_pfit.enabled)
			ironlake_pfit_enable(crtc);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(crtc, true);
	}
}
3734
/*
 * intel_fdi_normal_train - switch the FDI link from training to normal mode
 *
 * Once link training has completed, move both the CPU FDI TX and the PCH
 * FDI RX out of the training patterns into normal pixel transmission, with
 * enhanced framing enabled.
 */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		/* IVB uses a different train-select field than older parts */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
3776
/* The FDI link training functions for ILK/Ibexpeak. */
/*
 * Runs the two-stage FDI link training sequence: pattern 1 until the RX
 * reports bit lock, then pattern 2 until it reports symbol lock.  Each
 * stage polls FDI_RX_IIR up to 5 times and logs an error on timeout.
 */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* poll for bit lock; write the bit back to ack/clear it */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* poll for symbol lock; write the bit back to ack/clear it */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
3870
/*
 * FDI TX voltage-swing / pre-emphasis combinations tried in order during
 * link training on SNB-B (also reused by the IVB manual training path).
 * Each entry is OR'd into FDI_TX_CTL after clearing
 * FDI_LINK_TRAIN_VOL_EMP_MASK.
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
3877
3878 /* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
	   so they can be polled for the training result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX, starting with train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B: start from the lowest vswing/emphasis setting */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* CPT PCH uses the dedicated _CPT train pattern fields */
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Walk the vswing/emphasis table until the RX reports bit lock */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		/* poll FDI_RX_IIR for bit lock at this drive level */
		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				/* write the bit back — presumably
				 * write-1-to-clear IIR semantics; confirm
				 * against the register spec */
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2: switch TX and RX to pattern 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B: reset to the lowest drive level */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Same vswing walk as train 1, now waiting for symbol lock */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
4010
4011 /* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
	   so they can be polled for the training result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX with pattern 1 and the
		 * drive level for this attempt (j/2: each level tried twice) */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		/* poll for bit lock; re-read once in case the status bit
		 * set between the read and the debug print */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			/* no bit lock at this level — retry with the next */
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2: switch both sides to pattern 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
4130
/*
 * Enable the FDI PLLs for this pipe: bring up the PCH FDI RX PLL, switch
 * the RX from Rawclk to PCDclk, then make sure the CPU FDI TX PLL is on.
 * Each step is followed by a posting read and the required warmup delay.
 */
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	/* mirror the pipe's BPC selection from PIPECONF into FDI_RX_CTL */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
4167
/*
 * Counterpart of ironlake_fdi_pll_enable(): switch the FDI RX back to
 * Rawclk, then shut down the CPU FDI TX PLL and the PCH FDI RX PLL,
 * waiting for each to settle.
 */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Disable PCH FDI RX PLL */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
4197
/*
 * Disable the FDI link for this pipe: turn off CPU FDI TX and PCH FDI RX,
 * apply the IBX clock-pointer workaround, and leave both sides parked in
 * train pattern 1 ready for the next enable.
 */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	/* keep FDI rx BPC consistent with PIPECONF while disabling */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* CPT PCH uses the dedicated _CPT train pattern fields */
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
4250
4251 bool intel_has_pending_fb_unpin(struct drm_device *dev)
4252 {
4253         struct intel_crtc *crtc;
4254
4255         /* Note that we don't need to be called with mode_config.lock here
4256          * as our list of CRTC objects is static for the lifetime of the
4257          * device and so cannot disappear as we iterate. Similarly, we can
4258          * happily treat the predicates as racy, atomic checks as userspace
4259          * cannot claim and pin a new fb without at least acquring the
4260          * struct_mutex and so serialising with us.
4261          */
4262         for_each_intel_crtc(dev, crtc) {
4263                 if (atomic_read(&crtc->unpin_work_count) == 0)
4264                         continue;
4265
4266                 if (crtc->flip_work)
4267                         intel_wait_for_vblank(dev, crtc->pipe);
4268
4269                 return true;
4270         }
4271
4272         return false;
4273 }
4274
/*
 * Finish a page flip: detach the work from the crtc, deliver the
 * completion event (if userspace asked for one), release the vblank
 * reference, wake anyone in intel_crtc_wait_for_pending_flips(), and
 * hand the unpin work off to the driver workqueue.
 *
 * NOTE(review): callers appear to hold dev->event_lock (see the
 * spin_lock_irq around the call in intel_crtc_wait_for_pending_flips) —
 * confirm for other call sites.
 */
static void page_flip_completed(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	struct intel_flip_work *work = intel_crtc->flip_work;

	/* detach first so the crtc no longer reports a pending flip */
	intel_crtc->flip_work = NULL;

	if (work->event)
		drm_crtc_send_vblank_event(&intel_crtc->base, work->event);

	drm_crtc_vblank_put(&intel_crtc->base);

	wake_up_all(&dev_priv->pending_flip_queue);
	queue_work(dev_priv->wq, &work->unpin_work);

	trace_i915_flip_complete(intel_crtc->plane,
				 work->pending_flip_obj);
}
4293
/*
 * Wait (interruptibly, up to 60 seconds) for any pending page flip on
 * @crtc to complete. On timeout, a stuck non-MMIO flip is forcibly
 * completed under the event lock.
 *
 * Returns 0 on success or if a stuck flip was cleaned up, or a negative
 * error code if the wait was interrupted.
 */
static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	long ret;

	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));

	ret = wait_event_interruptible_timeout(
					dev_priv->pending_flip_queue,
					!intel_crtc_has_pending_flip(crtc),
					60*HZ);

	if (ret < 0)
		return ret;

	/* ret == 0 means the wait timed out with the flip still pending */
	if (ret == 0) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		struct intel_flip_work *work;

		spin_lock_irq(&dev->event_lock);
		work = intel_crtc->flip_work;
		if (work && !is_mmio_work(work)) {
			WARN_ONCE(1, "Removing stuck page flip\n");
			page_flip_completed(intel_crtc);
		}
		spin_unlock_irq(&dev->event_lock);
	}

	return 0;
}
4325
/*
 * Disable the LPT iCLKIP clock: gate the pixel clock first, then set the
 * SSC disable bit via the sideband interface (under sb_lock).
 */
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
4340
4341 /* Program iCLKIP clock to the desired frequency */
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* always start from the disabled state before reprogramming */
	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	/* All SBI writes below must be serialised by sb_lock */
	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
4418
/*
 * Read back the current iCLKIP configuration and convert it to a clock
 * frequency in kHz — the inverse of lpt_program_iclkip()'s divisor math.
 * Returns 0 if the pixel clock is gated or the SSC modulator is disabled.
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	/* undo "divsel = desired/range - 2; phaseinc = desired % range" */
	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
4455
/*
 * Copy the CPU transcoder's H/V timing registers verbatim into the PCH
 * transcoder so both sides of the FDI link agree on the mode timings.
 */
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
4479
4480 static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
4481 {
4482         struct drm_i915_private *dev_priv = to_i915(dev);
4483         uint32_t temp;
4484
4485         temp = I915_READ(SOUTH_CHICKEN1);
4486         if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
4487                 return;
4488
4489         WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
4490         WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
4491
4492         temp &= ~FDI_BC_BIFURCATION_SELECT;
4493         if (enable)
4494                 temp |= FDI_BC_BIFURCATION_SELECT;
4495
4496         DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
4497         I915_WRITE(SOUTH_CHICKEN1, temp);
4498         POSTING_READ(SOUTH_CHICKEN1);
4499 }
4500
4501 static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
4502 {
4503         struct drm_device *dev = intel_crtc->base.dev;
4504
4505         switch (intel_crtc->pipe) {
4506         case PIPE_A:
4507                 break;
4508         case PIPE_B:
4509                 if (intel_crtc->config->fdi_lanes > 2)
4510                         cpt_set_fdi_bc_bifurcation(dev, false);
4511                 else
4512                         cpt_set_fdi_bc_bifurcation(dev, true);
4513
4514                 break;
4515         case PIPE_C:
4516                 cpt_set_fdi_bc_bifurcation(dev, true);
4517
4518                 break;
4519         default:
4520                 BUG();
4521         }
4522 }
4523
4524 /* Return which DP Port should be selected for Transcoder DP control */
static enum port
intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;

	/* Return the port of the first DP/eDP encoder on this crtc */
	for_each_encoder_on_crtc(dev, crtc, encoder) {
		if (encoder->type == INTEL_OUTPUT_DP ||
		    encoder->type == INTEL_OUTPUT_EDP)
			return enc_to_dig_port(&encoder->base)->port;
	}

	/*
	 * NOTE(review): -1 is not a named enum port value; the only
	 * visible caller (ironlake_pch_enable) BUG()s on anything but
	 * PORT_B/C/D — confirm no caller treats -1 as a valid port.
	 */
	return -1;
}
4539
4540 /*
4541  * Enable PCH resources required for PCH ports:
4542  *   - PCH PLLs
4543  *   - FDI training & RX/TX
4544  *   - update transcoder timings
4545  *   - DP transcoding bits
4546  *   - transcoder
4547  */
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	if (IS_IVYBRIDGE(dev_priv))
		ivybridge_update_fdi_bc_bifurcation(intel_crtc);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc);

	/* We need to program the right clock selection before writing the pixel
	 * mutliplier into the DPLL. */
	if (HAS_PCH_CPT(dev_priv)) {
		u32 sel;

		/* route DPLL A or B to this pipe's PCH transcoder */
		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (intel_crtc->config->shared_dpll ==
		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	intel_enable_shared_dpll(intel_crtc);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);

	/* leave training mode — switch TX/RX to normal link operation */
	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev_priv) &&
	    intel_crtc_has_dp_encoder(intel_crtc->config)) {
		const struct drm_display_mode *adjusted_mode =
			&intel_crtc->config->base.adjusted_mode;
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		i915_reg_t reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= TRANS_DP_OUTPUT_ENABLE;
		temp |= bpc << 9; /* same format but at 11:9 */

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		/* route the transcoder to the DP port driving this crtc */
		switch (intel_trans_dp_port_sel(crtc)) {
		case PORT_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PORT_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PORT_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			BUG();
		}

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(dev_priv, pipe);
}
4638
/*
 * LPT variant of the PCH enable sequence: program iCLKIP, copy the CPU
 * transcoder timings to the PCH transcoder, then enable it. The PIPE_A /
 * TRANSCODER_A constants reflect that this path only uses the transcoder
 * at the A offset (see the assert below).
 */
static void lpt_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);

	lpt_program_iclkip(crtc);

	/* Set transcoder timing. */
	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
4655
/*
 * cpt_verify_modeset - sanity check that a CPT pipe is running
 *
 * After a modeset on CougarPoint, verify the pipe actually scans out by
 * checking that the scanline counter (PIPEDSL) advances within a few ms.
 */
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		/* Retry once before declaring the pipe stuck. */
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
	}
}
4669
4670 static int
4671 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
4672                   unsigned scaler_user, int *scaler_id, unsigned int rotation,
4673                   int src_w, int src_h, int dst_w, int dst_h)
4674 {
4675         struct intel_crtc_scaler_state *scaler_state =
4676                 &crtc_state->scaler_state;
4677         struct intel_crtc *intel_crtc =
4678                 to_intel_crtc(crtc_state->base.crtc);
4679         int need_scaling;
4680
4681         need_scaling = intel_rotation_90_or_270(rotation) ?
4682                 (src_h != dst_w || src_w != dst_h):
4683                 (src_w != dst_w || src_h != dst_h);
4684
4685         /*
4686          * if plane is being disabled or scaler is no more required or force detach
4687          *  - free scaler binded to this plane/crtc
4688          *  - in order to do this, update crtc->scaler_usage
4689          *
4690          * Here scaler state in crtc_state is set free so that
4691          * scaler can be assigned to other user. Actual register
4692          * update to free the scaler is done in plane/panel-fit programming.
4693          * For this purpose crtc/plane_state->scaler_id isn't reset here.
4694          */
4695         if (force_detach || !need_scaling) {
4696                 if (*scaler_id >= 0) {
4697                         scaler_state->scaler_users &= ~(1 << scaler_user);
4698                         scaler_state->scalers[*scaler_id].in_use = 0;
4699
4700                         DRM_DEBUG_KMS("scaler_user index %u.%u: "
4701                                 "Staged freeing scaler id %d scaler_users = 0x%x\n",
4702                                 intel_crtc->pipe, scaler_user, *scaler_id,
4703                                 scaler_state->scaler_users);
4704                         *scaler_id = -1;
4705                 }
4706                 return 0;
4707         }
4708
4709         /* range checks */
4710         if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
4711                 dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
4712
4713                 src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
4714                 dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
4715                 DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
4716                         "size is out of scaler range\n",
4717                         intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
4718                 return -EINVAL;
4719         }
4720
4721         /* mark this plane as a scaler user in crtc_state */
4722         scaler_state->scaler_users |= (1 << scaler_user);
4723         DRM_DEBUG_KMS("scaler_user index %u.%u: "
4724                 "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
4725                 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
4726                 scaler_state->scaler_users);
4727
4728         return 0;
4729 }
4730
4731 /**
4732  * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
4733  *
4734  * @state: crtc's scaler state
4735  *
4736  * Return
4737  *     0 - scaler_usage updated successfully
4738  *    error - requested scaling cannot be supported or other error condition
4739  */
4740 int skl_update_scaler_crtc(struct intel_crtc_state *state)
4741 {
4742         struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
4743         const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
4744
4745         DRM_DEBUG_KMS("Updating scaler for [CRTC:%d:%s] scaler_user index %u.%u\n",
4746                       intel_crtc->base.base.id, intel_crtc->base.name,
4747                       intel_crtc->pipe, SKL_CRTC_INDEX);
4748
4749         return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
4750                 &state->scaler_state.scaler_id, DRM_ROTATE_0,
4751                 state->pipe_src_w, state->pipe_src_h,
4752                 adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
4753 }
4754
4755 /**
4756  * skl_update_scaler_plane - Stages update to scaler state for a given plane.
4757  *
4758  * @state: crtc's scaler state
4759  * @plane_state: atomic plane state to update
4760  *
4761  * Return
4762  *     0 - scaler_usage updated successfully
4763  *    error - requested scaling cannot be supported or other error condition
4764  */
4765 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
4766                                    struct intel_plane_state *plane_state)
4767 {
4768
4769         struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
4770         struct intel_plane *intel_plane =
4771                 to_intel_plane(plane_state->base.plane);
4772         struct drm_framebuffer *fb = plane_state->base.fb;
4773         int ret;
4774
4775         bool force_detach = !fb || !plane_state->base.visible;
4776
4777         DRM_DEBUG_KMS("Updating scaler for [PLANE:%d:%s] scaler_user index %u.%u\n",
4778                       intel_plane->base.base.id, intel_plane->base.name,
4779                       intel_crtc->pipe, drm_plane_index(&intel_plane->base));
4780
4781         ret = skl_update_scaler(crtc_state, force_detach,
4782                                 drm_plane_index(&intel_plane->base),
4783                                 &plane_state->scaler_id,
4784                                 plane_state->base.rotation,
4785                                 drm_rect_width(&plane_state->base.src) >> 16,
4786                                 drm_rect_height(&plane_state->base.src) >> 16,
4787                                 drm_rect_width(&plane_state->base.dst),
4788                                 drm_rect_height(&plane_state->base.dst));
4789
4790         if (ret || plane_state->scaler_id < 0)
4791                 return ret;
4792
4793         /* check colorkey */
4794         if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
4795                 DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
4796                               intel_plane->base.base.id,
4797                               intel_plane->base.name);
4798                 return -EINVAL;
4799         }
4800
4801         /* Check src format */
4802         switch (fb->pixel_format) {
4803         case DRM_FORMAT_RGB565:
4804         case DRM_FORMAT_XBGR8888:
4805         case DRM_FORMAT_XRGB8888:
4806         case DRM_FORMAT_ABGR8888:
4807         case DRM_FORMAT_ARGB8888:
4808         case DRM_FORMAT_XRGB2101010:
4809         case DRM_FORMAT_XBGR2101010:
4810         case DRM_FORMAT_YUYV:
4811         case DRM_FORMAT_YVYU:
4812         case DRM_FORMAT_UYVY:
4813         case DRM_FORMAT_VYUY:
4814                 break;
4815         default:
4816                 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
4817                               intel_plane->base.base.id, intel_plane->base.name,
4818                               fb->base.id, fb->pixel_format);
4819                 return -EINVAL;
4820         }
4821
4822         return 0;
4823 }
4824
4825 static void skylake_scaler_disable(struct intel_crtc *crtc)
4826 {
4827         int i;
4828
4829         for (i = 0; i < crtc->num_scalers; i++)
4830                 skl_detach_scaler(crtc, i);
4831 }
4832
/*
 * skylake_pfit_enable - program the panel fitter on SKL+ via a pipe scaler
 *
 * On SKL+ panel fitting is done with one of the pipe scalers; the scaler
 * id must already have been allocated in the crtc's scaler state.
 */
static void skylake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	struct intel_crtc_scaler_state *scaler_state =
		&crtc->config->scaler_state;

	DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);

	if (crtc->config->pch_pfit.enabled) {
		int id;

		/* pfit needs a scaler staged earlier by skl_update_scaler_crtc(). */
		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
			DRM_ERROR("Requesting pfit without getting a scaler first\n");
			return;
		}

		id = scaler_state->scaler_id;
		/* Enable the scaler, then program the window position and size. */
		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);

		DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
	}
}
4860
/*
 * ironlake_pfit_enable - program the ILK-style panel fitter for @crtc
 *
 * Programs PF_CTL plus the fitter window position/size when panel
 * fitting is enabled in the crtc's config.
 */
static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;

	if (crtc->config->pch_pfit.enabled) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
			/* IVB/HSW select the pipe in the PF_CTL register itself. */
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
						 PF_PIPE_SEL_IVB(pipe));
		else
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
	}
}
4881
/*
 * hsw_enable_ips - enable Intermediate Pixel Storage on @crtc
 *
 * No-op unless the crtc's config has IPS enabled. On BDW the enable goes
 * through the pcode mailbox; on HSW it is a direct IPS_CTL write followed
 * by a wait for the bit to latch at the next vblank.
 */
void hsw_enable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc->config->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev_priv)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_wait_for_register(dev_priv,
					    IPS_CTL, IPS_ENABLE, IPS_ENABLE,
					    50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}
4919
/*
 * hsw_disable_ips - disable Intermediate Pixel Storage on @crtc
 *
 * Mirror of hsw_enable_ips(): pcode mailbox on BDW (with a wait for the
 * disable to complete), direct IPS_CTL write on HSW. Ends with a vblank
 * wait so the plane can be disabled safely afterwards.
 */
void hsw_disable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc->config->ips_enabled)
		return;

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev_priv)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
		if (intel_wait_for_register(dev_priv,
					    IPS_CTL, IPS_ENABLE, 0,
					    42))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev, crtc->pipe);
}
4946
4947 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
4948 {
4949         if (intel_crtc->overlay) {
4950                 struct drm_device *dev = intel_crtc->base.dev;
4951                 struct drm_i915_private *dev_priv = to_i915(dev);
4952
4953                 mutex_lock(&dev->struct_mutex);
4954                 dev_priv->mm.interruptible = false;
4955                 (void) intel_overlay_switch_off(intel_crtc->overlay);
4956                 dev_priv->mm.interruptible = true;
4957                 mutex_unlock(&dev->struct_mutex);
4958         }
4959
4960         /* Let userspace switch the overlay on again. In most cases userspace
4961          * has to recompute where to put it anyway.
4962          */
4963 }
4964
4965 /**
4966  * intel_post_enable_primary - Perform operations after enabling primary plane
4967  * @crtc: the CRTC whose primary plane was just enabled
4968  *
4969  * Performs potentially sleeping operations that must be done after the primary
4970  * plane is enabled, such as updating FBC and IPS.  Note that this may be
4971  * called due to an explicit primary plane update, or due to an implicit
4972  * re-enable that is caused when a sprite plane is updated to no longer
4973  * completely hide the primary plane.
4974  */
4975 static void
4976 intel_post_enable_primary(struct drm_crtc *crtc)
4977 {
4978         struct drm_device *dev = crtc->dev;
4979         struct drm_i915_private *dev_priv = to_i915(dev);
4980         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4981         int pipe = intel_crtc->pipe;
4982
4983         /*
4984          * FIXME IPS should be fine as long as one plane is
4985          * enabled, but in practice it seems to have problems
4986          * when going from primary only to sprite only and vice
4987          * versa.
4988          */
4989         hsw_enable_ips(intel_crtc);
4990
4991         /*
4992          * Gen2 reports pipe underruns whenever all planes are disabled.
4993          * So don't enable underrun reporting before at least some planes
4994          * are enabled.
4995          * FIXME: Need to fix the logic to work when we turn off all planes
4996          * but leave the pipe running.
4997          */
4998         if (IS_GEN2(dev_priv))
4999                 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5000
5001         /* Underruns don't always raise interrupts, so check manually. */
5002         intel_check_cpu_fifo_underruns(dev_priv);
5003         intel_check_pch_fifo_underruns(dev_priv);
5004 }
5005
/* FIXME move all this to pre_plane_update() with proper state tracking */
/*
 * Counterpart of intel_post_enable_primary(): run before the primary
 * plane is disabled (explicitly or when a sprite fully covers it).
 */
static void
intel_pre_disable_primary(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * FIXME IPS should be fine as long as one plane is
	 * enabled, but in practice it seems to have problems
	 * when going from primary only to sprite only and vice
	 * versa.
	 */
	hsw_disable_ips(intel_crtc);
}
5032
/* FIXME get rid of this and use pre_plane_update */
/*
 * Non-atomic variant of the pre-disable-primary hook: additionally kicks
 * the GMCH display out of memory self-refresh before the plane goes away.
 */
static void
intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	intel_pre_disable_primary(crtc);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv)) {
		intel_set_memory_cxsr(dev_priv, false);
		dev_priv->wm.vlv.cxsr = false;
		intel_wait_for_vblank(dev, pipe);
	}
}
5059
/*
 * intel_post_plane_update - post-commit bookkeeping after plane updates
 * @old_crtc_state: crtc state before the commit (crtc->base.state is the
 *                  new state by now)
 *
 * Flips frontbuffer tracking, re-allows cxsr, updates watermarks when a
 * post-vblank update was requested, and runs the FBC/primary-plane
 * post-enable hooks when the primary just became visible.
 */
static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_atomic_state *old_state = old_crtc_state->base.state;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);
	struct drm_plane *primary = crtc->base.primary;
	struct drm_plane_state *old_pri_state =
		drm_atomic_get_existing_plane_state(old_state, primary);

	intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);

	crtc->wm.cxsr_allowed = true;

	if (pipe_config->update_wm_post && pipe_config->base.active)
		intel_update_watermarks(&crtc->base);

	/* The primary plane may not be part of this commit. */
	if (old_pri_state) {
		struct intel_plane_state *primary_state =
			to_intel_plane_state(primary->state);
		struct intel_plane_state *old_primary_state =
			to_intel_plane_state(old_pri_state);

		intel_fbc_post_update(crtc);

		/* Primary became visible (or was re-enabled by a modeset). */
		if (primary_state->base.visible &&
		    (needs_modeset(&pipe_config->base) ||
		     !old_primary_state->base.visible))
			intel_post_enable_primary(&crtc->base);
	}
}
5091
/*
 * intel_pre_plane_update - pre-commit work before plane updates take effect
 * @old_crtc_state: crtc state before the commit (crtc->base.state is
 *                  already the new state)
 *
 * Handles FBC pre-update, primary-plane pre-disable, self-refresh
 * (cxsr) teardown on GMCH platforms, the IVB sprite-scaling LP
 * watermark workaround, and pre-vblank (intermediate) watermark
 * programming for non-modeset updates.
 */
static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);
	struct drm_atomic_state *old_state = old_crtc_state->base.state;
	struct drm_plane *primary = crtc->base.primary;
	struct drm_plane_state *old_pri_state =
		drm_atomic_get_existing_plane_state(old_state, primary);
	bool modeset = needs_modeset(&pipe_config->base);

	/* The primary plane may not be part of this commit. */
	if (old_pri_state) {
		struct intel_plane_state *primary_state =
			to_intel_plane_state(primary->state);
		struct intel_plane_state *old_primary_state =
			to_intel_plane_state(old_pri_state);

		intel_fbc_pre_update(crtc, pipe_config, primary_state);

		/* Primary is about to disappear (or be torn down by a modeset). */
		if (old_primary_state->base.visible &&
		    (modeset || !primary_state->base.visible))
			intel_pre_disable_primary(&crtc->base);
	}

	if (pipe_config->disable_cxsr && HAS_GMCH_DISPLAY(dev_priv)) {
		crtc->wm.cxsr_allowed = false;

		/*
		 * Vblank time updates from the shadow to live plane control register
		 * are blocked if the memory self-refresh mode is active at that
		 * moment. So to make sure the plane gets truly disabled, disable
		 * first the self-refresh mode. The self-refresh enable bit in turn
		 * will be checked/applied by the HW only at the next frame start
		 * event which is after the vblank start event, so we need to have a
		 * wait-for-vblank between disabling the plane and the pipe.
		 */
		if (old_crtc_state->base.active) {
			intel_set_memory_cxsr(dev_priv, false);
			dev_priv->wm.vlv.cxsr = false;
			intel_wait_for_vblank(dev, crtc->pipe);
		}
	}

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling.  LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (pipe_config->disable_lp_wm) {
		ilk_disable_lp_wm(dev);
		intel_wait_for_vblank(dev, crtc->pipe);
	}

	/*
	 * If we're doing a modeset, we're done.  No need to do any pre-vblank
	 * watermark programming here.
	 */
	if (needs_modeset(&pipe_config->base))
		return;

	/*
	 * For platforms that support atomic watermarks, program the
	 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
	 * will be the intermediate values that are safe for both pre- and
	 * post- vblank; when vblank happens, the 'active' values will be set
	 * to the final 'target' values and we'll do this again to get the
	 * optimal watermarks.  For gen9+ platforms, the values we program here
	 * will be the final target values which will get automatically latched
	 * at vblank time; no further programming will be necessary.
	 *
	 * If a platform hasn't been transitioned to atomic watermarks yet,
	 * we'll continue to update watermarks the old way, if flags tell
	 * us to.
	 */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(pipe_config);
	else if (pipe_config->update_wm_pre)
		intel_update_watermarks(&crtc->base);
}
5175
5176 static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
5177 {
5178         struct drm_device *dev = crtc->dev;
5179         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5180         struct drm_plane *p;
5181         int pipe = intel_crtc->pipe;
5182
5183         intel_crtc_dpms_overlay_disable(intel_crtc);
5184
5185         drm_for_each_plane_mask(p, dev, plane_mask)
5186                 to_intel_plane(p)->disable_plane(p, crtc);
5187
5188         /*
5189          * FIXME: Once we grow proper nuclear flip support out of this we need
5190          * to compute the mask of flip planes precisely. For the time being
5191          * consider this a flip to a NULL plane.
5192          */
5193         intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe));
5194 }
5195
5196 static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
5197                                           struct intel_crtc_state *crtc_state,
5198                                           struct drm_atomic_state *old_state)
5199 {
5200         struct drm_connector_state *old_conn_state;
5201         struct drm_connector *conn;
5202         int i;
5203
5204         for_each_connector_in_state(old_state, conn, old_conn_state, i) {
5205                 struct drm_connector_state *conn_state = conn->state;
5206                 struct intel_encoder *encoder =
5207                         to_intel_encoder(conn_state->best_encoder);
5208
5209                 if (conn_state->crtc != crtc)
5210                         continue;
5211
5212                 if (encoder->pre_pll_enable)
5213                         encoder->pre_pll_enable(encoder, crtc_state, conn_state);
5214         }
5215 }
5216
5217 static void intel_encoders_pre_enable(struct drm_crtc *crtc,
5218                                       struct intel_crtc_state *crtc_state,
5219                                       struct drm_atomic_state *old_state)
5220 {
5221         struct drm_connector_state *old_conn_state;
5222         struct drm_connector *conn;
5223         int i;
5224
5225         for_each_connector_in_state(old_state, conn, old_conn_state, i) {
5226                 struct drm_connector_state *conn_state = conn->state;
5227                 struct intel_encoder *encoder =
5228                         to_intel_encoder(conn_state->best_encoder);
5229
5230                 if (conn_state->crtc != crtc)
5231                         continue;
5232
5233                 if (encoder->pre_enable)
5234                         encoder->pre_enable(encoder, crtc_state, conn_state);
5235         }
5236 }
5237
5238 static void intel_encoders_enable(struct drm_crtc *crtc,
5239                                   struct intel_crtc_state *crtc_state,
5240                                   struct drm_atomic_state *old_state)
5241 {
5242         struct drm_connector_state *old_conn_state;
5243         struct drm_connector *conn;
5244         int i;
5245
5246         for_each_connector_in_state(old_state, conn, old_conn_state, i) {
5247                 struct drm_connector_state *conn_state = conn->state;
5248                 struct intel_encoder *encoder =
5249                         to_intel_encoder(conn_state->best_encoder);
5250
5251                 if (conn_state->crtc != crtc)
5252                         continue;
5253
5254                 encoder->enable(encoder, crtc_state, conn_state);
5255                 intel_opregion_notify_encoder(encoder, true);
5256         }
5257 }
5258
5259 static void intel_encoders_disable(struct drm_crtc *crtc,
5260                                    struct intel_crtc_state *old_crtc_state,
5261                                    struct drm_atomic_state *old_state)
5262 {
5263         struct drm_connector_state *old_conn_state;
5264         struct drm_connector *conn;
5265         int i;
5266
5267         for_each_connector_in_state(old_state, conn, old_conn_state, i) {
5268                 struct intel_encoder *encoder =
5269                         to_intel_encoder(old_conn_state->best_encoder);
5270
5271                 if (old_conn_state->crtc != crtc)
5272                         continue;
5273
5274                 intel_opregion_notify_encoder(encoder, false);
5275                 encoder->disable(encoder, old_crtc_state, old_conn_state);
5276         }
5277 }
5278
5279 static void intel_encoders_post_disable(struct drm_crtc *crtc,
5280                                         struct intel_crtc_state *old_crtc_state,
5281                                         struct drm_atomic_state *old_state)
5282 {
5283         struct drm_connector_state *old_conn_state;
5284         struct drm_connector *conn;
5285         int i;
5286
5287         for_each_connector_in_state(old_state, conn, old_conn_state, i) {
5288                 struct intel_encoder *encoder =
5289                         to_intel_encoder(old_conn_state->best_encoder);
5290
5291                 if (old_conn_state->crtc != crtc)
5292                         continue;
5293
5294                 if (encoder->post_disable)
5295                         encoder->post_disable(encoder, old_crtc_state, old_conn_state);
5296         }
5297 }
5298
5299 static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
5300                                             struct intel_crtc_state *old_crtc_state,
5301                                             struct drm_atomic_state *old_state)
5302 {
5303         struct drm_connector_state *old_conn_state;
5304         struct drm_connector *conn;
5305         int i;
5306
5307         for_each_connector_in_state(old_state, conn, old_conn_state, i) {
5308                 struct intel_encoder *encoder =
5309                         to_intel_encoder(old_conn_state->best_encoder);
5310
5311                 if (old_conn_state->crtc != crtc)
5312                         continue;
5313
5314                 if (encoder->post_pll_disable)
5315                         encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
5316         }
5317 }
5318
/*
 * ironlake_crtc_enable - full pipe enable sequence for ILK-class hardware
 * @pipe_config: new crtc state being enabled
 * @old_state: the atomic state of the whole commit
 *
 * The ordering here is strict: underrun reporting is suppressed, PLL and
 * M/N values are set up, timings are programmed, encoders' pre_enable
 * hooks run, FDI PLL / pfit / LUT are enabled, the pipe is started and
 * finally the PCH side and encoder enable hooks follow.
 */
static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
				 struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	if (WARN_ON(intel_crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (intel_crtc->config->has_pch_encoder)
		intel_prepare_shared_dpll(intel_crtc);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);
	intel_set_pipe_src_size(intel_crtc);

	/* FDI link M/N values are only needed when driving a PCH port. */
	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	ironlake_set_pipeconf(crtc);

	intel_crtc->active = true;

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	if (intel_crtc->config->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ironlake_fdi_pll_enable(intel_crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(&pipe_config->base);

	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(intel_crtc->config);
	intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_pch_enable(crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev, intel_crtc->pipe);

	/* Must wait for vblank to avoid spurious PCH FIFO underruns */
	if (intel_crtc->config->has_pch_encoder)
		intel_wait_for_vblank(dev, pipe);
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
5405
5406 /* IPS only exists on ULT machines and is tied to pipe A. */
5407 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
5408 {
5409         return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
5410 }
5411
/*
 * Full modeset enable sequence for HSW+ (DDI based) pipes: pre-PLL
 * encoder hooks, shared DPLL, pipe timings, pipeconf/pipemisc/CSC,
 * FDI link training for the PCH (LPT) case, DDI pipe clock and
 * transcoder function, pfit, LUTs, watermarks, and finally the pipe
 * and encoders. Steps that the DSI encoder handles itself are skipped
 * for DSI transcoders.
 */
static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
				struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	/* Enabling an already-active pipe is a driver bug. */
	if (WARN_ON(intel_crtc->active))
		return;

	/*
	 * Suppress PCH underrun reporting while enabling; on HSW/BDW the
	 * only PCH transcoder is transcoder A. Re-armed at the bottom.
	 */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      false);

	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);

	if (intel_crtc->config->shared_dpll)
		intel_enable_shared_dpll(intel_crtc);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_set_pipe_timings(intel_crtc);

	intel_set_pipe_src_size(intel_crtc);

	/* The pipe multiplier register exists only for non-eDP, non-DSI
	 * transcoders. */
	if (cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(cpu_transcoder)) {
		I915_WRITE(PIPE_MULT(cpu_transcoder),
			   intel_crtc->config->pixel_multiplier - 1);
	}

	/* FDI link M/N values only matter when driving a PCH port. */
	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	if (!transcoder_is_dsi(cpu_transcoder))
		haswell_set_pipeconf(crtc);

	haswell_set_pipemisc(crtc);

	intel_color_set_csc(&pipe_config->base);

	intel_crtc->active = true;

	/* CPU underruns are only expected (and thus suppressed) when FDI
	 * training will follow. */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	else
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	if (intel_crtc->config->has_pch_encoder)
		dev_priv->display.fdi_link_train(crtc);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_enable_pipe_clock(intel_crtc);

	/* Gen9+ uses the unified scaler block instead of the old pfit. */
	if (INTEL_INFO(dev)->gen >= 9)
		skylake_pfit_enable(intel_crtc);
	else
		ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(&pipe_config->base);

	intel_ddi_set_pipe_settings(crtc);
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_enable_transcoder_func(crtc);

	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(pipe_config);
	else
		intel_update_watermarks(crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		lpt_pch_enable(crtc);

	if (intel_crtc->config->dp_encoder_is_mst)
		intel_ddi_set_vc_payload_alloc(crtc, true);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);

	if (intel_crtc->config->has_pch_encoder) {
		/*
		 * NOTE(review): the two back-to-back vblank waits look
		 * intentional (letting the pipe/PCH settle before re-arming
		 * underrun reporting) — confirm against bspec.
		 */
		intel_wait_for_vblank(dev, pipe);
		intel_wait_for_vblank(dev, pipe);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      true);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev, hsw_workaround_pipe);
		intel_wait_for_vblank(dev, hsw_workaround_pipe);
	}
}
5526
5527 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
5528 {
5529         struct drm_device *dev = crtc->base.dev;
5530         struct drm_i915_private *dev_priv = to_i915(dev);
5531         int pipe = crtc->pipe;
5532
5533         /* To avoid upsetting the power well on haswell only disable the pfit if
5534          * it's in use. The hw state code will make sure we get this right. */
5535         if (force || crtc->config->pch_pfit.enabled) {
5536                 I915_WRITE(PF_CTL(pipe), 0);
5537                 I915_WRITE(PF_WIN_POS(pipe), 0);
5538                 I915_WRITE(PF_WIN_SZ(pipe), 0);
5539         }
5540 }
5541
/*
 * Modeset disable sequence for ILK-style pipes: encoders off, vblanks
 * off, pipe off, pfit off, FDI off, then tear down the PCH transcoder
 * and its DP/DPLL routing (CPT only) before disabling the FDI PLL.
 */
static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
				  struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	if (intel_crtc->config->has_pch_encoder) {
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
	}

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(intel_crtc);

	ironlake_pfit_disable(intel_crtc, false);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_fdi_disable(crtc);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	if (intel_crtc->config->has_pch_encoder) {
		ironlake_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev_priv)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		ironlake_fdi_pll_disable(intel_crtc);
	}

	/* Re-arm the underrun reporting suppressed above. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
5602
/*
 * Modeset disable sequence for HSW+ (DDI based) pipes: encoders off,
 * vblanks off, pipe off, MST payload freed, DDI transcoder function
 * and pipe clock off, scaler/pfit off. DSI transcoders skip the steps
 * handled by the DSI encoder itself.
 */
static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
				 struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	/* Suppress PCH underruns during teardown; on HSW/BDW the only PCH
	 * transcoder is transcoder A. Re-armed at the bottom. */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      false);

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_disable_pipe(intel_crtc);

	if (intel_crtc->config->dp_encoder_is_mst)
		intel_ddi_set_vc_payload_alloc(crtc, false);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);

	/* Gen9+ uses the unified scaler block instead of the old pfit. */
	if (INTEL_INFO(dev)->gen >= 9)
		skylake_scaler_disable(intel_crtc);
	else
		ironlake_pfit_disable(intel_crtc, false);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_disable_pipe_clock(intel_crtc);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	if (old_crtc_state->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      true);
}
5645
/*
 * Enable the GMCH panel fitter with the pre-computed ratios and
 * control value from the CRTC state. No-op when the state says the
 * pfit is unused.
 */
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *pipe_config = crtc->config;

	if (!pipe_config->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc->pipe);

	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
5669
/*
 * Map a DDI port to the display power domain that feeds its lanes.
 * Unknown ports are flagged and fall back to the generic "other port"
 * domain.
 */
static enum intel_display_power_domain port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}
5688
/*
 * Map a DDI port to the power domain of its AUX channel. Unknown
 * ports are flagged and fall back to AUX A.
 */
static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_AUX_A;
	case PORT_B:
		return POWER_DOMAIN_AUX_B;
	case PORT_C:
		return POWER_DOMAIN_AUX_C;
	case PORT_D:
		return POWER_DOMAIN_AUX_D;
	case PORT_E:
		/* FIXME: Check VBT for actual wiring of PORT E */
		return POWER_DOMAIN_AUX_D;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_AUX_A;
	}
}
5708
/*
 * Return the display power domain an encoder's port needs, based on
 * the encoder's output type.
 */
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
		/* Only DDI platforms should ever use this output type */
		WARN_ON_ONCE(!HAS_DDI(dev_priv));
		/* fall through - treat like the other digital port types */
	case INTEL_OUTPUT_DP:
	case INTEL_OUTPUT_HDMI:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_DP_MST:
		/* MST streams draw power through their primary port. */
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_ANALOG:
		return POWER_DOMAIN_PORT_CRT;
	case INTEL_OUTPUT_DSI:
		return POWER_DOMAIN_PORT_DSI;
	default:
		return POWER_DOMAIN_PORT_OTHER;
	}
}
5735
/*
 * Return the AUX channel power domain an encoder needs, based on the
 * encoder's output type.
 */
enum intel_display_power_domain
intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
	case INTEL_OUTPUT_HDMI:
		/*
		 * Only DDI platforms should ever use these output types.
		 * We can get here after the HDMI detect code has already set
		 * the type of the shared encoder. Since we can't be sure
		 * what's the status of the given connectors, play safe and
		 * run the DP detection too.
		 */
		WARN_ON_ONCE(!HAS_DDI(dev_priv));
		/* fall through - treat like a DP/eDP port */
	case INTEL_OUTPUT_DP:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		return port_to_aux_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_DP_MST:
		/* MST streams use the AUX channel of their primary port. */
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
		return port_to_aux_power_domain(intel_dig_port->port);
	default:
		MISSING_CASE(intel_encoder->type);
		return POWER_DOMAIN_AUX_A;
	}
}
5765
/*
 * Compute the bitmask of display power domains @crtc_state needs: its
 * pipe, its transcoder, optionally the panel fitter, every attached
 * encoder's port domain and, when a shared DPLL is used, the PLL
 * domain. An inactive CRTC needs none, so 0 is returned.
 */
static unsigned long get_crtc_power_domains(struct drm_crtc *crtc,
					    struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	unsigned long mask;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (!crtc_state->base.active)
		return 0;

	mask = BIT(POWER_DOMAIN_PIPE(pipe));
	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
	/* force_thru keeps the pfit power domain on even when unused. */
	if (crtc_state->pch_pfit.enabled ||
	    crtc_state->pch_pfit.force_thru)
		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

	drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		mask |= BIT(intel_display_port_power_domain(intel_encoder));
	}

	if (crtc_state->shared_dpll)
		mask |= BIT(POWER_DOMAIN_PLLS);

	return mask;
}
5796
5797 static unsigned long
5798 modeset_get_crtc_power_domains(struct drm_crtc *crtc,
5799                                struct intel_crtc_state *crtc_state)
5800 {
5801         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
5802         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5803         enum intel_display_power_domain domain;
5804         unsigned long domains, new_domains, old_domains;
5805
5806         old_domains = intel_crtc->enabled_power_domains;
5807         intel_crtc->enabled_power_domains = new_domains =
5808                 get_crtc_power_domains(crtc, crtc_state);
5809
5810         domains = new_domains & ~old_domains;
5811
5812         for_each_power_domain(domain, domains)
5813                 intel_display_power_get(dev_priv, domain);
5814
5815         return old_domains & ~new_domains;
5816 }
5817
/* Drop one reference on each power domain set in @domains. */
static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
				      unsigned long domains)
{
	enum intel_display_power_domain domain;

	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
}
5826
5827 static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
5828 {
5829         int max_cdclk_freq = dev_priv->max_cdclk_freq;
5830
5831         if (INTEL_INFO(dev_priv)->gen >= 9 ||
5832             IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5833                 return max_cdclk_freq;
5834         else if (IS_CHERRYVIEW(dev_priv))
5835                 return max_cdclk_freq*95/100;
5836         else if (INTEL_INFO(dev_priv)->gen < 4)
5837                 return 2*max_cdclk_freq*90/100;
5838         else
5839                 return max_cdclk_freq*90/100;
5840 }
5841
5842 static int skl_calc_cdclk(int max_pixclk, int vco);
5843
/*
 * Determine the platform's maximum CD clock frequency and cache it
 * (along with the derived max dot clock) in dev_priv. On SKL/KBL the
 * limit comes from the SKL_DFSM fuse register, on BDW from FUSE_STRAP
 * and the SKU; other platforms use fixed per-platform values.
 */
static void intel_update_max_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
		int max_cdclk, vco;

		vco = dev_priv->skl_preferred_vco_freq;
		WARN_ON(vco != 8100000 && vco != 8640000);

		/*
		 * Use the lower (vco 8640) cdclk values as a
		 * first guess. skl_calc_cdclk() will correct it
		 * if the preferred vco is 8100 instead.
		 */
		if (limit == SKL_DFSM_CDCLK_LIMIT_675)
			max_cdclk = 617143;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
			max_cdclk = 540000;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
			max_cdclk = 432000;
		else
			max_cdclk = 308571;

		dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
	} else if (IS_BROXTON(dev_priv)) {
		dev_priv->max_cdclk_freq = 624000;
	} else if (IS_BROADWELL(dev_priv))  {
		/*
		 * FIXME with extra cooling we can allow
		 * 540 MHz for ULX and 675 Mhz for ULT.
		 * How can we know if extra cooling is
		 * available? PCI ID, VTB, something else?
		 */
		if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULX(dev_priv))
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULT(dev_priv))
			dev_priv->max_cdclk_freq = 540000;
		else
			dev_priv->max_cdclk_freq = 675000;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->max_cdclk_freq = 320000;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev_priv->max_cdclk_freq = 400000;
	} else {
		/* otherwise assume cdclk is fixed */
		dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
	}

	dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);

	DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
			 dev_priv->max_cdclk_freq);

	DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
			 dev_priv->max_dotclk_freq);
}
5904
/*
 * Re-read the current cdclk from hardware into dev_priv->cdclk_freq
 * and, on VLV/CHV, keep the GMBus clock divider in sync with it.
 */
static void intel_update_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);

	if (INTEL_GEN(dev_priv) >= 9)
		DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz, VCO: %d kHz, ref: %d kHz\n",
				 dev_priv->cdclk_freq, dev_priv->cdclk_pll.vco,
				 dev_priv->cdclk_pll.ref);
	else
		DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
				 dev_priv->cdclk_freq);

	/*
	 * 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
	 * Programmng [sic] note: bit[9:2] should be programmed to the number
	 * of cdclk that generates 4MHz reference clock freq which is used to
	 * generate GMBus clock. This will vary with the cdclk freq.
	 */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
}
5928
/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
static int skl_cdclk_decimal(int cdclk)
{
	int khz_above_one_mhz = cdclk - 1000;

	return DIV_ROUND_CLOSEST(khz_above_one_mhz, 500);
}
5934
/*
 * Map a BXT cdclk frequency to the DE PLL vco (ref * ratio) it must
 * be derived from. Returns 0 when cdclk equals the PLL reference
 * clock, i.e. the PLL can stay disabled.
 */
static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
{
	int ratio;

	if (cdclk == dev_priv->cdclk_pll.ref)
		return 0;

	switch (cdclk) {
	default:
		MISSING_CASE(cdclk);
		/* fall through - unknown freqs get the ratio 60 group */
	case 144000:
	case 288000:
	case 384000:
	case 576000:
		ratio = 60;
		break;
	case 624000:
		ratio = 65;
		break;
	}

	return dev_priv->cdclk_pll.ref * ratio;
}
5958
/*
 * Disable the BXT DE PLL, wait for it to unlock, and clear the cached
 * vco so later code knows the PLL is off.
 */
static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(BXT_DE_PLL_ENABLE, 0);

	/* Timeout 200us */
	if (intel_wait_for_register(dev_priv,
				    BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 0,
				    1))
		DRM_ERROR("timeout waiting for DE PLL unlock\n");

	dev_priv->cdclk_pll.vco = 0;
}
5971
/*
 * Program the BXT DE PLL ratio for the requested @vco, enable the
 * PLL, wait for lock, and cache the new vco.
 */
static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
{
	/* The hardware ratio field is vco expressed in ref clock units. */
	int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk_pll.ref);
	u32 val;

	val = I915_READ(BXT_DE_PLL_CTL);
	val &= ~BXT_DE_PLL_RATIO_MASK;
	val |= BXT_DE_PLL_RATIO(ratio);
	I915_WRITE(BXT_DE_PLL_CTL, val);

	I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);

	/* Timeout 200us */
	if (intel_wait_for_register(dev_priv,
				    BXT_DE_PLL_ENABLE,
				    BXT_DE_PLL_LOCK,
				    BXT_DE_PLL_LOCK,
				    1))
		DRM_ERROR("timeout waiting for DE PLL lock\n");

	dev_priv->cdclk_pll.vco = vco;
}
5994
/*
 * Program a new BXT cdclk: notify pcode of the upcoming change,
 * reprogram the DE PLL if the required vco differs from the current
 * one, write the new cd2x divider and decimal frequency to CDCLK_CTL,
 * then report the final frequency to pcode and refresh the cached
 * cdclk state.
 */
static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
{
	u32 val, divider;
	int vco, ret;

	vco = bxt_de_pll_vco(dev_priv, cdclk);

	DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);

	/* cdclk = vco / 2 / div{1,1.5,2,4} */
	switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
	case 8:
		divider = BXT_CDCLK_CD2X_DIV_SEL_4;
		break;
	case 4:
		divider = BXT_CDCLK_CD2X_DIV_SEL_2;
		break;
	case 3:
		divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
		break;
	case 2:
		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
		break;
	default:
		/* Only legal here when bypassing the PLL (cdclk == ref). */
		WARN_ON(cdclk != dev_priv->cdclk_pll.ref);
		WARN_ON(vco != 0);

		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
		break;
	}

	/* Inform power controller of upcoming frequency change */
	mutex_lock(&dev_priv->rps.hw_lock);
	/*
	 * NOTE(review): 0x80000000 presumably requests the maximum voltage
	 * level ahead of the frequency change — confirm with pcode docs.
	 */
	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
				      0x80000000);
	mutex_unlock(&dev_priv->rps.hw_lock);

	if (ret) {
		DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
			  ret, cdclk);
		return;
	}

	/* Changing vco requires a full PLL disable + re-enable cycle. */
	if (dev_priv->cdclk_pll.vco != 0 &&
	    dev_priv->cdclk_pll.vco != vco)
		bxt_de_pll_disable(dev_priv);

	if (dev_priv->cdclk_pll.vco != vco)
		bxt_de_pll_enable(dev_priv, vco);

	val = divider | skl_cdclk_decimal(cdclk);
	/*
	 * FIXME if only the cd2x divider needs changing, it could be done
	 * without shutting off the pipe (if only one pipe is active).
	 */
	val |= BXT_CDCLK_CD2X_PIPE_NONE;
	/*
	 * Disable SSA Precharge when CD clock frequency < 500 MHz,
	 * enable otherwise.
	 */
	if (cdclk >= 500000)
		val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
	I915_WRITE(CDCLK_CTL, val);

	/* Report the final frequency to pcode, in 25 MHz units. */
	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
				      DIV_ROUND_UP(cdclk, 25000));
	mutex_unlock(&dev_priv->rps.hw_lock);

	if (ret) {
		DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
			  ret, cdclk);
		return;
	}

	intel_update_cdclk(&dev_priv->drm);
}
6072
/*
 * Verify the cdclk state the BIOS left behind. If the DE PLL is off,
 * or CDCLK_CTL does not match what we would have programmed for the
 * current frequency, force a full PLL disable/enable and cdclk
 * reprogram on the next bxt_set_cdclk().
 */
static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
	u32 cdctl, expected;

	intel_update_cdclk(&dev_priv->drm);

	if (dev_priv->cdclk_pll.vco == 0 ||
	    dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
		goto sanitize;

	/* DPLL okay; verify the cdclock
	 *
	 * Some BIOS versions leave an incorrect decimal frequency value and
	 * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
	 * so sanitize this register.
	 */
	cdctl = I915_READ(CDCLK_CTL);
	/*
	 * Let's ignore the pipe field, since BIOS could have configured the
	 * dividers both synching to an active pipe, or asynchronously
	 * (PIPE_NONE).
	 */
	cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;

	expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
		   skl_cdclk_decimal(dev_priv->cdclk_freq);
	/*
	 * Disable SSA Precharge when CD clock frequency < 500 MHz,
	 * enable otherwise.
	 */
	if (dev_priv->cdclk_freq >= 500000)
		expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;

	if (cdctl == expected)
		/* All well; nothing to sanitize */
		return;

sanitize:
	DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");

	/* force cdclk programming */
	dev_priv->cdclk_freq = 0;

	/* force full PLL disable + enable */
	dev_priv->cdclk_pll.vco = -1;
}
6119
/*
 * Driver-load cdclk init for BXT: sanitize whatever the BIOS left,
 * and only reprogram if the sanitize pass flagged the state invalid.
 */
void bxt_init_cdclk(struct drm_i915_private *dev_priv)
{
	bxt_sanitize_cdclk(dev_priv);

	/* Sanitize left a valid frequency and PLL state — keep them. */
	if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0)
		return;

	/*
	 * FIXME:
	 * - The initial CDCLK needs to be read from VBT.
	 *   Need to make this change after VBT has changes for BXT.
	 */
	bxt_set_cdclk(dev_priv, bxt_calc_cdclk(0));
}
6134
/* Drop cdclk back to the PLL reference clock (PLL bypass) on teardown. */
void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
{
	bxt_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref);
}
6139
/*
 * Pick the lowest SKL cdclk frequency (in kHz) that can still drive
 * @max_pixclk, for the given DPLL0 @vco. The 8640 MHz vco yields the
 * 308.571/432/540/617.143 MHz steps; any other vco (8100 MHz) yields
 * the 337.5/450/540/675 MHz steps.
 */
static int skl_calc_cdclk(int max_pixclk, int vco)
{
	if (vco == 8640000) {
		if (max_pixclk <= 308571)
			return 308571;
		if (max_pixclk <= 432000)
			return 432000;
		if (max_pixclk <= 540000)
			return 540000;
		return 617143;
	}

	if (max_pixclk <= 337500)
		return 337500;
	if (max_pixclk <= 450000)
		return 450000;
	if (max_pixclk <= 540000)
		return 540000;
	return 675000;
}
6162
/*
 * Read back the current DPLL0 hardware state and record the derived
 * VCO frequency in dev_priv->cdclk_pll. vco stays 0 when DPLL0 is
 * disabled or programmed in an unexpected mode.
 */
static void
skl_dpll0_update(struct drm_i915_private *dev_priv)
{
	u32 val;

	dev_priv->cdclk_pll.ref = 24000;
	dev_priv->cdclk_pll.vco = 0;

	val = I915_READ(LCPLL1_CTL);
	/* DPLL0 not enabled -> no VCO to report */
	if ((val & LCPLL_PLL_ENABLE) == 0)
		return;

	/* enabled but not locked would be a hardware anomaly */
	if (WARN_ON((val & LCPLL_PLL_LOCK) == 0))
		return;

	val = I915_READ(DPLL_CTRL1);

	/* expect DP mode (not HDMI), no SSC, link rate override set */
	if (WARN_ON((val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
			    DPLL_CTRL1_SSC(SKL_DPLL0) |
			    DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
		    DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
		return;

	/* the programmed link rate implies which VCO DPLL0 runs at */
	switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) {
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0):
		dev_priv->cdclk_pll.vco = 8100000;
		break;
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0):
		dev_priv->cdclk_pll.vco = 8640000;
		break;
	default:
		MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
		break;
	}
}
6202
6203 void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco)
6204 {
6205         bool changed = dev_priv->skl_preferred_vco_freq != vco;
6206
6207         dev_priv->skl_preferred_vco_freq = vco;
6208
6209         if (changed)
6210                 intel_update_max_cdclk(&dev_priv->drm);
6211 }
6212
/*
 * Enable DPLL0 at the requested VCO frequency (8100000 or 8640000 kHz).
 * Also programs the minimum CDCLK for that VCO beforehand and records
 * the VCO as the new preferred one.
 */
static void
skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
{
	int min_cdclk = skl_calc_cdclk(0, vco);
	u32 val;

	WARN_ON(vco != 8100000 && vco != 8640000);

	/* select the minimum CDCLK before enabling DPLL 0 */
	val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk);
	I915_WRITE(CDCLK_CTL, val);
	POSTING_READ(CDCLK_CTL);

	/*
	 * We always enable DPLL0 with the lowest link rate possible, but still
	 * taking into account the VCO required to operate the eDP panel at the
	 * desired frequency. The usual DP link rates operate with a VCO of
	 * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
	 * The modeset code is responsible for the selection of the exact link
	 * rate later on, with the constraint of choosing a frequency that
	 * works with vco.
	 */
	val = I915_READ(DPLL_CTRL1);

	val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
		 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
	val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	/* link rate 1080 implies the 8640 VCO, 810 implies 8100 */
	if (vco == 8640000)
		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
					    SKL_DPLL0);
	else
		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
					    SKL_DPLL0);

	I915_WRITE(DPLL_CTRL1, val);
	POSTING_READ(DPLL_CTRL1);

	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);

	/* wait up to 5 ms for the PLL to report lock */
	if (intel_wait_for_register(dev_priv,
				    LCPLL1_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
				    5))
		DRM_ERROR("DPLL0 not locked\n");

	dev_priv->cdclk_pll.vco = vco;

	/* We'll want to keep using the current vco from now on. */
	skl_set_preferred_cdclk_vco(dev_priv, vco);
}
6262
/* Disable DPLL0 and wait (up to 1 ms) for the lock bit to clear. */
static void
skl_dpll0_disable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
	if (intel_wait_for_register(dev_priv,
				   LCPLL1_CTL, LCPLL_PLL_LOCK, 0,
				   1))
		DRM_ERROR("Couldn't disable DPLL0\n");

	dev_priv->cdclk_pll.vco = 0;
}
6274
/*
 * Ask the PCU (power control unit) whether it is ready for a CDCLK
 * change. Returns true only when the mailbox read succeeded and the
 * ready bit is set.
 */
static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 val;

	/* inform PCU we want to change CDCLK */
	val = SKL_CDCLK_PREPARE_FOR_CHANGE;
	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
}
6288
/* Poll the PCU every 10 us, for at most 3 ms, until it signals ready. */
static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
{
	return _wait_for(skl_cdclk_pcu_ready(dev_priv), 3000, 10) == 0;
}
6293
/*
 * Program a new CDCLK frequency (and DPLL0 VCO) on Skylake.
 *
 * Sequence: handshake with the PCU, reprogram DPLL0 if the VCO
 * changes, write the new frequency select/decimal into CDCLK_CTL,
 * then acknowledge the change to the PCU.
 */
static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
{
	struct drm_device *dev = &dev_priv->drm;
	u32 freq_select, pcu_ack;

	/* 24000 kHz == PLL bypass, which must go hand in hand with vco == 0 */
	WARN_ON((cdclk == 24000) != (vco == 0));

	DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);

	if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
		DRM_ERROR("failed to inform PCU about cdclk change\n");
		return;
	}

	/* set CDCLK_CTL */
	switch (cdclk) {
	case 450000:
	case 432000:
		freq_select = CDCLK_FREQ_450_432;
		pcu_ack = 1;
		break;
	case 540000:
		freq_select = CDCLK_FREQ_540;
		pcu_ack = 2;
		break;
	case 308571:
	case 337500:
	default:
		/* unknown frequencies fall back to the lowest bin */
		freq_select = CDCLK_FREQ_337_308;
		pcu_ack = 0;
		break;
	case 617143:
	case 675000:
		freq_select = CDCLK_FREQ_675_617;
		pcu_ack = 3;
		break;
	}

	/* a different VCO requires a full DPLL0 disable/enable cycle */
	if (dev_priv->cdclk_pll.vco != 0 &&
	    dev_priv->cdclk_pll.vco != vco)
		skl_dpll0_disable(dev_priv);

	if (dev_priv->cdclk_pll.vco != vco)
		skl_dpll0_enable(dev_priv, vco);

	I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk));
	POSTING_READ(CDCLK_CTL);

	/* inform PCU of the change */
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_update_cdclk(dev);
}
6349
6350 static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
6351
/* Drop CDCLK back to the PLL reference frequency (DPLL0 bypass). */
void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
	skl_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref, 0);
}
6356
/*
 * Boot-time CDCLK bring-up for Skylake. Keeps a valid pre-os
 * configuration if there is one, otherwise programs the preferred
 * VCO (defaulting to 8.1 GHz) with its minimum CDCLK.
 */
void skl_init_cdclk(struct drm_i915_private *dev_priv)
{
	int cdclk, vco;

	skl_sanitize_cdclk(dev_priv);

	if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0) {
		/*
		 * Use the current vco as our initial
		 * guess as to what the preferred vco is.
		 */
		if (dev_priv->skl_preferred_vco_freq == 0)
			skl_set_preferred_cdclk_vco(dev_priv,
						    dev_priv->cdclk_pll.vco);
		return;
	}

	/* no usable pre-os state: start fresh from the preferred VCO */
	vco = dev_priv->skl_preferred_vco_freq;
	if (vco == 0)
		vco = 8100000;
	cdclk = skl_calc_cdclk(0, vco);

	skl_set_cdclk(dev_priv, cdclk, vco);
}
6381
/*
 * Validate the CDCLK configuration left behind by the pre-os (BIOS).
 * On any inconsistency, zero cdclk_freq and set vco to -1 so that the
 * next skl_set_cdclk() performs a full reprogram (including a DPLL0
 * disable + enable cycle).
 */
static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
	uint32_t cdctl, expected;

	/*
	 * check if the pre-os intialized the display
	 * There is SWF18 scratchpad register defined which is set by the
	 * pre-os which can be used by the OS drivers to check the status
	 */
	if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
		goto sanitize;

	intel_update_cdclk(&dev_priv->drm);
	/* Is PLL enabled and locked ? */
	if (dev_priv->cdclk_pll.vco == 0 ||
	    dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
		goto sanitize;

	/* DPLL okay; verify the cdclock
	 *
	 * Noticed in some instances that the freq selection is correct but
	 * decimal part is programmed wrong from BIOS where pre-os does not
	 * enable display. Verify the same as well.
	 */
	cdctl = I915_READ(CDCLK_CTL);
	expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
		skl_cdclk_decimal(dev_priv->cdclk_freq);
	if (cdctl == expected)
		/* All well; nothing to sanitize */
		return;

sanitize:
	DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");

	/* force cdclk programming */
	dev_priv->cdclk_freq = 0;
	/* force full PLL disable + enable */
	dev_priv->cdclk_pll.vco = -1;
}
6421
/* Adjust CDclk dividers to allow high res or save power if possible */
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val, cmd;

	/* software tracking must match what the hardware reports */
	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
					!= dev_priv->cdclk_freq);

	/* pick the Punit voltage level for the requested frequency */
	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
		cmd = 2;
	else if (cdclk == 266667)
		cmd = 1;
	else
		cmd = 0;

	/* request the voltage change through the Punit and wait for it */
	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK;
	val |= (cmd << DSPFREQGUAR_SHIFT);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	mutex_lock(&dev_priv->sb_lock);

	if (cdclk == 400000) {
		u32 divider;

		/* divider derived from HPLL: (2*hpll/cdclk) - 1 */
		divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

		/* adjust cdclk divider */
		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
		val &= ~CCK_FREQUENCY_VALUES;
		val |= divider;
		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);

		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
			      CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
			     50))
			DRM_ERROR("timed out waiting for CDclk change\n");
	}

	/* adjust self-refresh exit latency value */
	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
	val &= ~0x7f;

	/*
	 * For high bandwidth configs, we set a higher latency in the bunit
	 * so that the core display fetch happens in time to avoid underruns.
	 */
	if (cdclk == 400000)
		val |= 4500 / 250; /* 4.5 usec */
	else
		val |= 3000 / 250; /* 3.0 usec */
	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);

	mutex_unlock(&dev_priv->sb_lock);

	intel_update_cdclk(dev);
}
6487
/*
 * Program a new CDCLK frequency on Cherryview. Unlike VLV, only the
 * Punit divider write is needed; unsupported frequencies are rejected.
 */
static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val, cmd;

	/* software tracking must match what the hardware reports */
	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
						!= dev_priv->cdclk_freq);

	switch (cdclk) {
	case 333333:
	case 320000:
	case 266667:
	case 200000:
		break;
	default:
		MISSING_CASE(cdclk);
		return;
	}

	/*
	 * Specs are full of misinformation, but testing on actual
	 * hardware has shown that we just need to write the desired
	 * CCK divider into the Punit register.
	 */
	cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK_CHV;
	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_update_cdclk(dev);
}
6528
6529 static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
6530                                  int max_pixclk)
6531 {
6532         int freq_320 = (dev_priv->hpll_freq <<  1) % 320000 != 0 ? 333333 : 320000;
6533         int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;
6534
6535         /*
6536          * Really only a few cases to deal with, as only 4 CDclks are supported:
6537          *   200MHz
6538          *   267MHz
6539          *   320/333MHz (depends on HPLL freq)
6540          *   400MHz (VLV only)
6541          * So we check to see whether we're above 90% (VLV) or 95% (CHV)
6542          * of the lower bin and adjust if needed.
6543          *
6544          * We seem to get an unstable or solid color picture at 200MHz.
6545          * Not sure what's wrong. For now use 200MHz only when all pipes
6546          * are off.
6547          */
6548         if (!IS_CHERRYVIEW(dev_priv) &&
6549             max_pixclk > freq_320*limit/100)
6550                 return 400000;
6551         else if (max_pixclk > 266667*limit/100)
6552                 return freq_320;
6553         else if (max_pixclk > 0)
6554                 return 266667;
6555         else
6556                 return 200000;
6557 }
6558
/*
 * Pick the lowest Broxton CDCLK frequency (kHz) that still covers
 * max_pixclk.
 */
static int bxt_calc_cdclk(int max_pixclk)
{
	static const int freqs[] = { 144000, 288000, 384000, 576000, 624000 };
	int i;

	for (i = 0; i < 4; i++) {
		if (max_pixclk <= freqs[i])
			return freqs[i];
	}

	/* pixel clock exceeds every lower bin -> max CDCLK */
	return freqs[4];
}
6572
/* Compute the max pixel clock for new configuration. */
static int intel_mode_max_pixclk(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	unsigned max_pixclk = 0, i;
	enum pipe pipe;

	/* start from the currently committed per-pipe pixel clocks */
	memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
	       sizeof(intel_state->min_pixclk));

	/* override entries for any crtc touched by this atomic state */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		int pixclk = 0;

		if (crtc_state->enable)
			pixclk = crtc_state->adjusted_mode.crtc_clock;

		intel_state->min_pixclk[i] = pixclk;
	}

	/* the answer is the maximum across all pipes, old and new */
	for_each_pipe(dev_priv, pipe)
		max_pixclk = max(intel_state->min_pixclk[pipe], max_pixclk);

	return max_pixclk;
}
6601
/*
 * Compute the VLV/CHV cdclk for an atomic state. dev_cdclk is dropped
 * to the all-pipes-off minimum when no crtc remains active.
 */
static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int max_pixclk = intel_mode_max_pixclk(dev, state);
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(state);

	intel_state->cdclk = intel_state->dev_cdclk =
		valleyview_calc_cdclk(dev_priv, max_pixclk);

	if (!intel_state->active_crtcs)
		intel_state->dev_cdclk = valleyview_calc_cdclk(dev_priv, 0);

	return 0;
}
6618
/*
 * Compute the BXT cdclk for an atomic state. dev_cdclk is dropped to
 * the all-pipes-off minimum when no crtc remains active.
 */
static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
{
	int max_pixclk = ilk_max_pixel_rate(state);
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(state);

	intel_state->cdclk = intel_state->dev_cdclk =
		bxt_calc_cdclk(max_pixclk);

	if (!intel_state->active_crtcs)
		intel_state->dev_cdclk = bxt_calc_cdclk(0);

	return 0;
}
6633
/*
 * Program the PFI credits in GCI_CONTROL. The credit count depends on
 * whether cdclk runs at or above czclk; defaults are written first as
 * a workaround before the real value is latched with RESEND.
 */
static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
{
	unsigned int credits, default_credits;

	if (IS_CHERRYVIEW(dev_priv))
		default_credits = PFI_CREDIT(12);
	else
		default_credits = PFI_CREDIT(8);

	if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) {
		/* CHV suggested value is 31 or 63 */
		if (IS_CHERRYVIEW(dev_priv))
			credits = PFI_CREDIT_63;
		else
			credits = PFI_CREDIT(15);
	} else {
		credits = default_credits;
	}

	/*
	 * WA - write default credits before re-programming
	 * FIXME: should we also set the resend bit here?
	 */
	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
		   default_credits);

	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
		   credits | PFI_CREDIT_RESEND);

	/*
	 * FIXME is this guaranteed to clear
	 * immediately or should we poll for it?
	 */
	WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
}
6669
/*
 * Commit-time cdclk reprogramming for VLV/CHV: apply the frequency
 * computed during the check phase and refresh the PFI credits.
 */
static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	unsigned req_cdclk = old_intel_state->dev_cdclk;

	/*
	 * FIXME: We can end up here with all power domains off, yet
	 * with a CDCLK frequency other than the minimum. To account
	 * for this take the PIPE-A power domain, which covers the HW
	 * blocks needed for the following programming. This can be
	 * removed once it's guaranteed that we get here either with
	 * the minimum CDCLK set, or the required power domains
	 * enabled.
	 */
	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);

	if (IS_CHERRYVIEW(dev_priv))
		cherryview_set_cdclk(dev, req_cdclk);
	else
		valleyview_set_cdclk(dev, req_cdclk);

	vlv_program_pfi_credits(dev_priv);

	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
}
6698
6699 static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
6700                                    struct drm_atomic_state *old_state)
6701 {
6702         struct drm_crtc *crtc = pipe_config->base.crtc;
6703         struct drm_device *dev = crtc->dev;
6704         struct drm_i915_private *dev_priv = to_i915(dev);
6705         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6706         int pipe = intel_crtc->pipe;
6707
6708         if (WARN_ON(intel_crtc->active))
6709                 return;
6710
6711         if (intel_crtc_has_dp_encoder(intel_crtc->config))
6712                 intel_dp_set_m_n(intel_crtc, M1_N1);
6713
6714         intel_set_pipe_timings(intel_crtc);
6715         intel_set_pipe_src_size(intel_crtc);
6716
6717         if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
6718                 struct drm_i915_private *dev_priv = to_i915(dev);
6719
6720                 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
6721                 I915_WRITE(CHV_CANVAS(pipe), 0);
6722         }
6723
6724         i9xx_set_pipeconf(intel_crtc);
6725
6726         intel_crtc->active = true;
6727
6728         intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6729
6730         intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
6731
6732         if (IS_CHERRYVIEW(dev_priv)) {
6733                 chv_prepare_pll(intel_crtc, intel_crtc->config);
6734                 chv_enable_pll(intel_crtc, intel_crtc->config);
6735         } else {
6736                 vlv_prepare_pll(intel_crtc, intel_crtc->config);
6737                 vlv_enable_pll(intel_crtc, intel_crtc->config);
6738         }
6739
6740         intel_encoders_pre_enable(crtc, pipe_config, old_state);
6741
6742         i9xx_pfit_enable(intel_crtc);
6743
6744         intel_color_load_luts(&pipe_config->base);
6745
6746         intel_update_watermarks(crtc);
6747         intel_enable_pipe(intel_crtc);
6748
6749         assert_vblank_disabled(crtc);
6750         drm_crtc_vblank_on(crtc);
6751
6752         intel_encoders_enable(crtc, pipe_config, old_state);
6753 }
6754
/* Write the precomputed FP0/FP1 PLL divider values for this pipe. */
static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
	I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
}
6763
/*
 * Full pipe enable sequence for gen2-gen4 style hardware: PLL
 * dividers, pipe timings, pipeconf, PLL, panel fitter, LUTs,
 * watermarks, then pipe and encoders.
 */
static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
			     struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;

	if (WARN_ON(intel_crtc->active))
		return;

	i9xx_set_pll_dividers(intel_crtc);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);
	intel_set_pipe_src_size(intel_crtc);

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	/* underrun reporting is not enabled on gen2 here */
	if (!IS_GEN2(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	i9xx_enable_pll(intel_crtc);

	i9xx_pfit_enable(intel_crtc);

	intel_color_load_luts(&pipe_config->base);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);
}
6807
/*
 * Disable the gmch panel fitter if this crtc's state says it is in
 * use. Must be called with the pipe already off.
 */
static void i9xx_pfit_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* nothing to do if the pfit was never enabled for this crtc */
	if (!crtc->config->gmch_pfit.control)
		return;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
			 I915_READ(PFIT_CONTROL));
	I915_WRITE(PFIT_CONTROL, 0);
}
6822
/*
 * Full pipe disable sequence for gen2-gen4/VLV/CHV: encoders, pipe,
 * panel fitter, then the PLL (unless a DSI encoder owns it).
 */
static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
			      struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN2(dev_priv))
		intel_wait_for_vblank(dev, pipe);

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(intel_crtc);

	i9xx_pfit_disable(intel_crtc);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	/* DSI manages its own PLL; for everything else pick the right one */
	if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(intel_crtc);
	}

	intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);

	/* underrun reporting was never enabled on gen2 */
	if (!IS_GEN2(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
}
6864
6865 static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
6866 {
6867         struct intel_encoder *encoder;
6868         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6869         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6870         enum intel_display_power_domain domain;
6871         unsigned long domains;
6872         struct drm_atomic_state *state;
6873         struct intel_crtc_state *crtc_state;
6874         int ret;
6875
6876         if (!intel_crtc->active)
6877                 return;
6878
6879         if (to_intel_plane_state(crtc->primary->state)->base.visible) {
6880                 WARN_ON(intel_crtc->flip_work);
6881
6882                 intel_pre_disable_primary_noatomic(crtc);
6883
6884                 intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
6885                 to_intel_plane_state(crtc->primary->state)->base.visible = false;
6886         }
6887
6888         state = drm_atomic_state_alloc(crtc->dev);
6889         state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
6890
6891         /* Everything's already locked, -EDEADLK can't happen. */
6892         crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
6893         ret = drm_atomic_add_affected_connectors(state, crtc);
6894
6895         WARN_ON(IS_ERR(crtc_state) || ret);
6896
6897         dev_priv->display.crtc_disable(crtc_state, state);
6898
6899         drm_atomic_state_free(state);
6900
6901         DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
6902                       crtc->base.id, crtc->name);
6903
6904         WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
6905         crtc->state->active = false;
6906         intel_crtc->active = false;
6907         crtc->enabled = false;
6908         crtc->state->connector_mask = 0;
6909         crtc->state->encoder_mask = 0;
6910
6911         for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
6912                 encoder->base.crtc = NULL;
6913
6914         intel_fbc_disable(intel_crtc);
6915         intel_update_watermarks(crtc);
6916         intel_disable_shared_dpll(intel_crtc);
6917
6918         domains = intel_crtc->enabled_power_domains;
6919         for_each_power_domain(domain, domains)
6920                 intel_display_power_put(dev_priv, domain);
6921         intel_crtc->enabled_power_domains = 0;
6922
6923         dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
6924         dev_priv->min_pixclk[intel_crtc->pipe] = 0;
6925 }
6926
6927 /*
6928  * turn all crtc's off, but do not adjust state
6929  * This has to be paired with a call to intel_modeset_setup_hw_state.
6930  */
6931 int intel_display_suspend(struct drm_device *dev)
6932 {
6933         struct drm_i915_private *dev_priv = to_i915(dev);
6934         struct drm_atomic_state *state;
6935         int ret;
6936
6937         state = drm_atomic_helper_suspend(dev);
6938         ret = PTR_ERR_OR_ZERO(state);
6939         if (ret)
6940                 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
6941         else
6942                 dev_priv->modeset_restore_state = state;
6943         return ret;
6944 }
6945
/* Default encoder destroy hook: clean up the drm encoder and free it. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}
6953
/* Cross check the actual hw state with our own modeset state tracking (and it's
 * internal consistency). */
static void intel_connector_verify_state(struct intel_connector *connector)
{
	struct drm_crtc *crtc = connector->base.state->crtc;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.base.id,
		      connector->base.name);

	if (connector->get_hw_state(connector)) {
		/* hardware says the connector is on: crtc/encoder links
		 * in the atomic state must all agree */
		struct intel_encoder *encoder = connector->encoder;
		struct drm_connector_state *conn_state = connector->base.state;

		I915_STATE_WARN(!crtc,
			 "connector enabled without attached crtc\n");

		if (!crtc)
			return;

		I915_STATE_WARN(!crtc->state->active,
		      "connector is active, but attached crtc isn't\n");

		/* MST encoders are handled by their fake encoders */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		/* hardware says off: nothing should claim the connector */
		I915_STATE_WARN(crtc && crtc->state->active,
			"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
6992
6993 int intel_connector_init(struct intel_connector *connector)
6994 {
6995         drm_atomic_helper_connector_reset(&connector->base);
6996
6997         if (!connector->base.state)
6998                 return -ENOMEM;
6999
7000         return 0;
7001 }
7002
7003 struct intel_connector *intel_connector_alloc(void)
7004 {
7005         struct intel_connector *connector;
7006
7007         connector = kzalloc(sizeof *connector, GFP_KERNEL);
7008         if (!connector)
7009                 return NULL;
7010
7011         if (intel_connector_init(connector) < 0) {
7012                 kfree(connector);
7013                 return NULL;
7014         }
7015
7016         return connector;
7017 }
7018
7019 /* Simple connector->get_hw_state implementation for encoders that support only
7020  * one connector and no cloning and hence the encoder state determines the state
7021  * of the connector. */
7022 bool intel_connector_get_hw_state(struct intel_connector *connector)
7023 {
7024         enum pipe pipe = 0;
7025         struct intel_encoder *encoder = connector->encoder;
7026
7027         return encoder->get_hw_state(encoder, &pipe);
7028 }
7029
7030 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
7031 {
7032         if (crtc_state->base.enable && crtc_state->has_pch_encoder)
7033                 return crtc_state->fdi_lanes;
7034
7035         return 0;
7036 }
7037
/*
 * Validate the requested FDI lane count for @pipe against the hardware
 * limits, including lanes shared with other pipes on 3-pipe parts.
 *
 * Returns 0 when the configuration is usable, -EINVAL when it cannot
 * work, or the negative error from intel_atomic_get_crtc_state() when
 * the other pipe's state could not be acquired.
 */
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	/* 4 lanes is the absolute maximum for any platform handled here */
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	/* HSW/BDW are capped at 2 lanes regardless of pipe */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* With only two pipes there is no cross-pipe lane sharing to check */
	if (INTEL_INFO(dev)->num_pipes == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		/* Pipe B wants >2 lanes: pipe C must then need none */
		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C));
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		/* Pipe C can only be enabled while pipe B uses at most 2 lanes */
		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B));
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
7109
#define RETRY 1
/*
 * Compute the FDI lane count and M/N values for @pipe_config. When the
 * lane check fails with -EINVAL, pipe_bpp is lowered one colour bit per
 * channel at a time (down to 18 bpp) and the computation is redone.
 *
 * Returns 0 on success, RETRY when the caller must recompute with the
 * reduced bpp, or a negative error code.
 */
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n);

	ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		/* Drop 2 bits per channel (6 bpp) and try again */
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return ret;
}
7155
7156 static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
7157                                      struct intel_crtc_state *pipe_config)
7158 {
7159         if (pipe_config->pipe_bpp > 24)
7160                 return false;
7161
7162         /* HSW can handle pixel rate up to cdclk? */
7163         if (IS_HASWELL(dev_priv))
7164                 return true;
7165
7166         /*
7167          * We compare against max which means we must take
7168          * the increased cdclk requirement into account when
7169          * calculating the new cdclk.
7170          *
7171          * Should measure whether using a lower cdclk w/o IPS
7172          */
7173         return ilk_pipe_pixel_rate(pipe_config) <=
7174                 dev_priv->max_cdclk_freq * 95 / 100;
7175 }
7176
7177 static void hsw_compute_ips_config(struct intel_crtc *crtc,
7178                                    struct intel_crtc_state *pipe_config)
7179 {
7180         struct drm_device *dev = crtc->base.dev;
7181         struct drm_i915_private *dev_priv = to_i915(dev);
7182
7183         pipe_config->ips_enabled = i915.enable_ips &&
7184                 hsw_crtc_supports_ips(crtc) &&
7185                 pipe_config_supports_ips(dev_priv, pipe_config);
7186 }
7187
7188 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
7189 {
7190         const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7191
7192         /* GDG double wide on either pipe, otherwise pipe A only */
7193         return INTEL_INFO(dev_priv)->gen < 4 &&
7194                 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
7195 }
7196
/*
 * Encoder-independent config computation for a crtc: clamps the dot
 * clock (possibly enabling double wide mode on gen2/3), enforces even
 * pipe width where required, rejects modes hitting the hsync front
 * porch workaround, computes IPS eligibility and, for PCH encoders,
 * the FDI configuration.
 *
 * Returns 0 on success, RETRY if the FDI code asks for a recompute, or
 * a negative error code.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	if (INTEL_INFO(dev)->gen < 4) {
		/* gen2/3 single wide pipes top out at 90% of cdclk */
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (adjusted_mode->crtc_clock > clock_limit) {
		DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			      adjusted_mode->crtc_clock, clock_limit,
			      yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if ((intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
		pipe_config->pipe_src_w &= ~1;

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	if (HAS_IPS(dev_priv))
		hsw_compute_ips_config(crtc, pipe_config);

	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}
7251
/*
 * SKL: read back the current cdclk frequency in kHz. The CDCLK_CTL
 * frequency select field decodes differently depending on which VCO
 * DPLL0 is locked to; with DPLL0 off the reference clock is returned.
 */
static int skylake_get_display_clock_speed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t cdctl;

	skl_dpll0_update(dev_priv);

	/* DPLL0 disabled: cdclk runs off the reference clock */
	if (dev_priv->cdclk_pll.vco == 0)
		return dev_priv->cdclk_pll.ref;

	cdctl = I915_READ(CDCLK_CTL);

	if (dev_priv->cdclk_pll.vco == 8640000) {
		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
		case CDCLK_FREQ_450_432:
			return 432000;
		case CDCLK_FREQ_337_308:
			return 308571;
		case CDCLK_FREQ_540:
			return 540000;
		case CDCLK_FREQ_675_617:
			return 617143;
		default:
			MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
		}
	} else {
		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
		case CDCLK_FREQ_450_432:
			return 450000;
		case CDCLK_FREQ_337_308:
			return 337500;
		case CDCLK_FREQ_540:
			return 540000;
		case CDCLK_FREQ_675_617:
			return 675000;
		default:
			MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
		}
	}

	/* unrecognised select value: report the reference clock */
	return dev_priv->cdclk_pll.ref;
}
7294
/*
 * BXT: refresh the cached DE PLL state. Leaves cdclk_pll.vco at 0 when
 * the PLL is disabled (or enabled but, unexpectedly, not locked);
 * otherwise sets it to ratio * ref (19200 kHz reference).
 */
static void bxt_de_pll_update(struct drm_i915_private *dev_priv)
{
	u32 val;

	dev_priv->cdclk_pll.ref = 19200;
	dev_priv->cdclk_pll.vco = 0;

	val = I915_READ(BXT_DE_PLL_ENABLE);
	if ((val & BXT_DE_PLL_PLL_ENABLE) == 0)
		return;

	/* enabled but not locked is a hardware anomaly worth warning about */
	if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0))
		return;

	val = I915_READ(BXT_DE_PLL_CTL);
	dev_priv->cdclk_pll.vco = (val & BXT_DE_PLL_RATIO_MASK) *
		dev_priv->cdclk_pll.ref;
}
7313
/*
 * BXT: read back the current cdclk in kHz, derived from the DE PLL VCO
 * divided by the CD2X divider selected in CDCLK_CTL. Falls back to the
 * reference clock when the PLL is off or the divider is unrecognised.
 */
static int broxton_get_display_clock_speed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 divider;
	int div, vco;

	bxt_de_pll_update(dev_priv);

	vco = dev_priv->cdclk_pll.vco;
	if (vco == 0)
		return dev_priv->cdclk_pll.ref;

	divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;

	/* the CD2X field encodes half-steps, hence div = 2 * select */
	switch (divider) {
	case BXT_CDCLK_CD2X_DIV_SEL_1:
		div = 2;
		break;
	case BXT_CDCLK_CD2X_DIV_SEL_1_5:
		div = 3;
		break;
	case BXT_CDCLK_CD2X_DIV_SEL_2:
		div = 4;
		break;
	case BXT_CDCLK_CD2X_DIV_SEL_4:
		div = 8;
		break;
	default:
		MISSING_CASE(divider);
		return dev_priv->cdclk_pll.ref;
	}

	return DIV_ROUND_CLOSEST(vco, div);
}
7348
/*
 * BDW: decode the current cdclk in kHz from LCPLL_CTL (and the fuse
 * strap that caps some SKUs at 450 MHz).
 */
static int broadwell_get_display_clock_speed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t lcpll = I915_READ(LCPLL_CTL);
	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;

	if (lcpll & LCPLL_CD_SOURCE_FCLK)
		return 800000;
	else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
		return 450000;
	else if (freq == LCPLL_CLK_FREQ_450)
		return 450000;
	else if (freq == LCPLL_CLK_FREQ_54O_BDW)
		return 540000;
	else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
		return 337500;
	else
		return 675000;
}
7368
/*
 * HSW: decode the current cdclk in kHz from LCPLL_CTL; ULT parts run
 * at 337.5 MHz where others run at 540 MHz.
 */
static int haswell_get_display_clock_speed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t lcpll = I915_READ(LCPLL_CTL);
	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;

	if (lcpll & LCPLL_CD_SOURCE_FCLK)
		return 800000;
	else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
		return 450000;
	else if (freq == LCPLL_CLK_FREQ_450)
		return 450000;
	else if (IS_HSW_ULT(dev_priv))
		return 337500;
	else
		return 540000;
}
7386
7387 static int valleyview_get_display_clock_speed(struct drm_device *dev)
7388 {
7389         return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk",
7390                                       CCK_DISPLAY_CLOCK_CONTROL);
7391 }
7392
/* ILK: fixed 450 MHz display clock. */
static int ilk_get_display_clock_speed(struct drm_device *dev)
{
	return 450000;
}
7397
/* 945: fixed 400 MHz display clock. */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}
7402
/* 915: fixed 333.33 MHz display clock. */
static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333333;
}
7407
/* Misc i9xx platforms: fixed 200 MHz display clock. */
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}
7412
/*
 * PNV: decode the display core clock in kHz from the GCFGC PCI config
 * word. An unknown select value is logged and treated as 133 MHz.
 */
static int pnv_get_display_clock_speed(struct drm_device *dev)
{
	struct pci_dev *pdev = dev->pdev;
	u16 gcfgc = 0;

	pci_read_config_word(pdev, GCFGC, &gcfgc);

	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
		return 266667;
	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
		return 333333;
	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
		return 444444;
	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
		return 200000;
	default:
		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
		/* fall through - treat unknown values as 133 MHz */
	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
		return 133333;
	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
		return 166667;
	}
}
7437
7438 static int i915gm_get_display_clock_speed(struct drm_device *dev)
7439 {
7440         struct pci_dev *pdev = dev->pdev;
7441         u16 gcfgc = 0;
7442
7443         pci_read_config_word(pdev, GCFGC, &gcfgc);
7444
7445         if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
7446                 return 133333;
7447         else {
7448                 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
7449                 case GC_DISPLAY_CLOCK_333_MHZ:
7450                         return 333333;
7451                 default:
7452                 case GC_DISPLAY_CLOCK_190_200_MHZ:
7453                         return 190000;
7454                 }
7455         }
7456 }
7457
/* 865: fixed 266.67 MHz display clock. */
static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266667;
}
7462
/*
 * 85x: decode the display clock in kHz from the HPLLCC config word of
 * PCI device 0:0.3. Returns 0 for an unrecognised encoding.
 */
static int i85x_get_display_clock_speed(struct drm_device *dev)
{
	struct pci_dev *pdev = dev->pdev;
	u16 hpllcc = 0;

	/*
	 * 852GM/852GMV only supports 133 MHz and the HPLLCC
	 * encoding is different :(
	 * FIXME is this the right way to detect 852GM/852GMV?
	 */
	if (pdev->revision == 0x1)
		return 133333;

	pci_bus_read_config_word(pdev->bus,
				 PCI_DEVFN(0, 3), HPLLCC, &hpllcc);

	/* Assume that the hardware is in the high speed state.  This
	 * should be the default.
	 */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_133_200_2:
	case GC_CLOCK_100_200:
		return 200000;
	case GC_CLOCK_166_250:
		return 250000;
	case GC_CLOCK_100_133:
		return 133333;
	case GC_CLOCK_133_266:
	case GC_CLOCK_133_266_2:
	case GC_CLOCK_166_266:
		return 266667;
	}

	/* Shouldn't happen */
	return 0;
}
7500
/* 830: fixed 133.33 MHz display clock. */
static int i830_get_display_clock_speed(struct drm_device *dev)
{
	return 133333;
}
7505
/*
 * Read the HPLL VCO frequency in kHz from the HPLLVCO register, using a
 * per-chipset decode table. Returns 0 (with an error logged) for an
 * unsupported chipset or an unknown encoding.
 */
static unsigned int intel_hpll_vco(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	static const unsigned int blb_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
		[4] = 6400000,
	};
	static const unsigned int pnv_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
		[4] = 2666667,
	};
	static const unsigned int cl_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 6400000,
		[4] = 3333333,
		[5] = 3566667,
		[6] = 4266667,
	};
	static const unsigned int elk_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
	};
	static const unsigned int ctg_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 6400000,
		[4] = 2666667,
		[5] = 4266667,
	};
	const unsigned int *vco_table;
	unsigned int vco;
	uint8_t tmp = 0;

	/* FIXME other chipsets? */
	if (IS_GM45(dev_priv))
		vco_table = ctg_vco;
	else if (IS_G4X(dev_priv))
		vco_table = elk_vco;
	else if (IS_CRESTLINE(dev))
		vco_table = cl_vco;
	else if (IS_PINEVIEW(dev))
		vco_table = pnv_vco;
	else if (IS_G33(dev))
		vco_table = blb_vco;
	else
		return 0;

	tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO);

	/* only the low 3 bits select a table entry; unset entries are 0 */
	vco = vco_table[tmp & 0x7];
	if (vco == 0)
		DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
	else
		DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);

	return vco;
}
7574
/*
 * GM45: derive the cdclk in kHz from the HPLL VCO and the single-bit
 * divider select in GCFGC (bit 12).
 */
static int gm45_get_display_clock_speed(struct drm_device *dev)
{
	struct pci_dev *pdev = dev->pdev;
	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
	uint16_t tmp = 0;

	pci_read_config_word(pdev, GCFGC, &tmp);

	cdclk_sel = (tmp >> 12) & 0x1;

	switch (vco) {
	case 2666667:
	case 4000000:
	case 5333333:
		return cdclk_sel ? 333333 : 222222;
	case 3200000:
		return cdclk_sel ? 320000 : 228571;
	default:
		DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp);
		return 222222;
	}
}
7597
/*
 * 965GM: derive the cdclk in kHz as HPLL VCO divided by a per-VCO
 * divider looked up via the GCFGC select field. Unknown VCO or select
 * values fall back to 200 MHz with an error logged.
 */
static int i965gm_get_display_clock_speed(struct drm_device *dev)
{
	struct pci_dev *pdev = dev->pdev;
	static const uint8_t div_3200[] = { 16, 10,  8 };
	static const uint8_t div_4000[] = { 20, 12, 10 };
	static const uint8_t div_5333[] = { 24, 16, 14 };
	const uint8_t *div_table;
	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
	uint16_t tmp = 0;

	pci_read_config_word(pdev, GCFGC, &tmp);

	/* select field is 1-based in the register, 0-based in the tables */
	cdclk_sel = ((tmp >> 8) & 0x1f) - 1;

	if (cdclk_sel >= ARRAY_SIZE(div_3200))
		goto fail;

	switch (vco) {
	case 3200000:
		div_table = div_3200;
		break;
	case 4000000:
		div_table = div_4000;
		break;
	case 5333333:
		div_table = div_5333;
		break;
	default:
		goto fail;
	}

	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);

fail:
	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
	return 200000;
}
7635
7636 static int g33_get_display_clock_speed(struct drm_device *dev)
7637 {
7638         struct pci_dev *pdev = dev->pdev;
7639         static const uint8_t div_3200[] = { 12, 10,  8,  7, 5, 16 };
7640         static const uint8_t div_4000[] = { 14, 12, 10,  8, 6, 20 };
7641         static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
7642         static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
7643         const uint8_t *div_table;
7644         unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
7645         uint16_t tmp = 0;
7646
7647         pci_read_config_word(pdev, GCFGC, &tmp);
7648
7649         cdclk_sel = (tmp >> 4) & 0x7;
7650
7651         if (cdclk_sel >= ARRAY_SIZE(div_3200))
7652                 goto fail;
7653
7654         switch (vco) {
7655         case 3200000:
7656                 div_table = div_3200;
7657                 break;
7658         case 4000000:
7659                 div_table = div_4000;
7660                 break;
7661         case 4800000:
7662                 div_table = div_4800;
7663                 break;
7664         case 5333333:
7665                 div_table = div_5333;
7666                 break;
7667         default:
7668                 goto fail;
7669         }
7670
7671         return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
7672
7673 fail:
7674         DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp);
7675         return 190476;
7676 }
7677
7678 static void
7679 intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
7680 {
7681         while (*num > DATA_LINK_M_N_MASK ||
7682                *den > DATA_LINK_M_N_MASK) {
7683                 *num >>= 1;
7684                 *den >>= 1;
7685         }
7686 }
7687
7688 static void compute_m_n(unsigned int m, unsigned int n,
7689                         uint32_t *ret_m, uint32_t *ret_n)
7690 {
7691         *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
7692         *ret_m = div_u64((uint64_t) m * *ret_n, n);
7693         intel_reduce_m_n_ratio(ret_m, ret_n);
7694 }
7695
7696 void
7697 intel_link_compute_m_n(int bits_per_pixel, int nlanes,
7698                        int pixel_clock, int link_clock,
7699                        struct intel_link_m_n *m_n)
7700 {
7701         m_n->tu = 64;
7702
7703         compute_m_n(bits_per_pixel * pixel_clock,
7704                     link_clock * nlanes * 8,
7705                     &m_n->gmch_m, &m_n->gmch_n);
7706
7707         compute_m_n(pixel_clock, link_clock,
7708                     &m_n->link_m, &m_n->link_n);
7709 }
7710
7711 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7712 {
7713         if (i915.panel_use_ssc >= 0)
7714                 return i915.panel_use_ssc != 0;
7715         return dev_priv->vbt.lvds_use_ssc
7716                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7717 }
7718
7719 static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
7720 {
7721         return (1 << dpll->n) << 16 | dpll->m2;
7722 }
7723
7724 static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
7725 {
7726         return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
7727 }
7728
/*
 * Program the FP0/FP1 divider values into the crtc state. FP1 carries
 * the reduced (downclocked) dividers only for LVDS with a reduced
 * clock available; otherwise it mirrors FP0. Also updates
 * crtc->lowfreq_avail accordingly.
 */
static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
				     struct intel_crtc_state *crtc_state,
				     struct dpll *reduced_clock)
{
	struct drm_device *dev = crtc->base.dev;
	u32 fp, fp2 = 0;

	/* Pineview uses a different FP register encoding */
	if (IS_PINEVIEW(dev)) {
		fp = pnv_dpll_compute_fp(&crtc_state->dpll);
		if (reduced_clock)
			fp2 = pnv_dpll_compute_fp(reduced_clock);
	} else {
		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
		if (reduced_clock)
			fp2 = i9xx_dpll_compute_fp(reduced_clock);
	}

	crtc_state->dpll_hw_state.fp0 = fp;

	crtc->lowfreq_avail = false;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    reduced_clock) {
		crtc_state->dpll_hw_state.fp1 = fp2;
		crtc->lowfreq_avail = true;
	} else {
		crtc_state->dpll_hw_state.fp1 = fp;
	}
}
7757
/*
 * VLV: work around the PLLB opamp always calibrating to the maximum
 * value (0x3f) by driving it to a reasonable value through a DPIO
 * register write sequence.
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x8cffffff;
	/* NOTE(review): the plain assignment below discards the value masked
	 * on the previous line — presumably intentional for this magic init
	 * sequence, but confirm against the VLV DPIO documentation. */
	reg_val = 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
7786
/* Program the PCH transcoder data/link M1/N1 registers for @crtc's pipe. */
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;

	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
7799
/*
 * Program the CPU transcoder data/link M/N registers. On gen5+ the
 * per-transcoder registers are used (with optional M2/N2 for DRRS);
 * older platforms use the per-pipe G4X registers.
 */
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	enum transcoder transcoder = crtc->config->cpu_transcoder;

	if (INTEL_INFO(dev)->gen >= 5) {
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
		/* M2_N2 registers to be set only for gen < 8 (M2_N2 available
		 * for gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily accessed).
		 */
		if (m2_n2 && (IS_CHERRYVIEW(dev_priv) ||
		    INTEL_GEN(dev_priv) < 8) && crtc->config->has_drrs) {
			I915_WRITE(PIPE_DATA_M2(transcoder),
					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
		}
	} else {
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
7833
7834 void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
7835 {
7836         struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
7837
7838         if (m_n == M1_N1) {
7839                 dp_m_n = &crtc->config->dp_m_n;
7840                 dp_m2_n2 = &crtc->config->dp_m2_n2;
7841         } else if (m_n == M2_N2) {
7842
7843                 /*
7844                  * M2_N2 registers are not supported. Hence m2_n2 divider value
7845                  * needs to be programmed into M1_N1.
7846                  */
7847                 dp_m_n = &crtc->config->dp_m2_n2;
7848         } else {
7849                 DRM_ERROR("Unsupported divider value\n");
7850                 return;
7851         }
7852
7853         if (crtc->config->has_pch_encoder)
7854                 intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
7855         else
7856                 intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
7857 }
7858
7859 static void vlv_compute_dpll(struct intel_crtc *crtc,
7860                              struct intel_crtc_state *pipe_config)
7861 {
7862         pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
7863                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7864         if (crtc->pipe != PIPE_A)
7865                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7866
7867         /* DPLL not used with DSI, but still need the rest set up */
7868         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7869                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
7870                         DPLL_EXT_BUFFER_ENABLE_VLV;
7871
7872         pipe_config->dpll_hw_state.dpll_md =
7873                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7874 }
7875
7876 static void chv_compute_dpll(struct intel_crtc *crtc,
7877                              struct intel_crtc_state *pipe_config)
7878 {
7879         pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
7880                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7881         if (crtc->pipe != PIPE_A)
7882                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7883
7884         /* DPLL not used with DSI, but still need the rest set up */
7885         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7886                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
7887
7888         pipe_config->dpll_hw_state.dpll_md =
7889                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7890 }
7891
/*
 * Program the VLV DPLL dividers and analog tuning values through the
 * DPIO sideband interface. Called before the PLL itself is enabled;
 * the write order follows the VBIOS/driver notes and should not be
 * rearranged.
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	/* Enable Refclk */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll &
		   ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* sb_lock serializes all DPIO sideband traffic */
	mutex_lock(&dev_priv->sb_lock);

	bestn = pipe_config->dpll.n;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Write the dividers again with calibration enabled */
	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	/* NOTE(review): this reads crtc->config while the rest of the
	 * function uses pipe_config — presumably identical here, but
	 * worth confirming against the callers. */
	if (pipe_config->port_clock == 162000 ||
	    intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) ||
	    intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x00d0000f);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
	}

	/* Core clock gating: extra bit set for DP encoders */
	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_crtc_has_dp_encoder(crtc->config))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
	mutex_unlock(&dev_priv->sb_lock);
}
7990
/*
 * Program the CHV PLL dividers, loop filter and lock-detect settings
 * through the DPIO sideband interface. Called before the PLL itself is
 * enabled; the register write order is deliberate.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* Enable Refclk and SSC */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* m2 is stored as a 10.22 fixed point value; split it */
	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	/* sb_lock serializes all DPIO sideband traffic */
	mutex_lock(&dev_priv->sb_lock);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	/* coarse lock detect only without a fractional m2 */
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter coefficients depend on the target VCO frequency */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	/* TDC target count, matched to the loop filter choice above */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	mutex_unlock(&dev_priv->sb_lock);
}
8095
8096 /**
8097  * vlv_force_pll_on - forcibly enable just the PLL
8098  * @dev_priv: i915 private structure
8099  * @pipe: pipe PLL to enable
8100  * @dpll: PLL configuration
8101  *
8102  * Enable the PLL for @pipe using the supplied @dpll config. To be used
8103  * in cases where we need the PLL enabled even when @pipe is not going to
8104  * be enabled.
8105  */
8106 int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
8107                      const struct dpll *dpll)
8108 {
8109         struct intel_crtc *crtc =
8110                 to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
8111         struct intel_crtc_state *pipe_config;
8112
8113         pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
8114         if (!pipe_config)
8115                 return -ENOMEM;
8116
8117         pipe_config->base.crtc = &crtc->base;
8118         pipe_config->pixel_multiplier = 1;
8119         pipe_config->dpll = *dpll;
8120
8121         if (IS_CHERRYVIEW(to_i915(dev))) {
8122                 chv_compute_dpll(crtc, pipe_config);
8123                 chv_prepare_pll(crtc, pipe_config);
8124                 chv_enable_pll(crtc, pipe_config);
8125         } else {
8126                 vlv_compute_dpll(crtc, pipe_config);
8127                 vlv_prepare_pll(crtc, pipe_config);
8128                 vlv_enable_pll(crtc, pipe_config);
8129         }
8130
8131         kfree(pipe_config);
8132
8133         return 0;
8134 }
8135
8136 /**
8137  * vlv_force_pll_off - forcibly disable just the PLL
8138  * @dev_priv: i915 private structure
8139  * @pipe: pipe PLL to disable
8140  *
8141  * Disable the PLL for @pipe. To be used in cases where we need
8142  * the PLL enabled even when @pipe is not going to be enabled.
8143  */
8144 void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
8145 {
8146         if (IS_CHERRYVIEW(to_i915(dev)))
8147                 chv_disable_pll(to_i915(dev), pipe);
8148         else
8149                 vlv_disable_pll(to_i915(dev), pipe);
8150 }
8151
/*
 * Compute the DPLL control value (and DPLL_MD on gen4+) for i9xx-class
 * hardware and store it in crtc_state->dpll_hw_state. Also updates the
 * FP dividers via i9xx_update_pll_dividers().
 */
static void i9xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* these platforms carry the pixel multiplier in the DPLL itself */
	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || IS_G33(dev_priv)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* DP also needs the SDVO/HDMI high-speed clocking mode */
	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		/* second p1 field drives the downclocked (reduced) mode */
		if (IS_G4X(dev_priv) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_INFO(dev)->gen >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* reference clock selection: TV, SSC for LVDS, or default refclk */
	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	if (INTEL_INFO(dev)->gen >= 4) {
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}
8224
8225 static void i8xx_compute_dpll(struct intel_crtc *crtc,
8226                               struct intel_crtc_state *crtc_state,
8227                               struct dpll *reduced_clock)
8228 {
8229         struct drm_device *dev = crtc->base.dev;
8230         struct drm_i915_private *dev_priv = to_i915(dev);
8231         u32 dpll;
8232         struct dpll *clock = &crtc_state->dpll;
8233
8234         i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
8235
8236         dpll = DPLL_VGA_MODE_DIS;
8237
8238         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8239                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8240         } else {
8241                 if (clock->p1 == 2)
8242                         dpll |= PLL_P1_DIVIDE_BY_TWO;
8243                 else
8244                         dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8245                 if (clock->p2 == 4)
8246                         dpll |= PLL_P2_DIVIDE_BY_4;
8247         }
8248
8249         if (!IS_I830(dev_priv) &&
8250             intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
8251                 dpll |= DPLL_DVO_2X_MODE;
8252
8253         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8254             intel_panel_use_ssc(dev_priv))
8255                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
8256         else
8257                 dpll |= PLL_REF_INPUT_DREFCLK;
8258
8259         dpll |= DPLL_VCO_ENABLE;
8260         crtc_state->dpll_hw_state.dpll = dpll;
8261 }
8262
/*
 * Program the pipe/transcoder timing registers (H/V total, blank, sync)
 * from the adjusted mode. All registers store (value - 1); the low 16
 * bits hold the "start"/"display" field and the high 16 bits the
 * "end"/"total" field.
 */
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
	uint32_t crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to change the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		/* keep the shift within one scanline */
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/* gen2/3 have no VSYNCSHIFT register */
	if (INTEL_INFO(dev)->gen > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

}
8324
8325 static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
8326 {
8327         struct drm_device *dev = intel_crtc->base.dev;
8328         struct drm_i915_private *dev_priv = to_i915(dev);
8329         enum pipe pipe = intel_crtc->pipe;
8330
8331         /* pipesrc controls the size that is scaled from, which should
8332          * always be the user's requested size.
8333          */
8334         I915_WRITE(PIPESRC(pipe),
8335                    ((intel_crtc->config->pipe_src_w - 1) << 16) |
8336                    (intel_crtc->config->pipe_src_h - 1));
8337 }
8338
8339 static void intel_get_pipe_timings(struct intel_crtc *crtc,
8340                                    struct intel_crtc_state *pipe_config)
8341 {
8342         struct drm_device *dev = crtc->base.dev;
8343         struct drm_i915_private *dev_priv = to_i915(dev);
8344         enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
8345         uint32_t tmp;
8346
8347         tmp = I915_READ(HTOTAL(cpu_transcoder));
8348         pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
8349         pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
8350         tmp = I915_READ(HBLANK(cpu_transcoder));
8351         pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
8352         pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
8353         tmp = I915_READ(HSYNC(cpu_transcoder));
8354         pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
8355         pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
8356
8357         tmp = I915_READ(VTOTAL(cpu_transcoder));
8358         pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
8359         pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
8360         tmp = I915_READ(VBLANK(cpu_transcoder));
8361         pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
8362         pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
8363         tmp = I915_READ(VSYNC(cpu_transcoder));
8364         pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
8365         pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
8366
8367         if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
8368                 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
8369                 pipe_config->base.adjusted_mode.crtc_vtotal += 1;
8370                 pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
8371         }
8372 }
8373
8374 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
8375                                     struct intel_crtc_state *pipe_config)
8376 {
8377         struct drm_device *dev = crtc->base.dev;
8378         struct drm_i915_private *dev_priv = to_i915(dev);
8379         u32 tmp;
8380
8381         tmp = I915_READ(PIPESRC(crtc->pipe));
8382         pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
8383         pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
8384
8385         pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
8386         pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
8387 }
8388
8389 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
8390                                  struct intel_crtc_state *pipe_config)
8391 {
8392         mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
8393         mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
8394         mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
8395         mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
8396
8397         mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
8398         mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
8399         mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
8400         mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
8401
8402         mode->flags = pipe_config->base.adjusted_mode.flags;
8403         mode->type = DRM_MODE_TYPE_DRIVER;
8404
8405         mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
8406         mode->flags |= pipe_config->base.adjusted_mode.flags;
8407
8408         mode->hsync = drm_mode_hsync(mode);
8409         mode->vrefresh = drm_mode_vrefresh(mode);
8410         drm_mode_set_name(mode);
8411 }
8412
/*
 * Program PIPECONF for i9xx-class hardware: bpc/dither (g4x+), double
 * wide mode, interlace mode, CxSR downclocking and limited color range.
 */
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t pipeconf;

	pipeconf = 0;

	/* quirk: these machines need the pipe left enabled; preserve the
	 * current enable bit instead of starting from zero */
	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;

	if (intel_crtc->config->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (HAS_PIPE_CXSR(dev)) {
		if (intel_crtc->lowfreq_avail) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
		} else {
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
		}
	}

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/* gen2/3 and SDVO outputs need the field indication variant */
		if (INTEL_INFO(dev)->gen < 4 ||
		    intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else
		pipeconf |= PIPECONF_PROGRESSIVE;

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	     intel_crtc->config->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(intel_crtc->pipe));
}
8477
8478 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
8479                                    struct intel_crtc_state *crtc_state)
8480 {
8481         struct drm_device *dev = crtc->base.dev;
8482         struct drm_i915_private *dev_priv = to_i915(dev);
8483         const struct intel_limit *limit;
8484         int refclk = 48000;
8485
8486         memset(&crtc_state->dpll_hw_state, 0,
8487                sizeof(crtc_state->dpll_hw_state));
8488
8489         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8490                 if (intel_panel_use_ssc(dev_priv)) {
8491                         refclk = dev_priv->vbt.lvds_ssc_freq;
8492                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8493                 }
8494
8495                 limit = &intel_limits_i8xx_lvds;
8496         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
8497                 limit = &intel_limits_i8xx_dvo;
8498         } else {
8499                 limit = &intel_limits_i8xx_dac;
8500         }
8501
8502         if (!crtc_state->clock_set &&
8503             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8504                                  refclk, NULL, &crtc_state->dpll)) {
8505                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8506                 return -EINVAL;
8507         }
8508
8509         i8xx_compute_dpll(crtc, crtc_state, NULL);
8510
8511         return 0;
8512 }
8513
8514 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
8515                                   struct intel_crtc_state *crtc_state)
8516 {
8517         struct drm_device *dev = crtc->base.dev;
8518         struct drm_i915_private *dev_priv = to_i915(dev);
8519         const struct intel_limit *limit;
8520         int refclk = 96000;
8521
8522         memset(&crtc_state->dpll_hw_state, 0,
8523                sizeof(crtc_state->dpll_hw_state));
8524
8525         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8526                 if (intel_panel_use_ssc(dev_priv)) {
8527                         refclk = dev_priv->vbt.lvds_ssc_freq;
8528                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8529                 }
8530
8531                 if (intel_is_dual_link_lvds(dev))
8532                         limit = &intel_limits_g4x_dual_channel_lvds;
8533                 else
8534                         limit = &intel_limits_g4x_single_channel_lvds;
8535         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
8536                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
8537                 limit = &intel_limits_g4x_hdmi;
8538         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
8539                 limit = &intel_limits_g4x_sdvo;
8540         } else {
8541                 /* The option is for other outputs */
8542                 limit = &intel_limits_i9xx_sdvo;
8543         }
8544
8545         if (!crtc_state->clock_set &&
8546             !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8547                                 refclk, NULL, &crtc_state->dpll)) {
8548                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8549                 return -EINVAL;
8550         }
8551
8552         i9xx_compute_dpll(crtc, crtc_state, NULL);
8553
8554         return 0;
8555 }
8556
8557 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
8558                                   struct intel_crtc_state *crtc_state)
8559 {
8560         struct drm_device *dev = crtc->base.dev;
8561         struct drm_i915_private *dev_priv = to_i915(dev);
8562         const struct intel_limit *limit;
8563         int refclk = 96000;
8564
8565         memset(&crtc_state->dpll_hw_state, 0,
8566                sizeof(crtc_state->dpll_hw_state));
8567
8568         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8569                 if (intel_panel_use_ssc(dev_priv)) {
8570                         refclk = dev_priv->vbt.lvds_ssc_freq;
8571                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8572                 }
8573
8574                 limit = &intel_limits_pineview_lvds;
8575         } else {
8576                 limit = &intel_limits_pineview_sdvo;
8577         }
8578
8579         if (!crtc_state->clock_set &&
8580             !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8581                                 refclk, NULL, &crtc_state->dpll)) {
8582                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8583                 return -EINVAL;
8584         }
8585
8586         i9xx_compute_dpll(crtc, crtc_state, NULL);
8587
8588         return 0;
8589 }
8590
8591 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
8592                                    struct intel_crtc_state *crtc_state)
8593 {
8594         struct drm_device *dev = crtc->base.dev;
8595         struct drm_i915_private *dev_priv = to_i915(dev);
8596         const struct intel_limit *limit;
8597         int refclk = 96000;
8598
8599         memset(&crtc_state->dpll_hw_state, 0,
8600                sizeof(crtc_state->dpll_hw_state));
8601
8602         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8603                 if (intel_panel_use_ssc(dev_priv)) {
8604                         refclk = dev_priv->vbt.lvds_ssc_freq;
8605                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8606                 }
8607
8608                 limit = &intel_limits_i9xx_lvds;
8609         } else {
8610                 limit = &intel_limits_i9xx_sdvo;
8611         }
8612
8613         if (!crtc_state->clock_set &&
8614             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8615                                  refclk, NULL, &crtc_state->dpll)) {
8616                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8617                 return -EINVAL;
8618         }
8619
8620         i9xx_compute_dpll(crtc, crtc_state, NULL);
8621
8622         return 0;
8623 }
8624
8625 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
8626                                   struct intel_crtc_state *crtc_state)
8627 {
8628         int refclk = 100000;
8629         const struct intel_limit *limit = &intel_limits_chv;
8630
8631         memset(&crtc_state->dpll_hw_state, 0,
8632                sizeof(crtc_state->dpll_hw_state));
8633
8634         if (!crtc_state->clock_set &&
8635             !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8636                                 refclk, NULL, &crtc_state->dpll)) {
8637                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8638                 return -EINVAL;
8639         }
8640
8641         chv_compute_dpll(crtc, crtc_state);
8642
8643         return 0;
8644 }
8645
8646 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
8647                                   struct intel_crtc_state *crtc_state)
8648 {
8649         int refclk = 100000;
8650         const struct intel_limit *limit = &intel_limits_vlv;
8651
8652         memset(&crtc_state->dpll_hw_state, 0,
8653                sizeof(crtc_state->dpll_hw_state));
8654
8655         if (!crtc_state->clock_set &&
8656             !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8657                                 refclk, NULL, &crtc_state->dpll)) {
8658                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8659                 return -EINVAL;
8660         }
8661
8662         vlv_compute_dpll(crtc, crtc_state);
8663
8664         return 0;
8665 }
8666
8667 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
8668                                  struct intel_crtc_state *pipe_config)
8669 {
8670         struct drm_device *dev = crtc->base.dev;
8671         struct drm_i915_private *dev_priv = to_i915(dev);
8672         uint32_t tmp;
8673
8674         if (INTEL_GEN(dev_priv) <= 3 &&
8675             (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
8676                 return;
8677
8678         tmp = I915_READ(PFIT_CONTROL);
8679         if (!(tmp & PFIT_ENABLE))
8680                 return;
8681
8682         /* Check whether the pfit is attached to our pipe. */
8683         if (INTEL_INFO(dev)->gen < 4) {
8684                 if (crtc->pipe != PIPE_B)
8685                         return;
8686         } else {
8687                 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
8688                         return;
8689         }
8690
8691         pipe_config->gmch_pfit.control = tmp;
8692         pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
8693 }
8694
/*
 * Read back the VLV DPLL divider settings over DPIO and compute the
 * resulting port clock into pipe_config->port_clock.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	struct dpll clock;
	u32 mdiv;
	int refclk = 100000;	/* reference clock in kHz */

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* DPIO (sideband) accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	mutex_unlock(&dev_priv->sb_lock);

	/* Unpack the individual divider fields from PLL_DW3. */
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
8721
/*
 * Read out the primary plane's hardware state at driver load and
 * reconstruct a framebuffer description of the firmware-programmed
 * scanout buffer (BIOS framebuffer takeover).
 *
 * On success plane_config->fb points to a freshly allocated
 * intel_framebuffer (the caller takes ownership); if the plane is
 * disabled or allocation fails, plane_config is left untouched.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val, base, offset;
	int pipe = crtc->pipe, plane = crtc->plane;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to take over if the plane isn't enabled. */
	val = I915_READ(DSPCNTR(plane));
	if (!(val & DISPLAY_PLANE_ENABLE))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	/* Tiling is only read back from the plane control on gen4+. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		}
	}

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	/*
	 * Gen4+ scans out from DSPSURF with a separate tile/linear offset
	 * register; older parts use the single DSPADDR register.
	 */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(plane));
		else
			offset = I915_READ(DSPLINOFF(plane));
		base = I915_READ(DSPSURF(plane)) & 0xfffff000;
	} else {
		base = I915_READ(DSPADDR(plane));
	}
	plane_config->base = base;

	/* PIPESRC encodes (width - 1) << 16 | (height - 1). */
	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	/* Estimate the buffer size from pitch and tile-aligned height. */
	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), plane, fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}
8790
/*
 * Read back the CHV DPLL divider settings over DPIO and compute the
 * resulting port clock into pipe_config->port_clock.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;	/* reference clock in kHz */

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* DPIO (sideband) accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * m2 integer part sits above bit 22; the low 22 bits hold the
	 * fraction, only valid when the fractional divider is enabled.
	 */
	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
8824
/*
 * Read out the complete hardware state of a GMCH/VLV/CHV pipe into
 * pipe_config. Returns true if the pipe is enabled and the state was
 * read, false if the pipe's power domain is off or the pipe is
 * disabled.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	uint32_t tmp;
	bool ret;

	/* Don't touch the hardware if the pipe's power well is down. */
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* Pipe bpp is only encoded in PIPECONF on G4x/VLV/CHV. */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	if (INTEL_INFO(dev)->gen < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(crtc, pipe_config);

	/* Pixel multiplier location varies by generation. */
	if (INTEL_INFO(dev)->gen >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = I915_READ(DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv)) {
		tmp = I915_READ(DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		/*
		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
		 * on 830. Filter it out here so that we don't
		 * report errors due to that.
		 */
		if (IS_I830(dev_priv))
			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;

		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	/* Compute port_clock from the divider state we just read back. */
	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
8939
/*
 * Set up the PCH reference clock (DREF) tree on IBX/CPT according to
 * which outputs are present. The desired final register value is
 * computed up front so we can return early when nothing changes; the
 * actual transition then enables/disables each clock source in turn
 * with the mandated settle delays.
 */
static void ironlake_init_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(dev, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	/* An external CK505 clock source is only a consideration on IBX. */
	if (HAS_PCH_IBX(dev_priv)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		u32 temp = I915_READ(PCH_DPLL(i));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		      has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = I915_READ(PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else if (using_ssc_source) {
		/* Keep SSC alive for any DPLL still referencing it. */
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output  */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		DRM_DEBUG_KMS("Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		if (!using_ssc_source) {
			DRM_DEBUG_KMS("Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			I915_WRITE(PCH_DREF_CONTROL, val);
			POSTING_READ(PCH_DREF_CONTROL);
			udelay(200);
		}
	}

	/* The stepwise updates above must converge on the precomputed state. */
	BUG_ON(val != final);
}
9107
/*
 * Pulse the FDI mPHY reset: assert the reset bit in SOUTH_CHICKEN2 and
 * wait for the status bit to latch, then de-assert and wait for it to
 * clear. Timeouts are logged but not fatal.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		DRM_ERROR("FDI mPHY reset assert timeout\n");

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
9128
/* WaMPhyProgramming:hsw */
/*
 * Program the FDI mPHY tuning registers through the SBI MPHY port.
 * The register addresses and values are magic numbers mandated by the
 * workaround; registers come in 0x20xx/0x21xx pairs (presumably one
 * per FDI channel — confirm against BSpec before touching).
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
9203
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
				 bool with_fdi)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t reg, tmp;

	/* Sanitize impossible parameter combinations instead of programming them. */
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
	    with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	/* SBI (sideband) accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* Enable the SSC block but keep the path in bypass (PATHALT). */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		/* Leave bypass so the spread path drives the clock. */
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* The buffer-enable bit lives in a different SBI register on LP PCHs. */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
9249
/* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t reg, tmp;

	/* SBI (sideband) accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* The buffer-enable bit lives in a different SBI register on LP PCHs. */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		/* Put the path in bypass before shutting the SSC block down. */
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
9276
/*
 * Map a clock-bend amount in steps (-50..+50, multiples of 5) to an
 * index into sscdivintphase[] below: BEND_IDX(-50) == 0,
 * BEND_IDX(0) == 10, BEND_IDX(50) == 20.
 */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/* SBI_SSCDIVINTPHASE low-word values for each supported bend amount. */
static const uint16_t sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};
9302
9303 /*
9304  * Bend CLKOUT_DP
9305  * steps -50 to 50 inclusive, in steps of 5
9306  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
9307  * change in clock period = -(steps / 10) * 5.787 ps
9308  */
9309 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
9310 {
9311         uint32_t tmp;
9312         int idx = BEND_IDX(steps);
9313
9314         if (WARN_ON(steps % 5 != 0))
9315                 return;
9316
9317         if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
9318                 return;
9319
9320         mutex_lock(&dev_priv->sb_lock);
9321
9322         if (steps % 10 != 0)
9323                 tmp = 0xAAAAAAAB;
9324         else
9325                 tmp = 0x00000000;
9326         intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
9327
9328         tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
9329         tmp &= 0xffff0000;
9330         tmp |= sscdivintphase[idx];
9331         intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
9332
9333         mutex_unlock(&dev_priv->sb_lock);
9334 }
9335
9336 #undef BEND_IDX
9337
9338 static void lpt_init_pch_refclk(struct drm_device *dev)
9339 {
9340         struct intel_encoder *encoder;
9341         bool has_vga = false;
9342
9343         for_each_intel_encoder(dev, encoder) {
9344                 switch (encoder->type) {
9345                 case INTEL_OUTPUT_ANALOG:
9346                         has_vga = true;
9347                         break;
9348                 default:
9349                         break;
9350                 }
9351         }
9352
9353         if (has_vga) {
9354                 lpt_bend_clkout_dp(to_i915(dev), 0);
9355                 lpt_enable_clkout_dp(dev, true, true);
9356         } else {
9357                 lpt_disable_clkout_dp(dev);
9358         }
9359 }
9360
/*
 * Initialize the PCH reference clocks when the driver loads; the
 * handler is picked by PCH generation (IBX/CPT vs LPT).
 */
void intel_init_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		ironlake_init_pch_refclk(dev);
		return;
	}

	if (HAS_PCH_LPT(dev_priv))
		lpt_init_pch_refclk(dev);
}
9373
9374 static void ironlake_set_pipeconf(struct drm_crtc *crtc)
9375 {
9376         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
9377         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9378         int pipe = intel_crtc->pipe;
9379         uint32_t val;
9380
9381         val = 0;
9382
9383         switch (intel_crtc->config->pipe_bpp) {
9384         case 18:
9385                 val |= PIPECONF_6BPC;
9386                 break;
9387         case 24:
9388                 val |= PIPECONF_8BPC;
9389                 break;
9390         case 30:
9391                 val |= PIPECONF_10BPC;
9392                 break;
9393         case 36:
9394                 val |= PIPECONF_12BPC;
9395                 break;
9396         default:
9397                 /* Case prevented by intel_choose_pipe_bpp_dither. */
9398                 BUG();
9399         }
9400
9401         if (intel_crtc->config->dither)
9402                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9403
9404         if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9405                 val |= PIPECONF_INTERLACED_ILK;
9406         else
9407                 val |= PIPECONF_PROGRESSIVE;
9408
9409         if (intel_crtc->config->limited_color_range)
9410                 val |= PIPECONF_COLOR_RANGE_SELECT;
9411
9412         I915_WRITE(PIPECONF(pipe), val);
9413         POSTING_READ(PIPECONF(pipe));
9414 }
9415
9416 static void haswell_set_pipeconf(struct drm_crtc *crtc)
9417 {
9418         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
9419         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9420         enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
9421         u32 val = 0;
9422
9423         if (IS_HASWELL(dev_priv) && intel_crtc->config->dither)
9424                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9425
9426         if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9427                 val |= PIPECONF_INTERLACED_ILK;
9428         else
9429                 val |= PIPECONF_PROGRESSIVE;
9430
9431         I915_WRITE(PIPECONF(cpu_transcoder), val);
9432         POSTING_READ(PIPECONF(cpu_transcoder));
9433 }
9434
9435 static void haswell_set_pipemisc(struct drm_crtc *crtc)
9436 {
9437         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
9438         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9439
9440         if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) {
9441                 u32 val = 0;
9442
9443                 switch (intel_crtc->config->pipe_bpp) {
9444                 case 18:
9445                         val |= PIPEMISC_DITHER_6_BPC;
9446                         break;
9447                 case 24:
9448                         val |= PIPEMISC_DITHER_8_BPC;
9449                         break;
9450                 case 30:
9451                         val |= PIPEMISC_DITHER_10_BPC;
9452                         break;
9453                 case 36:
9454                         val |= PIPEMISC_DITHER_12_BPC;
9455                         break;
9456                 default:
9457                         /* Case prevented by pipe_config_set_bpp. */
9458                         BUG();
9459                 }
9460
9461                 if (intel_crtc->config->dither)
9462                         val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
9463
9464                 I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
9465         }
9466 }
9467
/*
 * Compute the number of FDI lanes required to carry @target_clock at
 * @bpp over a link of @link_bw, including spread-spectrum headroom.
 */
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Account for spread spectrum to avoid
	 * oversubscribing the link. Max center spread
	 * is 2.5%; use 5% for safety's sake.
	 */
	uint32_t bps = target_clock * bpp * 21 / 20;
	uint32_t lane_bw = link_bw * 8;

	/* Round up: a partially used lane is still a whole lane. */
	return (bps + lane_bw - 1) / lane_bw;
}
9478
/*
 * Decide whether the FP dividers need the FP_CB_TUNE bit: true when
 * the effective M value falls below @factor times N.
 */
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
{
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}
9483
/*
 * Compute the DPLL and FP0/FP1 register values for an ILK-family PCH
 * PLL from the divider settings already stored in crtc_state->dpll and
 * stash them in crtc_state->dpll_hw_state for later programming.
 *
 * @reduced_clock: optional dividers for the LVDS reduced (downclocked)
 *	mode; when NULL, FP1 simply mirrors FP0.
 */
static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
				  struct intel_crtc_state *crtc_state,
				  struct dpll *reduced_clock)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock)
		factor = 20;

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		/* Same m < factor * n criterion as for the primary clock. */
		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* The pixel multiplier is stored in hardware as (value - 1). */
	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* SSC reference only for LVDS panels that requested it. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}
9585
/*
 * Compute PLL dividers and acquire a shared DPLL for an ILK-family
 * PCH-driven pipe. CPU eDP needs no PCH PLL and returns early.
 *
 * Returns 0 on success, -EINVAL when no valid PLL settings or no free
 * shared DPLL could be found.
 */
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct dpll reduced_clock;
	/*
	 * NOTE(review): nothing below ever sets has_reduced_clock, so
	 * the reduced-clock/lowfreq_avail paths are currently dead —
	 * presumably LVDS downclock support was removed; confirm.
	 */
	bool has_reduced_clock = false;
	struct intel_shared_dpll *pll;
	const struct intel_limit *limit;
	int refclk = 120000;	/* default PCH reference clock, in kHz */

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc->lowfreq_avail = false;

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (!crtc_state->has_pch_encoder)
		return 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
				      dev_priv->vbt.lvds_ssc_freq);
			refclk = dev_priv->vbt.lvds_ssc_freq;
		}

		/* Divider limits depend on dual-link and a 100MHz refclk. */
		if (intel_is_dual_link_lvds(dev)) {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else {
		limit = &intel_limits_ironlake_dac;
	}

	/* Skip the search when the encoder already fixed the dividers. */
	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	ironlake_compute_dpll(crtc, crtc_state,
			      has_reduced_clock ? &reduced_clock : NULL);

	pll = intel_get_shared_dpll(crtc, crtc_state, NULL);
	if (pll == NULL) {
		DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
				 pipe_name(crtc->pipe));
		return -EINVAL;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    has_reduced_clock)
		crtc->lowfreq_avail = true;

	return 0;
}
9651
/*
 * Read back the link and data M/N values from the PCH transcoder
 * registers for @crtc's pipe. The TU size is packed into the high
 * bits of DATA_M1 and stored by hardware as (size - 1).
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
9667
/*
 * Read back link/data M/N values from the CPU transcoder registers
 * (gen5+) or the per-pipe g4x registers (older). When @m2_n2 is
 * non-NULL the second M/N set is read as well, but only where it
 * exists and DRRS is in use.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_INFO(dev)->gen >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		/* TU size is in the high bits of DATA_M1, stored as N-1. */
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		/* Read M2_N2 registers only for gen < 8 (M2_N2 available for
		 * gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily read).
		 */
		if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
			crtc->config->has_drrs) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		/* Pre-gen5: per-pipe g4x register layout. */
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
9709
9710 void intel_dp_get_m_n(struct intel_crtc *crtc,
9711                       struct intel_crtc_state *pipe_config)
9712 {
9713         if (pipe_config->has_pch_encoder)
9714                 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9715         else
9716                 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9717                                              &pipe_config->dp_m_n,
9718                                              &pipe_config->dp_m2_n2);
9719 }
9720
/*
 * Read back the FDI link M/N configuration from the CPU transcoder.
 * FDI has no second (M2/N2) set, hence the NULL.
 */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
9727
9728 static void skylake_get_pfit_config(struct intel_crtc *crtc,
9729                                     struct intel_crtc_state *pipe_config)
9730 {
9731         struct drm_device *dev = crtc->base.dev;
9732         struct drm_i915_private *dev_priv = to_i915(dev);
9733         struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
9734         uint32_t ps_ctrl = 0;
9735         int id = -1;
9736         int i;
9737
9738         /* find scaler attached to this pipe */
9739         for (i = 0; i < crtc->num_scalers; i++) {
9740                 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
9741                 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
9742                         id = i;
9743                         pipe_config->pch_pfit.enabled = true;
9744                         pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
9745                         pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
9746                         break;
9747                 }
9748         }
9749
9750         scaler_state->scaler_id = id;
9751         if (id >= 0) {
9752                 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
9753         } else {
9754                 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
9755         }
9756 }
9757
/*
 * Reconstruct a framebuffer description for the primary plane that the
 * BIOS/firmware left enabled on @crtc (SKL+ register layout), so the
 * boot framebuffer can be inherited. On failure (plane disabled or
 * unknown tiling) no fb is reported and the allocation is freed.
 */
static void
skylake_get_initial_plane_config(struct intel_crtc *crtc,
				 struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val, base, offset, stride_mult, tiling;
	int pipe = crtc->pipe;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	/* Nothing to inherit if the plane is disabled. */
	val = I915_READ(PLANE_CTL(pipe, 0));
	if (!(val & PLANE_CTL_ENABLE))
		goto error;

	pixel_format = val & PLANE_CTL_FORMAT_MASK;
	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX,
				      val & PLANE_CTL_ALPHA_MASK);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier[0] = DRM_FORMAT_MOD_NONE;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	/* Low 12 bits of the surface address are masked off. */
	base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
	plane_config->base = base;

	offset = I915_READ(PLANE_OFFSET(pipe, 0));

	/* PLANE_SIZE stores dimensions as (value - 1). */
	val = I915_READ(PLANE_SIZE(pipe, 0));
	fb->height = ((val >> 16) & 0xfff) + 1;
	fb->width = ((val >> 0) & 0x1fff) + 1;

	/* Stride register units depend on the tiling/format. */
	val = I915_READ(PLANE_STRIDE(pipe, 0));
	stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
						fb->pixel_format);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}
9841
9842 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
9843                                      struct intel_crtc_state *pipe_config)
9844 {
9845         struct drm_device *dev = crtc->base.dev;
9846         struct drm_i915_private *dev_priv = to_i915(dev);
9847         uint32_t tmp;
9848
9849         tmp = I915_READ(PF_CTL(crtc->pipe));
9850
9851         if (tmp & PF_ENABLE) {
9852                 pipe_config->pch_pfit.enabled = true;
9853                 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
9854                 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
9855
9856                 /* We currently do not free assignements of panel fitters on
9857                  * ivb/hsw (since we don't use the higher upscaling modes which
9858                  * differentiates them) so just WARN about this case for now. */
9859                 if (IS_GEN7(dev_priv)) {
9860                         WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
9861                                 PF_PIPE_SEL_IVB(crtc->pipe));
9862                 }
9863         }
9864 }
9865
/*
 * Reconstruct the BIOS-programmed framebuffer for an ILK-family
 * primary plane so it can be inherited as the boot framebuffer.
 * Returns silently when the plane is disabled or allocation fails.
 */
static void
ironlake_get_initial_plane_config(struct intel_crtc *crtc,
				  struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val, base, offset;
	int pipe = crtc->pipe;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to inherit if the plane is disabled. */
	val = I915_READ(DSPCNTR(pipe));
	if (!(val & DISPLAY_PLANE_ENABLE))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	if (INTEL_INFO(dev)->gen >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		}
	}

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	/* Low 12 bits of the surface address are masked off. */
	base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		/* HSW/BDW use a single offset register. */
		offset = I915_READ(DSPOFFSET(pipe));
	} else {
		/* Older parts have separate tiled/linear offset registers. */
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(pipe));
		else
			offset = I915_READ(DSPLINOFF(pipe));
	}
	plane_config->base = base;

	/* Plane size comes from PIPESRC, stored as (value - 1). */
	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}
9934
/*
 * Read out the current hardware state of @crtc's pipe into
 * @pipe_config. Returns true and fills @pipe_config when the pipe is
 * enabled; false when its power domain is off or the pipe is disabled.
 */
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	uint32_t tmp;
	bool ret;

	/* The register reads below need the pipe's power domain held. */
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	/* On this hardware the pipe<->CPU transcoder mapping is 1:1. */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;
	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_6BPC:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_8BPC:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_10BPC:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_12BPC:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
		struct intel_shared_dpll *pll;
		enum intel_dpll_id pll_id;

		pipe_config->has_pch_encoder = true;

		/* FDI lane count is stored as (lanes - 1). */
		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);

		if (HAS_PCH_IBX(dev_priv)) {
			/*
			 * The pipe->pch transcoder and pch transcoder->pll
			 * mapping is fixed.
			 */
			pll_id = (enum intel_dpll_id) crtc->pipe;
		} else {
			/* Otherwise the PLL selection comes from PCH_DPLL_SEL. */
			tmp = I915_READ(PCH_DPLL_SEL);
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
				pll_id = DPLL_ID_PCH_PLL_B;
			else
				pll_id= DPLL_ID_PCH_PLL_A;
		}

		pipe_config->shared_dpll =
			intel_get_shared_dpll_by_id(dev_priv, pll_id);
		pll = pipe_config->shared_dpll;

		WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
						 &pipe_config->dpll_hw_state));

		/* Pixel multiplier is stored in hardware as (value - 1). */
		tmp = pipe_config->dpll_hw_state.dpll;
		pipe_config->pixel_multiplier =
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

		ironlake_pch_clock_get(crtc, pipe_config);
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	ironlake_get_pfit_config(crtc, pipe_config);

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
10031
/*
 * Sanity-check that nothing which depends on LCPLL is still enabled:
 * all CRTCs inactive, power well down, SPLL/WRPLLs off, panel power
 * and backlight PWMs off, utility pin and PCH GTC disabled, IRQs off.
 * Only warns (I915_STATE_WARN); does not touch hardware state.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
		     pipe_name(crtc->pipe));

	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
	I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
	     "CPU PWM1 enabled\n");
	if (IS_HASWELL(dev_priv))
		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
		     "CPU PWM2 enabled\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
	     "PCH PWM1 enabled\n");
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
	     "Utility pin enabled\n");
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
10065
10066 static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
10067 {
10068         if (IS_HASWELL(dev_priv))
10069                 return I915_READ(D_COMP_HSW);
10070         else
10071                 return I915_READ(D_COMP_BDW);
10072 }
10073
/*
 * Write the D_COMP value. On HSW the register is written through the
 * pcode mailbox (hence taking rps.hw_lock); on BDW it is a plain MMIO
 * register write with a posting read.
 */
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
{
	if (IS_HASWELL(dev_priv)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
					    val))
			DRM_DEBUG_KMS("Failed to write to D_COMP\n");
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		I915_WRITE(D_COMP_BDW, val);
		POSTING_READ(D_COMP_BDW);
	}
}
10087
/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	uint32_t val;

	assert_can_disable_lcpll(dev_priv);

	val = I915_READ(LCPLL_CTL);

	if (switch_to_fclk) {
		/* Move the CD clock onto FCLK before shutting the PLL down. */
		val |= LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_us(I915_READ(LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE, 1))
			DRM_ERROR("Switching to FCLK failed\n");

		val = I915_READ(LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);
	POSTING_READ(LCPLL_CTL);

	/* Wait for the PLL to report unlocked. */
	if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
		DRM_ERROR("LCPLL still locked\n");

	/* Disable D_COMP and wait for RCOMP to finish. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
		     1))
		DRM_ERROR("D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		/* Permit the power well to drop LCPLL power. */
		val = I915_READ(LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}
}
10139
/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source. Reverses hsw_disable_lcpll() and re-reads the resulting cdclk.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	val = I915_READ(LCPLL_CTL);

	/* Nothing to do if LCPLL is already locked, enabled and selected. */
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Revoke the permission for the PLL power well to go down. */
	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}

	/* Re-enable (and force) D_COMP before re-locking the PLL. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);

	if (intel_wait_for_register(dev_priv,
				    LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
				    5))
		DRM_ERROR("LCPLL not locked yet\n");

	/* Switch the CD clock back from FCLK to LCPLL if it was parked. */
	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = I915_READ(LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_us((I915_READ(LCPLL_CTL) &
				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			DRM_ERROR("Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	intel_update_cdclk(&dev_priv->drm);
}
10193
/*
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states. Our driver only allows PC8+ when going into runtime PM.
 *
 * The requirements for PC8+ are that all the outputs are disabled, the power
 * well is disabled and most interrupts are disabled, and these are also
 * requirements for runtime PM. When these conditions are met, we manually do
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
 * hang the machine.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6. Notice that this happens even
 * if we don't put the device in PCI D3 state (which is what currently happens
 * because of the runtime PM support).
 *
 * For more, read "Display Sequences for Package C8" on the hardware
 * documentation.
 */
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	uint32_t val;

	DRM_DEBUG_KMS("Enabling package C8+\n");

	/* On LP PCH, allow the PCH power partition to shut down. */
	if (HAS_PCH_LPT_LP(dev_priv)) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	/* Stop the DP clock output, then park the CD clock and drop LCPLL. */
	lpt_disable_clkout_dp(dev);
	hsw_disable_lcpll(dev_priv, true, true);
}
10233
/*
 * Undo hsw_enable_pc8(): bring LCPLL back up, re-init the PCH refclk,
 * and on LP PCH forbid the PCH power partition from shutting down.
 */
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	uint32_t val;

	DRM_DEBUG_KMS("Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	lpt_init_pch_refclk(dev);

	if (HAS_PCH_LPT_LP(dev_priv)) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}
10250
10251 static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state)
10252 {
10253         struct drm_device *dev = old_state->dev;
10254         struct intel_atomic_state *old_intel_state =
10255                 to_intel_atomic_state(old_state);
10256         unsigned int req_cdclk = old_intel_state->dev_cdclk;
10257
10258         bxt_set_cdclk(to_i915(dev), req_cdclk);
10259 }
10260
/* compute the max rate for new configuration */
static int ilk_max_pixel_rate(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct intel_crtc_state *crtc_state;
	unsigned max_pixel_rate = 0, i;
	enum pipe pipe;

	/*
	 * Start from the currently committed per-pipe pixel clocks, then
	 * overwrite the entries for every CRTC touched by this state.
	 */
	memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
	       sizeof(intel_state->min_pixclk));

	for_each_crtc_in_state(state, crtc, cstate, i) {
		int pixel_rate;

		crtc_state = to_intel_crtc_state(cstate);
		if (!crtc_state->base.enable) {
			/* Disabled pipes contribute nothing. */
			intel_state->min_pixclk[i] = 0;
			continue;
		}

		pixel_rate = ilk_pipe_pixel_rate(crtc_state);

		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
		if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
			pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);

		intel_state->min_pixclk[i] = pixel_rate;
	}

	/* The required cdclk is driven by the busiest pipe. */
	for_each_pipe(dev_priv, pipe)
		max_pixel_rate = max(intel_state->min_pixclk[pipe], max_pixel_rate);

	return max_pixel_rate;
}
10298
/*
 * Change the BDW CD clock to @cdclk (kHz) following the BSpec sequence:
 * notify pcode, park the CD clock on FCLK, reprogram LCPLL_CTL, switch
 * back to LCPLL and report the new frequency selector to pcode.
 */
static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t val, data;
	int ret;

	/* LCPLL must be fully up and undisturbed before touching cdclk. */
	if (WARN((I915_READ(LCPLL_CTL) &
		  (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
		   LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
		   LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
		   LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
		 "trying to change cdclk frequency with cdclk not enabled\n"))
		return;

	/* Ask pcode for permission to change the display frequency. */
	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_write(dev_priv,
				      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
	mutex_unlock(&dev_priv->rps.hw_lock);
	if (ret) {
		DRM_ERROR("failed to inform pcode about cdclk change\n");
		return;
	}

	/* Temporarily source the CD clock from FCLK while reprogramming. */
	val = I915_READ(LCPLL_CTL);
	val |= LCPLL_CD_SOURCE_FCLK;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for_us(I915_READ(LCPLL_CTL) &
			LCPLL_CD_SOURCE_FCLK_DONE, 1))
		DRM_ERROR("Switching to FCLK failed\n");

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_CLK_FREQ_MASK;

	/* data is the frequency selector later reported to pcode. */
	switch (cdclk) {
	case 450000:
		val |= LCPLL_CLK_FREQ_450;
		data = 0;
		break;
	case 540000:
		val |= LCPLL_CLK_FREQ_54O_BDW;
		data = 1;
		break;
	case 337500:
		val |= LCPLL_CLK_FREQ_337_5_BDW;
		data = 2;
		break;
	case 675000:
		val |= LCPLL_CLK_FREQ_675_BDW;
		data = 3;
		break;
	default:
		WARN(1, "invalid cdclk frequency\n");
		return;
	}

	I915_WRITE(LCPLL_CTL, val);

	/* Switch the CD clock back from FCLK to LCPLL. */
	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_CD_SOURCE_FCLK;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for_us((I915_READ(LCPLL_CTL) &
			LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
		DRM_ERROR("Switching back to LCPLL failed\n");

	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
	mutex_unlock(&dev_priv->rps.hw_lock);

	/* CDCLK_FREQ takes the frequency in MHz, minus one. */
	I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);

	/* Re-read the achieved cdclk and cross-check against the request. */
	intel_update_cdclk(dev);

	WARN(cdclk != dev_priv->cdclk_freq,
	     "cdclk requested %d kHz but got %d kHz\n",
	     cdclk, dev_priv->cdclk_freq);
}
10377
/*
 * Map a maximum pixel rate (kHz) to the lowest legal BDW cdclk (kHz).
 * The supported cdclk steps are 337500, 450000, 540000 and 675000 kHz;
 * pick the smallest step that is >= the requested pixel rate, capping
 * at 675000 kHz.
 */
static int broadwell_calc_cdclk(int max_pixclk)
{
	static const int cdclk_steps[] = { 337500, 450000, 540000 };
	size_t i;

	for (i = 0; i < sizeof(cdclk_steps) / sizeof(cdclk_steps[0]); i++) {
		if (max_pixclk <= cdclk_steps[i])
			return cdclk_steps[i];
	}

	return 675000;
}
10389
/*
 * Compute the cdclk needed by the new atomic state on BDW. Returns
 * -EINVAL if the required cdclk exceeds the platform maximum.
 */
static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	int max_pixclk = ilk_max_pixel_rate(state);
	int cdclk;

	/*
	 * FIXME should also account for plane ratio
	 * once 64bpp pixel formats are supported.
	 */
	cdclk = broadwell_calc_cdclk(max_pixclk);

	if (cdclk > dev_priv->max_cdclk_freq) {
		DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
			      cdclk, dev_priv->max_cdclk_freq);
		return -EINVAL;
	}

	/* With no active pipes, the device can drop to the minimum cdclk. */
	intel_state->cdclk = intel_state->dev_cdclk = cdclk;
	if (!intel_state->active_crtcs)
		intel_state->dev_cdclk = broadwell_calc_cdclk(0);

	return 0;
}
10415
10416 static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
10417 {
10418         struct drm_device *dev = old_state->dev;
10419         struct intel_atomic_state *old_intel_state =
10420                 to_intel_atomic_state(old_state);
10421         unsigned req_cdclk = old_intel_state->dev_cdclk;
10422
10423         broadwell_set_cdclk(dev, req_cdclk);
10424 }
10425
/*
 * Compute the cdclk needed by the new atomic state on SKL/KBL, for the
 * currently selected cdclk PLL VCO. Unlike the BDW variant this clamps
 * to the platform maximum instead of failing.
 */
static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	const int max_pixclk = ilk_max_pixel_rate(state);
	int vco = intel_state->cdclk_pll_vco;
	int cdclk;

	/*
	 * FIXME should also account for plane ratio
	 * once 64bpp pixel formats are supported.
	 */
	cdclk = skl_calc_cdclk(max_pixclk, vco);

	/*
	 * FIXME move the cdclk calculation to
	 * compute_config() so we can fail gracefully.
	 */
	if (cdclk > dev_priv->max_cdclk_freq) {
		DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
			  cdclk, dev_priv->max_cdclk_freq);
		cdclk = dev_priv->max_cdclk_freq;
	}

	/* With no active pipes, the device can drop to the minimum cdclk. */
	intel_state->cdclk = intel_state->dev_cdclk = cdclk;
	if (!intel_state->active_crtcs)
		intel_state->dev_cdclk = skl_calc_cdclk(0, vco);

	return 0;
}
10456
10457 static void skl_modeset_commit_cdclk(struct drm_atomic_state *old_state)
10458 {
10459         struct drm_i915_private *dev_priv = to_i915(old_state->dev);
10460         struct intel_atomic_state *intel_state = to_intel_atomic_state(old_state);
10461         unsigned int req_cdclk = intel_state->dev_cdclk;
10462         unsigned int req_vco = intel_state->cdclk_pll_vco;
10463
10464         skl_set_cdclk(dev_priv, req_cdclk, req_vco);
10465 }
10466
10467 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
10468                                       struct intel_crtc_state *crtc_state)
10469 {
10470         if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
10471                 if (!intel_ddi_pll_select(crtc, crtc_state))
10472                         return -EINVAL;
10473         }
10474
10475         crtc->lowfreq_avail = false;
10476
10477         return 0;
10478 }
10479
10480 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
10481                                 enum port port,
10482                                 struct intel_crtc_state *pipe_config)
10483 {
10484         enum intel_dpll_id id;
10485
10486         switch (port) {
10487         case PORT_A:
10488                 id = DPLL_ID_SKL_DPLL0;
10489                 break;
10490         case PORT_B:
10491                 id = DPLL_ID_SKL_DPLL1;
10492                 break;
10493         case PORT_C:
10494                 id = DPLL_ID_SKL_DPLL2;
10495                 break;
10496         default:
10497                 DRM_ERROR("Incorrect port type\n");
10498                 return;
10499         }
10500
10501         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10502 }
10503
/*
 * Read out of DPLL_CTRL2 which shared DPLL feeds the given SKL/KBL DDI
 * port and record it in @pipe_config.
 */
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 temp;

	/* Each port has a 3-bit field; the selector sits above the low bit. */
	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
	id = temp >> (port * 3 + 1);

	/*
	 * NOTE(review): id is compared against the SKL_DPLL0..SKL_DPLL3
	 * range but used as enum intel_dpll_id below — presumably the two
	 * enumerations share values on SKL; verify against intel_dpll_mgr.
	 */
	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
		return;

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
10519
/*
 * Read out of PORT_CLK_SEL which PLL (WRPLL/SPLL/LCPLL link rate) feeds
 * the given HSW/BDW DDI port and record it in @pipe_config. A port with
 * no clock selected leaves the pipe config untouched.
 */
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	uint32_t ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

	switch (ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		id = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		id = DPLL_ID_WRPLL2;
		break;
	case PORT_CLK_SEL_SPLL:
		id = DPLL_ID_SPLL;
		break;
	case PORT_CLK_SEL_LCPLL_810:
		id = DPLL_ID_LCPLL_810;
		break;
	case PORT_CLK_SEL_LCPLL_1350:
		id = DPLL_ID_LCPLL_1350;
		break;
	case PORT_CLK_SEL_LCPLL_2700:
		id = DPLL_ID_LCPLL_2700;
		break;
	default:
		MISSING_CASE(ddi_pll_sel);
		/* fall through */
	case PORT_CLK_SEL_NONE:
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
10555
/*
 * Determine which CPU transcoder (possibly the eDP transcoder) drives
 * @crtc and record it in @pipe_config. Takes a reference on that
 * transcoder's power domain — added to @power_domain_mask for the
 * caller to release — and returns whether the pipe is enabled. Returns
 * false without a reference if the power domain is off.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     unsigned long *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	u32 tmp;

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * transcoder handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
		enum pipe trans_edp_pipe;
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			/* Unknown input: warn, then treat it as pipe A. */
			WARN(1, "unknown pipe linked to edp transcoder\n");
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_edp_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_edp_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_edp_pipe = PIPE_C;
			break;
		}

		if (trans_edp_pipe == crtc->pipe)
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
	}

	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;
	*power_domain_mask |= BIT(power_domain);

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
10606
/*
 * Check whether one of the BXT DSI transcoders (ports A and C) drives
 * @crtc. Takes a power domain reference per transcoder inspected —
 * added to @power_domain_mask for the caller to release — records the
 * matching DSI transcoder in @pipe_config, and returns true iff the
 * pipe ends up assigned to a DSI transcoder.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 unsigned long *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	enum port port;
	enum transcoder cpu_transcoder;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
			continue;
		*power_domain_mask |= BIT(power_domain);

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!intel_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* Skip transcoders wired to a different pipe. */
		tmp = I915_READ(MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
10654
/*
 * Read out the DDI port / shared DPLL state for @crtc's transcoder and
 * fill in @pipe_config, including FDI/PCH encoder state on HSW/BDW.
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_shared_dpll *pll;
	enum port port;
	uint32_t tmp;

	/* Which DDI port feeds this transcoder? */
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	/* PLL readout is platform-specific. */
	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_BROXTON(dev_priv))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	pll = pipe_config->shared_dpll;
	if (pll) {
		WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
						 &pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only FDI/PCH transcoder A. It is connected to DDI E.
	 * So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if (INTEL_INFO(dev)->gen < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}
10697
10698 static bool haswell_get_pipe_config(struct intel_crtc *crtc,
10699                                     struct intel_crtc_state *pipe_config)
10700 {
10701         struct drm_device *dev = crtc->base.dev;
10702         struct drm_i915_private *dev_priv = to_i915(dev);
10703         enum intel_display_power_domain power_domain;
10704         unsigned long power_domain_mask;
10705         bool active;
10706
10707         power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
10708         if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
10709                 return false;
10710         power_domain_mask = BIT(power_domain);
10711
10712         pipe_config->shared_dpll = NULL;
10713
10714         active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);
10715
10716         if (IS_BROXTON(dev_priv) &&
10717             bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) {
10718                 WARN_ON(active);
10719                 active = true;
10720         }
10721
10722         if (!active)
10723                 goto out;
10724
10725         if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
10726                 haswell_get_ddi_port_state(crtc, pipe_config);
10727                 intel_get_pipe_timings(crtc, pipe_config);
10728         }
10729
10730         intel_get_pipe_src_size(crtc, pipe_config);
10731
10732         pipe_config->gamma_mode =
10733                 I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
10734
10735         if (INTEL_INFO(dev)->gen >= 9) {
10736                 skl_init_scalers(dev, crtc, pipe_config);
10737         }
10738
10739         if (INTEL_INFO(dev)->gen >= 9) {
10740                 pipe_config->scaler_state.scaler_id = -1;
10741                 pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
10742         }
10743
10744         power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
10745         if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
10746                 power_domain_mask |= BIT(power_domain);
10747                 if (INTEL_INFO(dev)->gen >= 9)
10748                         skylake_get_pfit_config(crtc, pipe_config);
10749                 else
10750                         ironlake_get_pfit_config(crtc, pipe_config);
10751         }
10752
10753         if (IS_HASWELL(dev_priv))
10754                 pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
10755                         (I915_READ(IPS_CTL) & IPS_ENABLE);
10756
10757         if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
10758             !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
10759                 pipe_config->pixel_multiplier =
10760                         I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
10761         } else {
10762                 pipe_config->pixel_multiplier = 1;
10763         }
10764
10765 out:
10766         for_each_power_domain(power_domain, power_domain_mask)
10767                 intel_display_power_put(dev_priv, power_domain);
10768
10769         return active;
10770 }
10771
/*
 * Program the 845/865-style cursor. These chips expose a single cursor
 * (accessed via the PIPE_A register instances) whose base/size/stride
 * can only be changed while the cursor is disabled, so the cursor is
 * briefly turned off whenever any of those need to change. A NULL or
 * invisible @plane_state disables the cursor.
 */
static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
			       const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t cntl = 0, size = 0;

	if (plane_state && plane_state->base.visible) {
		unsigned int width = plane_state->base.crtc_w;
		unsigned int height = plane_state->base.crtc_h;
		/* 4 bytes per pixel (ARGB), rounded up to a power of two. */
		unsigned int stride = roundup_pow_of_two(width) * 4;

		/* Only strides of 256/512/1024/2048 bytes are valid here. */
		switch (stride) {
		default:
			WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
				  width, stride);
			stride = 256;
			/* fallthrough */
		case 256:
		case 512:
		case 1024:
		case 2048:
			break;
		}

		cntl |= CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB |
			CURSOR_STRIDE(stride);

		size = (height << 12) | width;
	}

	if (intel_crtc->cursor_cntl != 0 &&
	    (intel_crtc->cursor_base != base ||
	     intel_crtc->cursor_size != size ||
	     intel_crtc->cursor_cntl != cntl)) {
		/* On these chipsets we can only modify the base/size/stride
		 * whilst the cursor is disabled.
		 */
		I915_WRITE(CURCNTR(PIPE_A), 0);
		POSTING_READ(CURCNTR(PIPE_A));
		intel_crtc->cursor_cntl = 0;
	}

	/* Write only the registers whose cached values actually changed. */
	if (intel_crtc->cursor_base != base) {
		I915_WRITE(CURBASE(PIPE_A), base);
		intel_crtc->cursor_base = base;
	}

	if (intel_crtc->cursor_size != size) {
		I915_WRITE(CURSIZE, size);
		intel_crtc->cursor_size = size;
	}

	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(CURCNTR(PIPE_A), cntl);
		POSTING_READ(CURCNTR(PIPE_A));
		intel_crtc->cursor_cntl = cntl;
	}
}
10834
/*
 * Program the per-pipe i9xx+ cursor (square 64/128/256 ARGB modes).
 * On gen9+ the cursor watermarks are also written here when this pipe
 * is flagged dirty. A NULL or invisible @plane_state disables the
 * cursor. The base write commits the update on the next vblank.
 */
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
			       const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
	const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
	const struct skl_plane_wm *p_wm =
		&cstate->wm.skl.optimal.planes[PLANE_CURSOR];
	int pipe = intel_crtc->pipe;
	uint32_t cntl = 0;

	if (INTEL_GEN(dev_priv) >= 9 && wm->dirty_pipes & drm_crtc_mask(crtc))
		skl_write_cursor_wm(intel_crtc, p_wm, &wm->ddb);

	if (plane_state && plane_state->base.visible) {
		cntl = MCURSOR_GAMMA_ENABLE;
		/* Only the three square hardware cursor sizes are supported. */
		switch (plane_state->base.crtc_w) {
			case 64:
				cntl |= CURSOR_MODE_64_ARGB_AX;
				break;
			case 128:
				cntl |= CURSOR_MODE_128_ARGB_AX;
				break;
			case 256:
				cntl |= CURSOR_MODE_256_ARGB_AX;
				break;
			default:
				MISSING_CASE(plane_state->base.crtc_w);
				return;
		}
		cntl |= pipe << 28; /* Connect to correct pipe */

		if (HAS_DDI(dev_priv))
			cntl |= CURSOR_PIPE_CSC_ENABLE;

		if (plane_state->base.rotation == DRM_ROTATE_180)
			cntl |= CURSOR_ROTATE_180;
	}

	/* Skip the control write if the cached value already matches. */
	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(CURCNTR(pipe), cntl);
		POSTING_READ(CURCNTR(pipe));
		intel_crtc->cursor_cntl = cntl;
	}

	/* and commit changes on next vblank */
	I915_WRITE(CURBASE(pipe), base);
	POSTING_READ(CURBASE(pipe));

	intel_crtc->cursor_base = base;
}
10888
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
/*
 * Update the cursor position (and, via the per-platform helpers, the rest
 * of the cursor state) for @crtc.  CURPOS uses a sign/magnitude encoding,
 * hence the explicit negation + CURSOR_POS_SIGN handling below.
 * @plane_state may be NULL, in which case the helpers disable the cursor.
 */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
				     const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 base = intel_crtc->cursor_addr;
	u32 pos = 0;

	if (plane_state) {
		int x = plane_state->base.crtc_x;
		int y = plane_state->base.crtc_y;

		if (x < 0) {
			pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
			x = -x;
		}
		pos |= x << CURSOR_X_SHIFT;

		if (y < 0) {
			pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
			y = -y;
		}
		pos |= y << CURSOR_Y_SHIFT;

		/* ILK+ do this automagically */
		if (HAS_GMCH_DISPLAY(dev_priv) &&
		    plane_state->base.rotation == DRM_ROTATE_180) {
			/*
			 * For 180 degree rotation start scanout from the last
			 * pixel of the image (4 bytes per pixel here).
			 */
			base += (plane_state->base.crtc_h *
				 plane_state->base.crtc_w - 1) * 4;
		}
	}

	/* Write the position before the helper arms the update via CURBASE. */
	I915_WRITE(CURPOS(pipe), pos);

	if (IS_845G(dev_priv) || IS_I865G(dev_priv))
		i845_update_cursor(crtc, base, plane_state);
	else
		i9xx_update_cursor(crtc, base, plane_state);
}
10931
10932 static bool cursor_size_ok(struct drm_i915_private *dev_priv,
10933                            uint32_t width, uint32_t height)
10934 {
10935         if (width == 0 || height == 0)
10936                 return false;
10937
10938         /*
10939          * 845g/865g are special in that they are only limited by
10940          * the width of their cursors, the height is arbitrary up to
10941          * the precision of the register. Everything else requires
10942          * square cursors, limited to a few power-of-two sizes.
10943          */
10944         if (IS_845G(dev_priv) || IS_I865G(dev_priv)) {
10945                 if ((width & 63) != 0)
10946                         return false;
10947
10948                 if (width > (IS_845G(dev_priv) ? 64 : 512))
10949                         return false;
10950
10951                 if (height > 1023)
10952                         return false;
10953         } else {
10954                 switch (width | height) {
10955                 case 256:
10956                 case 128:
10957                         if (IS_GEN2(dev_priv))
10958                                 return false;
10959                 case 64:
10960                         break;
10961                 default:
10962                         return false;
10963                 }
10964         }
10965
10966         return true;
10967 }
10968
/*
 * VESA 640x480x72Hz mode to set on the pipe while performing load
 * detection (see intel_get_load_detect_pipe()).
 */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
10974
10975 struct drm_framebuffer *
10976 __intel_framebuffer_create(struct drm_device *dev,
10977                            struct drm_mode_fb_cmd2 *mode_cmd,
10978                            struct drm_i915_gem_object *obj)
10979 {
10980         struct intel_framebuffer *intel_fb;
10981         int ret;
10982
10983         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
10984         if (!intel_fb)
10985                 return ERR_PTR(-ENOMEM);
10986
10987         ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
10988         if (ret)
10989                 goto err;
10990
10991         return &intel_fb->base;
10992
10993 err:
10994         kfree(intel_fb);
10995         return ERR_PTR(ret);
10996 }
10997
10998 static struct drm_framebuffer *
10999 intel_framebuffer_create(struct drm_device *dev,
11000                          struct drm_mode_fb_cmd2 *mode_cmd,
11001                          struct drm_i915_gem_object *obj)
11002 {
11003         struct drm_framebuffer *fb;
11004         int ret;
11005
11006         ret = i915_mutex_lock_interruptible(dev);
11007         if (ret)
11008                 return ERR_PTR(ret);
11009         fb = __intel_framebuffer_create(dev, mode_cmd, obj);
11010         mutex_unlock(&dev->struct_mutex);
11011
11012         return fb;
11013 }
11014
/*
 * Bytes per scanline for a linear framebuffer of @width pixels at @bpp
 * bits per pixel, rounded up to the next multiple of 64 bytes.
 */
static u32
intel_framebuffer_pitch_for_width(int width, int bpp)
{
	u32 bytes = ((u32)(width * bpp) + 7) / 8; /* round bits up to bytes */

	return (bytes + 63) & ~(u32)63; /* pad to 64 byte alignment */
}
11021
/*
 * Minimum object size, in bytes and page-aligned, needed to back a linear
 * framebuffer for @mode at @bpp bits per pixel.
 */
static u32
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
{
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
	return PAGE_ALIGN(pitch * mode->vdisplay);
}
11028
11029 static struct drm_framebuffer *
11030 intel_framebuffer_create_for_mode(struct drm_device *dev,
11031                                   struct drm_display_mode *mode,
11032                                   int depth, int bpp)
11033 {
11034         struct drm_framebuffer *fb;
11035         struct drm_i915_gem_object *obj;
11036         struct drm_mode_fb_cmd2 mode_cmd = { 0 };
11037
11038         obj = i915_gem_object_create(dev,
11039                                     intel_framebuffer_size_for_mode(mode, bpp));
11040         if (IS_ERR(obj))
11041                 return ERR_CAST(obj);
11042
11043         mode_cmd.width = mode->hdisplay;
11044         mode_cmd.height = mode->vdisplay;
11045         mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
11046                                                                 bpp);
11047         mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
11048
11049         fb = intel_framebuffer_create(dev, &mode_cmd, obj);
11050         if (IS_ERR(fb))
11051                 i915_gem_object_put_unlocked(obj);
11052
11053         return fb;
11054 }
11055
/*
 * Try to reuse the fbdev framebuffer for load detection instead of
 * allocating a fresh one.  Returns a new reference to the fbdev fb if its
 * pitch and backing object are large enough for @mode, otherwise NULL
 * (including when fbdev emulation is compiled out or not yet set up).
 */
static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
		   struct drm_display_mode *mode)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;
	struct drm_framebuffer *fb;

	if (!dev_priv->fbdev)
		return NULL;

	if (!dev_priv->fbdev->fb)
		return NULL;

	obj = dev_priv->fbdev->fb->obj;
	BUG_ON(!obj);

	/* The fbdev fb must be wide enough for the mode... */
	fb = &dev_priv->fbdev->fb->base;
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
							       fb->bits_per_pixel))
		return NULL;

	/* ...and its backing storage tall enough. */
	if (obj->base.size < mode->vdisplay * fb->pitches[0])
		return NULL;

	drm_framebuffer_reference(fb);
	return fb;
#else
	return NULL;
#endif
}
11088
11089 static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
11090                                            struct drm_crtc *crtc,
11091                                            struct drm_display_mode *mode,
11092                                            struct drm_framebuffer *fb,
11093                                            int x, int y)
11094 {
11095         struct drm_plane_state *plane_state;
11096         int hdisplay, vdisplay;
11097         int ret;
11098
11099         plane_state = drm_atomic_get_plane_state(state, crtc->primary);
11100         if (IS_ERR(plane_state))
11101                 return PTR_ERR(plane_state);
11102
11103         if (mode)
11104                 drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
11105         else
11106                 hdisplay = vdisplay = 0;
11107
11108         ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
11109         if (ret)
11110                 return ret;
11111         drm_atomic_set_fb_for_plane(plane_state, fb);
11112         plane_state->crtc_x = 0;
11113         plane_state->crtc_y = 0;
11114         plane_state->crtc_w = hdisplay;
11115         plane_state->crtc_h = vdisplay;
11116         plane_state->src_x = x << 16;
11117         plane_state->src_y = y << 16;
11118         plane_state->src_w = hdisplay << 16;
11119         plane_state->src_h = vdisplay << 16;
11120
11121         return 0;
11122 }
11123
/**
 * intel_get_load_detect_pipe - light up a pipe so @connector can be probed
 * @connector: connector to probe
 * @mode: mode to program, or NULL for the default load-detect mode
 * @old: receives the restore state consumed later by
 *       intel_release_load_detect_pipe()
 * @ctx: modeset acquire context; -EDEADLK is handled here via backoff+retry
 *
 * Finds a CRTC that can drive @connector (preferring one that is already
 * attached, otherwise the first unused one), builds an atomic state that
 * enables it with @mode and a scratch (or reused fbdev) framebuffer,
 * commits it, and stores the state needed to undo everything in @old.
 *
 * Returns true when the pipe is up and ready to probe, false otherwise.
 */
bool intel_get_load_detect_pipe(struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *fb;
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	old->restore_state = NULL;

retry:
	ret = drm_modeset_lock(&config->connection_mutex, ctx);
	if (ret)
		goto fail;

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		/* Skip crtcs this encoder cannot drive. */
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Already in use; drop its lock and keep looking. */
		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
	if (ret)
		goto fail;

	/* One state to apply now, one to restore in the release path. */
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->base.active = crtc_state->base.enable = true;

	if (!mode)
		mode = &load_detect_mode;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	fb = mode_fits_in_fbdev(dev, mode);
	if (fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (IS_ERR(fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		goto fail;
	}

	ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
	if (ret)
		goto fail;

	/* The plane state now holds its own reference to fb. */
	drm_framebuffer_unreference(fb);

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
	if (ret)
		goto fail;

	/*
	 * Snapshot the current connector/crtc/plane state into
	 * restore_state before committing the new one.
	 */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary));
	if (ret) {
		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		goto fail;
	}

	old->restore_state = restore_state;

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	return true;

fail:
	drm_atomic_state_free(state);
	drm_atomic_state_free(restore_state);
	restore_state = state = NULL;

	if (ret == -EDEADLK) {
		drm_modeset_backoff(ctx);
		goto retry;
	}

	return false;
}
11303
11304 void intel_release_load_detect_pipe(struct drm_connector *connector,
11305                                     struct intel_load_detect_pipe *old,
11306                                     struct drm_modeset_acquire_ctx *ctx)
11307 {
11308         struct intel_encoder *intel_encoder =
11309                 intel_attached_encoder(connector);
11310         struct drm_encoder *encoder = &intel_encoder->base;
11311         struct drm_atomic_state *state = old->restore_state;
11312         int ret;
11313
11314         DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
11315                       connector->base.id, connector->name,
11316                       encoder->base.id, encoder->name);
11317
11318         if (!state)
11319                 return;
11320
11321         ret = drm_atomic_commit(state);
11322         if (ret) {
11323                 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
11324                 drm_atomic_state_free(state);
11325         }
11326 }
11327
11328 static int i9xx_pll_refclk(struct drm_device *dev,
11329                            const struct intel_crtc_state *pipe_config)
11330 {
11331         struct drm_i915_private *dev_priv = to_i915(dev);
11332         u32 dpll = pipe_config->dpll_hw_state.dpll;
11333
11334         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
11335                 return dev_priv->vbt.lvds_ssc_freq;
11336         else if (HAS_PCH_SPLIT(dev_priv))
11337                 return 120000;
11338         else if (!IS_GEN2(dev_priv))
11339                 return 96000;
11340         else
11341                 return 48000;
11342 }
11343
/* Returns the clock of the currently programmed mode of the given pipe. */
/*
 * Decode the DPLL/FP register snapshot in @pipe_config back into divider
 * values and store the resulting port clock in pipe_config->port_clock.
 * Gen2 and gen3+ encode the P dividers differently, hence the two decode
 * paths below.
 */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* Pick whichever FP register the DPLL is actually using. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	/* Pineview packs N/M2 differently from the other platforms. */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev_priv)) {
		/* P1 is stored as a one-hot bitfield, hence the ffs(). */
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* Gen2: LVDS lives on pipe B and changes the P decoding. */
		u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
11433
11434 int intel_dotclock_calculate(int link_freq,
11435                              const struct intel_link_m_n *m_n)
11436 {
11437         /*
11438          * The calculation for the data clock is:
11439          * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
11440          * But we want to avoid losing precison if possible, so:
11441          * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
11442          *
11443          * and the link clock is simpler:
11444          * link_clock = (m * link_clock) / n
11445          */
11446
11447         if (!m_n->link_n)
11448                 return 0;
11449
11450         return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
11451 }
11452
/*
 * Fill in port_clock and a derived dotclock for a PCH-attached pipe:
 * the port clock comes from decoding the DPLL state, the dotclock from
 * the FDI M/N ratio.
 */
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * In case there is an active pipe without active ports,
	 * we may need some idea for the dotclock anyway.
	 * Calculate one based on the FDI configuration.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
					 &pipe_config->fdi_m_n);
}
11470
/** Returns the currently programmed mode of the given pipe. */
/*
 * Reads the transcoder timing registers and DPLL state back into a freshly
 * allocated drm_display_mode.  Returns NULL on allocation failure; the
 * caller owns (and must free) the returned mode.
 *
 * NOTE(review): this reads pipe/DPLL registers directly -- presumably the
 * callers guarantee the relevant power domain is on; confirm.
 */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	struct drm_display_mode *mode;
	struct intel_crtc_state *pipe_config;
	int htot = I915_READ(HTOTAL(cpu_transcoder));
	int hsync = I915_READ(HSYNC(cpu_transcoder));
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
	int vsync = I915_READ(VSYNC(cpu_transcoder));
	enum pipe pipe = intel_crtc->pipe;

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
	if (!pipe_config) {
		kfree(mode);
		return NULL;
	}

	/*
	 * Construct a pipe_config sufficient for getting the clock info
	 * back out of crtc_clock_get.
	 *
	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
	 * to use a real value here instead.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) pipe;
	pipe_config->pixel_multiplier = 1;
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(pipe));
	pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(pipe));
	pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(pipe));
	i9xx_crtc_clock_get(intel_crtc, pipe_config);

	/* Timing registers store value-1; low word = active, high = total. */
	mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);

	kfree(pipe_config);

	return mode;
}
11526
11527 static void intel_crtc_destroy(struct drm_crtc *crtc)
11528 {
11529         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11530         struct drm_device *dev = crtc->dev;
11531         struct intel_flip_work *work;
11532
11533         spin_lock_irq(&dev->event_lock);
11534         work = intel_crtc->flip_work;
11535         intel_crtc->flip_work = NULL;
11536         spin_unlock_irq(&dev->event_lock);
11537
11538         if (work) {
11539                 cancel_work_sync(&work->mmio_work);
11540                 cancel_work_sync(&work->unpin_work);
11541                 kfree(work);
11542         }
11543
11544         drm_crtc_cleanup(crtc);
11545
11546         kfree(intel_crtc);
11547 }
11548
/*
 * Deferred page-flip completion: unpin the old framebuffer and drop the
 * references taken when the flip was queued.  Runs from process context
 * (it takes struct_mutex).
 */
static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_flip_work *work =
		container_of(__work, struct intel_flip_work, unpin_work);
	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_plane *primary = crtc->base.primary;

	/* For mmio flips, make sure the mmio work has run first. */
	if (is_mmio_work(work))
		flush_work(&work->mmio_work);

	mutex_lock(&dev->struct_mutex);
	intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
	i915_gem_object_put(work->pending_flip_obj);
	mutex_unlock(&dev->struct_mutex);

	i915_gem_request_put(work->flip_queued_req);

	intel_frontbuffer_flip_complete(to_i915(dev),
					to_intel_plane(primary)->frontbuffer_bit);
	intel_fbc_post_update(crtc);
	drm_framebuffer_unreference(work->old_fb);

	/* This work must have been counted when it was queued. */
	BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
	atomic_dec(&crtc->unpin_work_count);

	kfree(work);
}
11577
/* Is 'a' after or equal to 'b'? (modulo-2^32 wraparound compare) */
static bool g4x_flip_count_after_eq(u32 a, u32 b)
{
	u32 delta = a - b;

	/* a >= b iff the wrapped difference is less than half the range. */
	return delta < 0x80000000u;
}
11583
/*
 * Has the CS (ring-executed) flip tracked by crtc->flip_work actually
 * landed on the hardware?  Combines a DSPSURFLIVE base-address check with
 * a flip-counter comparison, as explained in the comments below.
 */
static bool __pageflip_finished_cs(struct intel_crtc *crtc,
				   struct intel_flip_work *work)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* After a GPU reset the flip is considered complete. */
	if (abort_flip_on_reset(crtc))
		return true;

	/*
	 * The relevant registers doen't exist on pre-ctg.
	 * As the flip done interrupt doesn't trigger for mmio
	 * flips on gmch platforms, a flip count check isn't
	 * really needed there. But since ctg has the registers,
	 * include it in the check anyway.
	 */
	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
		return true;

	/*
	 * BDW signals flip done immediately if the plane
	 * is disabled, even if the plane enable is already
	 * armed to occur at the next vblank :(
	 */

	/*
	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
	 * used the same base address. In that case the mmio flip might
	 * have completed, but the CS hasn't even executed the flip yet.
	 *
	 * A flip count check isn't enough as the CS might have updated
	 * the base address just after start of vblank, but before we
	 * managed to process the interrupt. This means we'd complete the
	 * CS flip too soon.
	 *
	 * Combining both checks should get us a good enough result. It may
	 * still happen that the CS flip has been executed, but has not
	 * yet actually completed. But in case the base address is the same
	 * anyway, we don't really care.
	 */
	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
		crtc->flip_work->gtt_offset &&
		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
				    crtc->flip_work->flip_count);
}
11629
11630 static bool
11631 __pageflip_finished_mmio(struct intel_crtc *crtc,
11632                                struct intel_flip_work *work)
11633 {
11634         /*
11635          * MMIO work completes when vblank is different from
11636          * flip_queued_vblank.
11637          *
11638          * Reset counter value doesn't matter, this is handled by
11639          * i915_wait_request finishing early, so no need to handle
11640          * reset here.
11641          */
11642         return intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank;
11643 }
11644
11645
/*
 * Has @work completed on the hardware?  Returns false while the flip has
 * not even been queued to the hardware yet.
 */
static bool pageflip_finished(struct intel_crtc *crtc,
			      struct intel_flip_work *work)
{
	if (!atomic_read(&work->pending))
		return false;

	/*
	 * Ensure the fields checked by the helpers below are not read
	 * before 'pending' itself.  NOTE(review): the pairing write-side
	 * barrier is outside this chunk -- confirm where 'pending' is set.
	 */
	smp_rmb();

	if (is_mmio_work(work))
		return __pageflip_finished_mmio(crtc, work);
	else
		return __pageflip_finished_cs(crtc, work);
}
11659
11660 void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
11661 {
11662         struct drm_device *dev = &dev_priv->drm;
11663         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
11664         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11665         struct intel_flip_work *work;
11666         unsigned long flags;
11667
11668         /* Ignore early vblank irqs */
11669         if (!crtc)
11670                 return;
11671
11672         /*
11673          * This is called both by irq handlers and the reset code (to complete
11674          * lost pageflips) so needs the full irqsave spinlocks.
11675          */
11676         spin_lock_irqsave(&dev->event_lock, flags);
11677         work = intel_crtc->flip_work;
11678
11679         if (work != NULL &&
11680             !is_mmio_work(work) &&
11681             pageflip_finished(intel_crtc, work))
11682                 page_flip_completed(intel_crtc);
11683
11684         spin_unlock_irqrestore(&dev->event_lock, flags);
11685 }
11686
11687 void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
11688 {
11689         struct drm_device *dev = &dev_priv->drm;
11690         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
11691         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11692         struct intel_flip_work *work;
11693         unsigned long flags;
11694
11695         /* Ignore early vblank irqs */
11696         if (!crtc)
11697                 return;
11698
11699         /*
11700          * This is called both by irq handlers and the reset code (to complete
11701          * lost pageflips) so needs the full irqsave spinlocks.
11702          */
11703         spin_lock_irqsave(&dev->event_lock, flags);
11704         work = intel_crtc->flip_work;
11705
11706         if (work != NULL &&
11707             is_mmio_work(work) &&
11708             pageflip_finished(intel_crtc, work))
11709                 page_flip_completed(intel_crtc);
11710
11711         spin_unlock_irqrestore(&dev->event_lock, flags);
11712 }
11713
/*
 * Arm a queued flip: record the vblank it was queued on (consumed by the
 * pageflip_finished()/stall-check paths) and mark the work as pending.
 */
static inline void intel_mark_page_flip_active(struct intel_crtc *crtc,
					       struct intel_flip_work *work)
{
	work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc);

	/* Ensure that the work item is consistent when activating it ...
	 * (pairs with the smp_rmb() after atomic_read() in the readers) */
	smp_mb__before_atomic();
	atomic_set(&work->pending, 1);
}
11723
11724 static int intel_gen2_queue_flip(struct drm_device *dev,
11725                                  struct drm_crtc *crtc,
11726                                  struct drm_framebuffer *fb,
11727                                  struct drm_i915_gem_object *obj,
11728                                  struct drm_i915_gem_request *req,
11729                                  uint32_t flags)
11730 {
11731         struct intel_ring *ring = req->ring;
11732         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11733         u32 flip_mask;
11734         int ret;
11735
11736         ret = intel_ring_begin(req, 6);
11737         if (ret)
11738                 return ret;
11739
11740         /* Can't queue multiple flips, so wait for the previous
11741          * one to finish before executing the next.
11742          */
11743         if (intel_crtc->plane)
11744                 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
11745         else
11746                 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
11747         intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
11748         intel_ring_emit(ring, MI_NOOP);
11749         intel_ring_emit(ring, MI_DISPLAY_FLIP |
11750                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11751         intel_ring_emit(ring, fb->pitches[0]);
11752         intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
11753         intel_ring_emit(ring, 0); /* aux display base address, unused */
11754
11755         return 0;
11756 }
11757
11758 static int intel_gen3_queue_flip(struct drm_device *dev,
11759                                  struct drm_crtc *crtc,
11760                                  struct drm_framebuffer *fb,
11761                                  struct drm_i915_gem_object *obj,
11762                                  struct drm_i915_gem_request *req,
11763                                  uint32_t flags)
11764 {
11765         struct intel_ring *ring = req->ring;
11766         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11767         u32 flip_mask;
11768         int ret;
11769
11770         ret = intel_ring_begin(req, 6);
11771         if (ret)
11772                 return ret;
11773
11774         if (intel_crtc->plane)
11775                 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
11776         else
11777                 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
11778         intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
11779         intel_ring_emit(ring, MI_NOOP);
11780         intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
11781                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11782         intel_ring_emit(ring, fb->pitches[0]);
11783         intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
11784         intel_ring_emit(ring, MI_NOOP);
11785
11786         return 0;
11787 }
11788
11789 static int intel_gen4_queue_flip(struct drm_device *dev,
11790                                  struct drm_crtc *crtc,
11791                                  struct drm_framebuffer *fb,
11792                                  struct drm_i915_gem_object *obj,
11793                                  struct drm_i915_gem_request *req,
11794                                  uint32_t flags)
11795 {
11796         struct intel_ring *ring = req->ring;
11797         struct drm_i915_private *dev_priv = to_i915(dev);
11798         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11799         uint32_t pf, pipesrc;
11800         int ret;
11801
11802         ret = intel_ring_begin(req, 4);
11803         if (ret)
11804                 return ret;
11805
11806         /* i965+ uses the linear or tiled offsets from the
11807          * Display Registers (which do not change across a page-flip)
11808          * so we need only reprogram the base address.
11809          */
11810         intel_ring_emit(ring, MI_DISPLAY_FLIP |
11811                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11812         intel_ring_emit(ring, fb->pitches[0]);
11813         intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset |
11814                         intel_fb_modifier_to_tiling(fb->modifier[0]));
11815
11816         /* XXX Enabling the panel-fitter across page-flip is so far
11817          * untested on non-native modes, so ignore it for now.
11818          * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
11819          */
11820         pf = 0;
11821         pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
11822         intel_ring_emit(ring, pf | pipesrc);
11823
11824         return 0;
11825 }
11826
11827 static int intel_gen6_queue_flip(struct drm_device *dev,
11828                                  struct drm_crtc *crtc,
11829                                  struct drm_framebuffer *fb,
11830                                  struct drm_i915_gem_object *obj,
11831                                  struct drm_i915_gem_request *req,
11832                                  uint32_t flags)
11833 {
11834         struct intel_ring *ring = req->ring;
11835         struct drm_i915_private *dev_priv = to_i915(dev);
11836         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11837         uint32_t pf, pipesrc;
11838         int ret;
11839
11840         ret = intel_ring_begin(req, 4);
11841         if (ret)
11842                 return ret;
11843
11844         intel_ring_emit(ring, MI_DISPLAY_FLIP |
11845                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11846         intel_ring_emit(ring, fb->pitches[0] |
11847                         intel_fb_modifier_to_tiling(fb->modifier[0]));
11848         intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
11849
11850         /* Contrary to the suggestions in the documentation,
11851          * "Enable Panel Fitter" does not seem to be required when page
11852          * flipping with a non-native mode, and worse causes a normal
11853          * modeset to fail.
11854          * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
11855          */
11856         pf = 0;
11857         pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
11858         intel_ring_emit(ring, pf | pipesrc);
11859
11860         return 0;
11861 }
11862
/*
 * Queue a CS flip on ivb+ via MI_DISPLAY_FLIP, including the DERRMR
 * unmask/save dance required when emitting on the render ring.
 * Returns 0 on success or a negative error code.
 */
static int intel_gen7_queue_flip(struct drm_device *dev,
                                 struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
                                 struct drm_i915_gem_object *obj,
                                 struct drm_i915_gem_request *req,
                                 uint32_t flags)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_ring *ring = req->ring;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        uint32_t plane_bit = 0;
        int len, ret;

        /* Translate the plane to the ivb-specific MI_DISPLAY_FLIP encoding. */
        switch (intel_crtc->plane) {
        case PLANE_A:
                plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
                break;
        case PLANE_B:
                plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
                break;
        case PLANE_C:
                plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
                break;
        default:
                WARN_ONCE(1, "unknown plane in flip command\n");
                return -ENODEV;
        }

        /* Base packet is 4 dwords; RCS needs the extra LRI+SRM sequence. */
        len = 4;
        if (req->engine->id == RCS) {
                len += 6;
                /*
                 * On Gen 8, SRM is now taking an extra dword to accommodate
                 * 48bits addresses, and we need a NOOP for the batch size to
                 * stay even.
                 */
                if (IS_GEN8(dev_priv))
                        len += 2;
        }

        /*
         * BSpec MI_DISPLAY_FLIP for IVB:
         * "The full packet must be contained within the same cache line."
         *
         * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
         * cacheline, if we ever start emitting more commands before
         * the MI_DISPLAY_FLIP we may need to first emit everything else,
         * then do the cacheline alignment, and finally emit the
         * MI_DISPLAY_FLIP.
         */
        ret = intel_ring_cacheline_align(req);
        if (ret)
                return ret;

        ret = intel_ring_begin(req, len);
        if (ret)
                return ret;

        /* Unmask the flip-done completion message. Note that the bspec says that
         * we should do this for both the BCS and RCS, and that we must not unmask
         * more than one flip event at any time (or ensure that one flip message
         * can be sent by waiting for flip-done prior to queueing new flips).
         * Experimentation says that BCS works despite DERRMR masking all
         * flip-done completion events and that unmasking all planes at once
         * for the RCS also doesn't appear to drop events. Setting the DERRMR
         * to zero does lead to lockups within MI_DISPLAY_FLIP.
         */
        if (req->engine->id == RCS) {
                intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
                intel_ring_emit_reg(ring, DERRMR);
                intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
                                          DERRMR_PIPEB_PRI_FLIP_DONE |
                                          DERRMR_PIPEC_PRI_FLIP_DONE));
                /* Save DERRMR to scratch so it can be restored later;
                 * gen8 SRM takes a 64-bit address (extra dword). */
                if (IS_GEN8(dev_priv))
                        intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
                                              MI_SRM_LRM_GLOBAL_GTT);
                else
                        intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
                                              MI_SRM_LRM_GLOBAL_GTT);
                intel_ring_emit_reg(ring, DERRMR);
                intel_ring_emit(ring,
                                i915_ggtt_offset(req->engine->scratch) + 256);
                if (IS_GEN8(dev_priv)) {
                        intel_ring_emit(ring, 0);
                        intel_ring_emit(ring, MI_NOOP);
                }
        }

        /* The actual flip: plane select, pitch+tiling, new surface base. */
        intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
        intel_ring_emit(ring, fb->pitches[0] |
                        intel_fb_modifier_to_tiling(fb->modifier[0]));
        intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
        intel_ring_emit(ring, (MI_NOOP));

        return 0;
}
11959
11960 static bool use_mmio_flip(struct intel_engine_cs *engine,
11961                           struct drm_i915_gem_object *obj)
11962 {
11963         struct reservation_object *resv;
11964
11965         /*
11966          * This is not being used for older platforms, because
11967          * non-availability of flip done interrupt forces us to use
11968          * CS flips. Older platforms derive flip done using some clever
11969          * tricks involving the flip_pending status bits and vblank irqs.
11970          * So using MMIO flips there would disrupt this mechanism.
11971          */
11972
11973         if (engine == NULL)
11974                 return true;
11975
11976         if (INTEL_GEN(engine->i915) < 5)
11977                 return false;
11978
11979         if (i915.use_mmio_flip < 0)
11980                 return false;
11981         else if (i915.use_mmio_flip > 0)
11982                 return true;
11983         else if (i915.enable_execlists)
11984                 return true;
11985
11986         resv = i915_gem_object_get_dmabuf_resv(obj);
11987         if (resv && !reservation_object_test_signaled_rcu(resv, false))
11988                 return true;
11989
11990         return engine != i915_gem_active_get_engine(&obj->last_write,
11991                                                     &obj->base.dev->struct_mutex);
11992 }
11993
11994 static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
11995                              unsigned int rotation,
11996                              struct intel_flip_work *work)
11997 {
11998         struct drm_device *dev = intel_crtc->base.dev;
11999         struct drm_i915_private *dev_priv = to_i915(dev);
12000         struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
12001         const enum pipe pipe = intel_crtc->pipe;
12002         u32 ctl, stride = skl_plane_stride(fb, 0, rotation);
12003
12004         ctl = I915_READ(PLANE_CTL(pipe, 0));
12005         ctl &= ~PLANE_CTL_TILED_MASK;
12006         switch (fb->modifier[0]) {
12007         case DRM_FORMAT_MOD_NONE:
12008                 break;
12009         case I915_FORMAT_MOD_X_TILED:
12010                 ctl |= PLANE_CTL_TILED_X;
12011                 break;
12012         case I915_FORMAT_MOD_Y_TILED:
12013                 ctl |= PLANE_CTL_TILED_Y;
12014                 break;
12015         case I915_FORMAT_MOD_Yf_TILED:
12016                 ctl |= PLANE_CTL_TILED_YF;
12017                 break;
12018         default:
12019                 MISSING_CASE(fb->modifier[0]);
12020         }
12021
12022         /*
12023          * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
12024          * PLANE_SURF updates, the update is then guaranteed to be atomic.
12025          */
12026         I915_WRITE(PLANE_CTL(pipe, 0), ctl);
12027         I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
12028
12029         I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
12030         POSTING_READ(PLANE_SURF(pipe, 0));
12031 }
12032
12033 static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
12034                              struct intel_flip_work *work)
12035 {
12036         struct drm_device *dev = intel_crtc->base.dev;
12037         struct drm_i915_private *dev_priv = to_i915(dev);
12038         struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
12039         i915_reg_t reg = DSPCNTR(intel_crtc->plane);
12040         u32 dspcntr;
12041
12042         dspcntr = I915_READ(reg);
12043
12044         if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
12045                 dspcntr |= DISPPLANE_TILED;
12046         else
12047                 dspcntr &= ~DISPPLANE_TILED;
12048
12049         I915_WRITE(reg, dspcntr);
12050
12051         I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
12052         POSTING_READ(DSPSURF(intel_crtc->plane));
12053 }
12054
/*
 * Deferred worker performing an MMIO flip: waits for rendering to the new
 * framebuffer to complete, then writes the plane registers between
 * intel_pipe_update_start()/end() so the update lands in one frame.
 */
static void intel_mmio_flip_work_func(struct work_struct *w)
{
        struct intel_flip_work *work =
                container_of(w, struct intel_flip_work, mmio_work);
        struct intel_crtc *crtc = to_intel_crtc(work->crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_framebuffer *intel_fb =
                to_intel_framebuffer(crtc->base.primary->fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;
        struct reservation_object *resv;

        /* Wait for any outstanding rendering request on the new fb. */
        if (work->flip_queued_req)
                WARN_ON(i915_wait_request(work->flip_queued_req,
                                          0, NULL, NO_WAITBOOST));

        /* For framebuffer backed by dmabuf, wait for fence */
        resv = i915_gem_object_get_dmabuf_resv(obj);
        if (resv)
                WARN_ON(reservation_object_wait_timeout_rcu(resv, false, false,
                                                            MAX_SCHEDULE_TIMEOUT) < 0);

        /* Evade the vblank so the register writes below are atomic. */
        intel_pipe_update_start(crtc);

        if (INTEL_GEN(dev_priv) >= 9)
                skl_do_mmio_flip(crtc, work->rotation, work);
        else
                /* use_mmio_flip() retricts MMIO flips to ilk+ */
                ilk_do_mmio_flip(crtc, work);

        intel_pipe_update_end(crtc, work);
}
12086
/*
 * Fallback .queue_flip implementation for platforms without CS flip
 * support: always fails with -ENODEV so legacy page flips are rejected.
 */
static int intel_default_queue_flip(struct drm_device *dev,
                                    struct drm_crtc *crtc,
                                    struct drm_framebuffer *fb,
                                    struct drm_i915_gem_object *obj,
                                    struct drm_i915_gem_request *req,
                                    uint32_t flags)
{
        return -ENODEV;
}
12096
/*
 * Heuristic to detect a stuck CS flip: once the flip request has completed
 * and several vblanks have passed without a flip-done interrupt, compare
 * the live display base address against the queued offset. Returns true
 * if the flip appears to have happened but was never signalled.
 */
static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
                                      struct intel_crtc *intel_crtc,
                                      struct intel_flip_work *work)
{
        u32 addr, vblank;

        /* Not armed yet, nothing can be stalled. */
        if (!atomic_read(&work->pending))
                return false;

        /* Pairs with smp_mb__before_atomic() in intel_mark_page_flip_active() */
        smp_rmb();

        vblank = intel_crtc_get_vblank_counter(intel_crtc);
        if (work->flip_ready_vblank == 0) {
                /* The flip can't be on the display before its request ran. */
                if (work->flip_queued_req &&
                    !i915_gem_request_completed(work->flip_queued_req))
                        return false;

                work->flip_ready_vblank = vblank;
        }

        /* Give the interrupt a grace period of a few vblanks. */
        if (vblank - work->flip_ready_vblank < 3)
                return false;

        /* Potential stall - if we see that the flip has happened,
         * assume a missed interrupt. */
        if (INTEL_GEN(dev_priv) >= 4)
                addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
        else
                addr = I915_READ(DSPADDR(intel_crtc->plane));

        /* There is a potential issue here with a false positive after a flip
         * to the same address. We could address this by checking for a
         * non-incrementing frame counter.
         */
        return addr == work->gtt_offset;
}
12133
/*
 * Called from the vblank irq handler: kick any CS flip that looks stuck
 * (missed flip-done interrupt) and RPS-boost flips that are taking more
 * than a vblank to land.
 */
void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
{
        struct drm_device *dev = &dev_priv->drm;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_flip_work *work;

        WARN_ON(!in_interrupt());

        /* Ignore vblanks on pipes without a crtc mapping (early irqs). */
        if (crtc == NULL)
                return;

        /* In irq context, so a plain spin_lock on event_lock suffices. */
        spin_lock(&dev->event_lock);
        work = intel_crtc->flip_work;

        /* Force-complete a CS flip that the stall check says is stuck. */
        if (work != NULL && !is_mmio_work(work) &&
            __pageflip_stall_check_cs(dev_priv, intel_crtc, work)) {
                WARN_ONCE(1,
                          "Kicking stuck page flip: queued at %d, now %d\n",
                        work->flip_queued_vblank, intel_crtc_get_vblank_counter(intel_crtc));
                page_flip_completed(intel_crtc);
                work = NULL;
        }

        /* Boost the GPU if the flip has been pending for over a vblank. */
        if (work != NULL && !is_mmio_work(work) &&
            intel_crtc_get_vblank_counter(intel_crtc) - work->flip_queued_vblank > 1)
                intel_queue_rps_boost_for_request(work->flip_queued_req);
        spin_unlock(&dev->event_lock);
}
12163
/*
 * Legacy page-flip entry point (.page_flip hook). Validates the new fb,
 * allocates and arms an intel_flip_work, pins the framebuffer, then either
 * schedules an MMIO flip worker or emits a CS flip on the chosen engine.
 * On failure everything is unwound via the goto cleanup chain; -EIO from a
 * wedged GPU falls back to a full atomic commit (out_hang) so the flip
 * still completes from userspace's point of view.
 */
static int intel_crtc_page_flip(struct drm_crtc *crtc,
                                struct drm_framebuffer *fb,
                                struct drm_pending_vblank_event *event,
                                uint32_t page_flip_flags)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_framebuffer *old_fb = crtc->primary->fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_plane *primary = crtc->primary;
        enum pipe pipe = intel_crtc->pipe;
        struct intel_flip_work *work;
        struct intel_engine_cs *engine;
        bool mmio_flip;
        struct drm_i915_gem_request *request;
        struct i915_vma *vma;
        int ret;

        /*
         * drm_mode_page_flip_ioctl() should already catch this, but double
         * check to be safe.  In the future we may enable pageflipping from
         * a disabled primary plane.
         */
        if (WARN_ON(intel_fb_obj(old_fb) == NULL))
                return -EBUSY;

        /* Can't change pixel format via MI display flips. */
        if (fb->pixel_format != crtc->primary->fb->pixel_format)
                return -EINVAL;

        /*
         * TILEOFF/LINOFF registers can't be changed via MI display flips.
         * Note that pitch changes could also affect these register.
         */
        if (INTEL_INFO(dev)->gen > 3 &&
            (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
             fb->pitches[0] != crtc->primary->fb->pitches[0]))
                return -EINVAL;

        /* GPU is terminally wedged: go straight to the atomic fallback. */
        if (i915_terminally_wedged(&dev_priv->gpu_error))
                goto out_hang;

        work = kzalloc(sizeof(*work), GFP_KERNEL);
        if (work == NULL)
                return -ENOMEM;

        work->event = event;
        work->crtc = crtc;
        work->old_fb = old_fb;
        INIT_WORK(&work->unpin_work, intel_unpin_work_fn);

        ret = drm_crtc_vblank_get(crtc);
        if (ret)
                goto free_work;

        /* We borrow the event spin lock for protecting flip_work */
        spin_lock_irq(&dev->event_lock);
        if (intel_crtc->flip_work) {
                /* Before declaring the flip queue wedged, check if
                 * the hardware completed the operation behind our backs.
                 */
                if (pageflip_finished(intel_crtc, intel_crtc->flip_work)) {
                        DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
                        page_flip_completed(intel_crtc);
                } else {
                        DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
                        spin_unlock_irq(&dev->event_lock);

                        drm_crtc_vblank_put(crtc);
                        kfree(work);
                        return -EBUSY;
                }
        }
        intel_crtc->flip_work = work;
        spin_unlock_irq(&dev->event_lock);

        /* Throttle: don't let more than two unpin works pile up. */
        if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
                flush_workqueue(dev_priv->wq);

        /* Reference the objects for the scheduled work. */
        drm_framebuffer_reference(work->old_fb);

        crtc->primary->fb = fb;
        update_state_fb(crtc->primary);

        work->pending_flip_obj = i915_gem_object_get(obj);

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                goto cleanup;

        /* Bail if a GPU reset is in flight; the flip would be lost. */
        intel_crtc->reset_count = i915_reset_count(&dev_priv->gpu_error);
        if (i915_reset_in_progress_or_wedged(&dev_priv->gpu_error)) {
                ret = -EIO;
                goto cleanup;
        }

        atomic_inc(&intel_crtc->unpin_work_count);

        /* g4x+: record the expected flip counter for completion checks. */
        if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
                work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;

        /* Pick the engine for a CS flip (platform-specific quirks). */
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                engine = dev_priv->engine[BCS];
                if (fb->modifier[0] != old_fb->modifier[0])
                        /* vlv: DISPLAY_FLIP fails to change tiling */
                        engine = NULL;
        } else if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
                engine = dev_priv->engine[BCS];
        } else if (INTEL_INFO(dev)->gen >= 7) {
                engine = i915_gem_active_get_engine(&obj->last_write,
                                                    &obj->base.dev->struct_mutex);
                if (engine == NULL || engine->id != RCS)
                        engine = dev_priv->engine[BCS];
        } else {
                engine = dev_priv->engine[RCS];
        }

        mmio_flip = use_mmio_flip(engine, obj);

        vma = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto cleanup_pending;
        }

        work->gtt_offset = intel_fb_gtt_offset(fb, primary->state->rotation);
        work->gtt_offset += intel_crtc->dspaddr_offset;
        work->rotation = crtc->primary->state->rotation;

        /*
         * There's the potential that the next frame will not be compatible with
         * FBC, so we want to call pre_update() before the actual page flip.
         * The problem is that pre_update() caches some information about the fb
         * object, so we want to do this only after the object is pinned. Let's
         * be on the safe side and do this immediately before scheduling the
         * flip.
         */
        intel_fbc_pre_update(intel_crtc, intel_crtc->config,
                             to_intel_plane_state(primary->state));

        if (mmio_flip) {
                /* MMIO path: worker waits for rendering then writes regs. */
                INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);

                work->flip_queued_req = i915_gem_active_get(&obj->last_write,
                                                            &obj->base.dev->struct_mutex);
                queue_work(system_unbound_wq, &work->mmio_work);
        } else {
                /* CS path: emit the flip into a request on the engine. */
                request = i915_gem_request_alloc(engine, engine->last_context);
                if (IS_ERR(request)) {
                        ret = PTR_ERR(request);
                        goto cleanup_unpin;
                }

                ret = i915_gem_request_await_object(request, obj, false);
                if (ret)
                        goto cleanup_request;

                ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
                                                   page_flip_flags);
                if (ret)
                        goto cleanup_request;

                intel_mark_page_flip_active(intel_crtc, work);

                work->flip_queued_req = i915_gem_request_get(request);
                i915_add_request_no_flush(request);
        }

        i915_gem_track_fb(intel_fb_obj(old_fb), obj,
                          to_intel_plane(primary)->frontbuffer_bit);
        mutex_unlock(&dev->struct_mutex);

        intel_frontbuffer_flip_prepare(to_i915(dev),
                                       to_intel_plane(primary)->frontbuffer_bit);

        trace_i915_flip_request(intel_crtc->plane, obj);

        return 0;

/* Error unwind: undo in strict reverse order of acquisition. */
cleanup_request:
        i915_add_request_no_flush(request);
cleanup_unpin:
        intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
cleanup_pending:
        atomic_dec(&intel_crtc->unpin_work_count);
        mutex_unlock(&dev->struct_mutex);
cleanup:
        crtc->primary->fb = old_fb;
        update_state_fb(crtc->primary);

        i915_gem_object_put_unlocked(obj);
        drm_framebuffer_unreference(work->old_fb);

        spin_lock_irq(&dev->event_lock);
        intel_crtc->flip_work = NULL;
        spin_unlock_irq(&dev->event_lock);

        drm_crtc_vblank_put(crtc);
free_work:
        kfree(work);

        if (ret == -EIO) {
                struct drm_atomic_state *state;
                struct drm_plane_state *plane_state;

/* Wedged-GPU fallback: perform the flip via a full atomic commit. */
out_hang:
                state = drm_atomic_state_alloc(dev);
                if (!state)
                        return -ENOMEM;
                state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);

retry:
                plane_state = drm_atomic_get_plane_state(state, primary);
                ret = PTR_ERR_OR_ZERO(plane_state);
                if (!ret) {
                        drm_atomic_set_fb_for_plane(plane_state, fb);

                        ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
                        if (!ret)
                                ret = drm_atomic_commit(state);
                }

                if (ret == -EDEADLK) {
                        drm_modeset_backoff(state->acquire_ctx);
                        drm_atomic_state_clear(state);
                        goto retry;
                }

                if (ret)
                        drm_atomic_state_free(state);

                if (ret == 0 && event) {
                        spin_lock_irq(&dev->event_lock);
                        drm_crtc_send_vblank_event(crtc, event);
                        spin_unlock_irq(&dev->event_lock);
                }
        }
        return ret;
}
12405
12406
12407 /**
12408  * intel_wm_need_update - Check whether watermarks need updating
12409  * @plane: drm plane
12410  * @state: new plane state
12411  *
12412  * Check current plane state versus the new one to determine whether
12413  * watermarks need to be recalculated.
12414  *
12415  * Returns true or false.
12416  */
12417 static bool intel_wm_need_update(struct drm_plane *plane,
12418                                  struct drm_plane_state *state)
12419 {
12420         struct intel_plane_state *new = to_intel_plane_state(state);
12421         struct intel_plane_state *cur = to_intel_plane_state(plane->state);
12422
12423         /* Update watermarks on tiling or size changes. */
12424         if (new->base.visible != cur->base.visible)
12425                 return true;
12426
12427         if (!cur->base.fb || !new->base.fb)
12428                 return false;
12429
12430         if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] ||
12431             cur->base.rotation != new->base.rotation ||
12432             drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
12433             drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
12434             drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
12435             drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
12436                 return true;
12437
12438         return false;
12439 }
12440
12441 static bool needs_scaling(struct intel_plane_state *state)
12442 {
12443         int src_w = drm_rect_width(&state->base.src) >> 16;
12444         int src_h = drm_rect_height(&state->base.src) >> 16;
12445         int dst_w = drm_rect_width(&state->base.dst);
12446         int dst_h = drm_rect_height(&state->base.dst);
12447
12448         return (src_w != dst_w || src_h != dst_h);
12449 }
12450
/*
 * intel_plane_atomic_calc_changes - compute crtc-level flags for a plane update
 *
 * Works out how the new plane state in @plane_state affects the owning
 * crtc: whether the plane is turning on or off, whether watermarks must
 * be recomputed before and/or after the vblank, whether cxsr and LP
 * watermarks must be disabled around the update, which frontbuffer bits
 * are affected, and whether the fb changed.  The results are recorded in
 * @crtc_state (viewed as an intel_crtc_state).
 *
 * Returns 0 on success or a negative error code from scaler setup.
 */
int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
				    struct drm_plane_state *plane_state)
{
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
	struct drm_crtc *crtc = crtc_state->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *plane = plane_state->plane;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane_state *old_plane_state =
		to_intel_plane_state(plane->state);
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = crtc->state->active;
	bool is_crtc_enabled = crtc_state->active;
	bool turn_off, turn_on, visible, was_visible;
	struct drm_framebuffer *fb = plane_state->fb;
	int ret;

	/* Gen9+ non-cursor planes may need a pipe scaler; set that up first. */
	if (INTEL_GEN(dev_priv) >= 9 && plane->type != DRM_PLANE_TYPE_CURSOR) {
		ret = skl_update_scaler_plane(
			to_intel_crtc_state(crtc_state),
			to_intel_plane_state(plane_state));
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->base.visible;
	visible = to_intel_plane_state(plane_state)->base.visible;

	/* A plane must not have been visible on a disabled crtc. */
	if (!was_crtc_enabled && WARN_ON(was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled)
		to_intel_plane_state(plane_state)->base.visible = visible = false;

	/* Plane stays invisible; nothing to flag on the crtc state. */
	if (!was_visible && !visible)
		return 0;

	if (fb != old_plane_state->base.fb)
		pipe_config->fb_changed = true;

	/* A full modeset counts as both turning off and turning back on. */
	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
			 intel_crtc->base.base.id,
			 intel_crtc->base.name,
			 plane->base.id, plane->name,
			 fb ? fb->base.id : -1);

	DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
			 plane->base.id, plane->name,
			 was_visible, visible,
			 turn_off, turn_on, mode_changed);

	if (turn_on) {
		pipe_config->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->type != DRM_PLANE_TYPE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (turn_off) {
		pipe_config->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->type != DRM_PLANE_TYPE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (intel_wm_need_update(plane, plane_state)) {
		/* FIXME bollocks */
		pipe_config->update_wm_pre = true;
		pipe_config->update_wm_post = true;
	}

	/* Pre-gen9 platforms need two-step watermark updates */
	if ((pipe_config->update_wm_pre || pipe_config->update_wm_post) &&
	    INTEL_INFO(dev)->gen < 9 && dev_priv->display.optimize_watermarks)
		to_intel_crtc_state(crtc_state)->wm.need_postvbl_update = true;

	/* Flag the frontbuffer bit for any plane that is or was visible. */
	if (visible || was_visible)
		pipe_config->fb_bits |= to_intel_plane(plane)->frontbuffer_bit;

	/*
	 * WaCxSRDisabledForSpriteScaling:ivb
	 *
	 * cstate->update_wm was already set above, so this flag will
	 * take effect when we commit and program watermarks.
	 */
	if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev_priv) &&
	    needs_scaling(to_intel_plane_state(plane_state)) &&
	    !needs_scaling(old_plane_state))
		pipe_config->disable_lp_wm = true;

	return 0;
}
12555
12556 static bool encoders_cloneable(const struct intel_encoder *a,
12557                                const struct intel_encoder *b)
12558 {
12559         /* masks could be asymmetric, so check both ways */
12560         return a == b || (a->cloneable & (1 << b->type) &&
12561                           b->cloneable & (1 << a->type));
12562 }
12563
12564 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
12565                                          struct intel_crtc *crtc,
12566                                          struct intel_encoder *encoder)
12567 {
12568         struct intel_encoder *source_encoder;
12569         struct drm_connector *connector;
12570         struct drm_connector_state *connector_state;
12571         int i;
12572
12573         for_each_connector_in_state(state, connector, connector_state, i) {
12574                 if (connector_state->crtc != &crtc->base)
12575                         continue;
12576
12577                 source_encoder =
12578                         to_intel_encoder(connector_state->best_encoder);
12579                 if (!encoders_cloneable(encoder, source_encoder))
12580                         return false;
12581         }
12582
12583         return true;
12584 }
12585
/*
 * intel_crtc_atomic_check - validate and finalize a new crtc state
 *
 * Atomic .atomic_check() hook for intel crtcs: computes clocks on a
 * modeset, validates color-management changes, computes target and
 * intermediate watermarks via the per-platform display vfuncs, and sets
 * up gen9+ pipe scalers.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
				   struct drm_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc_state);
	struct drm_atomic_state *state = crtc_state->state;
	int ret;
	bool mode_changed = needs_modeset(crtc_state);

	/* Pipe is being shut off; only a post-vblank wm update is needed. */
	if (mode_changed && !crtc_state->active)
		pipe_config->update_wm_post = true;

	/* On a modeset the clocks are recomputed; a dpll must not be set yet. */
	if (mode_changed && crtc_state->enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(pipe_config->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
							   pipe_config);
		if (ret)
			return ret;
	}

	if (crtc_state->color_mgmt_changed) {
		ret = intel_color_check(crtc, crtc_state);
		if (ret)
			return ret;

		/*
		 * Changing color management on Intel hardware is
		 * handled as part of planes update.
		 */
		crtc_state->planes_changed = true;
	}

	ret = 0;
	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm &&
	    !to_intel_atomic_state(state)->skip_intermediate_wm) {
		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state.  We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(crtc->dev,
								intel_crtc,
								pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	} else if (dev_priv->display.compute_intermediate_wm) {
		/* Intermediate wm skipped: reuse the optimal values on ilk-style hw. */
		if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
			pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
	}

	/* Gen9+: update the crtc scaler and (re)assign scalers to their users. */
	if (INTEL_INFO(dev)->gen >= 9) {
		if (mode_changed)
			ret = skl_update_scaler_crtc(pipe_config);

		if (!ret)
			ret = intel_atomic_setup_scalers(dev, intel_crtc,
							 pipe_config);
	}

	return ret;
}
12664
/* CRTC helper vtable wired into the atomic commit machinery. */
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.atomic_begin = intel_begin_crtc_commit,
	.atomic_flush = intel_finish_crtc_commit,
	.atomic_check = intel_crtc_atomic_check,
};
12671
/*
 * Sync each connector's atomic state with the current (legacy)
 * encoder/crtc pointers, rebalancing the connector reference that a
 * crtc link holds.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;

	for_each_intel_connector(dev, connector) {
		/* Drop the reference held for the old state's crtc link. */
		if (connector->base.state->crtc)
			drm_connector_unreference(&connector->base);

		if (connector->base.encoder) {
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;

			/* The new crtc link holds a reference again. */
			drm_connector_reference(&connector->base);
		} else {
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
}
12693
/*
 * connected_sink_compute_bpp - clamp pipe bpp to what the sink supports
 *
 * Lowers @pipe_config->pipe_bpp based on the connector's EDID-reported
 * bpc; sinks without a usable EDID bpc value are limited to 24 bpp.
 */
static void
connected_sink_compute_bpp(struct intel_connector *connector,
			   struct intel_crtc_state *pipe_config)
{
	const struct drm_display_info *info = &connector->base.display_info;
	int bpp = pipe_config->pipe_bpp;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
		      connector->base.base.id,
		      connector->base.name);

	/* Don't use an invalid EDID bpc value */
	if (info->bpc != 0 && info->bpc * 3 < bpp) {
		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
			      bpp, info->bpc * 3);
		pipe_config->pipe_bpp = info->bpc * 3;
	}

	/* Clamp bpp to 8 on screens without EDID 1.4 */
	if (info->bpc == 0 && bpp > 24) {
		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
			      bpp);
		pipe_config->pipe_bpp = 24;
	}
}
12719
12720 static int
12721 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
12722                           struct intel_crtc_state *pipe_config)
12723 {
12724         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12725         struct drm_atomic_state *state;
12726         struct drm_connector *connector;
12727         struct drm_connector_state *connector_state;
12728         int bpp, i;
12729
12730         if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
12731             IS_CHERRYVIEW(dev_priv)))
12732                 bpp = 10*3;
12733         else if (INTEL_GEN(dev_priv) >= 5)
12734                 bpp = 12*3;
12735         else
12736                 bpp = 8*3;
12737
12738
12739         pipe_config->pipe_bpp = bpp;
12740
12741         state = pipe_config->base.state;
12742
12743         /* Clamp display bpp to EDID value */
12744         for_each_connector_in_state(state, connector, connector_state, i) {
12745                 if (connector_state->crtc != &crtc->base)
12746                         continue;
12747
12748                 connected_sink_compute_bpp(to_intel_connector(connector),
12749                                            pipe_config);
12750         }
12751
12752         return bpp;
12753 }
12754
12755 static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
12756 {
12757         DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
12758                         "type: 0x%x flags: 0x%x\n",
12759                 mode->crtc_clock,
12760                 mode->crtc_hdisplay, mode->crtc_hsync_start,
12761                 mode->crtc_hsync_end, mode->crtc_htotal,
12762                 mode->crtc_vdisplay, mode->crtc_vsync_start,
12763                 mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
12764 }
12765
/*
 * intel_dump_pipe_config - dump a full pipe config to the kernel log
 *
 * Debug helper: prints the transcoder, bpp, fdi/dp link m/n values,
 * modes and timings, scaler and pfit state, the platform-specific dpll
 * hw state, and the state of every plane on this crtc.  @context is a
 * caller-supplied label for the log line.
 */
static void intel_dump_pipe_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config,
				   const char *context)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_plane *plane;
	struct intel_plane *intel_plane;
	struct intel_plane_state *state;
	struct drm_framebuffer *fb;

	DRM_DEBUG_KMS("[CRTC:%d:%s]%s config %p for pipe %c\n",
		      crtc->base.base.id, crtc->base.name,
		      context, pipe_config, pipe_name(crtc->pipe));

	DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder));
	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
		      pipe_config->pipe_bpp, pipe_config->dither);
	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_pch_encoder,
		      pipe_config->fdi_lanes,
		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
		      pipe_config->fdi_m_n.tu);
	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      intel_crtc_has_dp_encoder(pipe_config),
		      pipe_config->lane_count,
		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
		      pipe_config->dp_m_n.tu);

	/* Second set of m/n values (e.g. for alternate DP link rates). */
	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
		      intel_crtc_has_dp_encoder(pipe_config),
		      pipe_config->lane_count,
		      pipe_config->dp_m2_n2.gmch_m,
		      pipe_config->dp_m2_n2.gmch_n,
		      pipe_config->dp_m2_n2.link_m,
		      pipe_config->dp_m2_n2.link_n,
		      pipe_config->dp_m2_n2.tu);

	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
		      pipe_config->has_audio,
		      pipe_config->has_infoframe);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
	DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
		      crtc->num_scalers,
		      pipe_config->scaler_state.scaler_users,
		      pipe_config->scaler_state.scaler_id);
	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
		      pipe_config->gmch_pfit.control,
		      pipe_config->gmch_pfit.pgm_ratios,
		      pipe_config->gmch_pfit.lvds_border_bits);
	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
		      pipe_config->pch_pfit.pos,
		      pipe_config->pch_pfit.size,
		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);

	/* The dpll hw state layout differs per platform family. */
	if (IS_BROXTON(dev_priv)) {
		DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
			      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
			      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
			      pipe_config->dpll_hw_state.ebb0,
			      pipe_config->dpll_hw_state.ebb4,
			      pipe_config->dpll_hw_state.pll0,
			      pipe_config->dpll_hw_state.pll1,
			      pipe_config->dpll_hw_state.pll2,
			      pipe_config->dpll_hw_state.pll3,
			      pipe_config->dpll_hw_state.pll6,
			      pipe_config->dpll_hw_state.pll8,
			      pipe_config->dpll_hw_state.pll9,
			      pipe_config->dpll_hw_state.pll10,
			      pipe_config->dpll_hw_state.pcsdw12);
	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		DRM_DEBUG_KMS("dpll_hw_state: "
			      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
			      pipe_config->dpll_hw_state.ctrl1,
			      pipe_config->dpll_hw_state.cfgcr1,
			      pipe_config->dpll_hw_state.cfgcr2);
	} else if (HAS_DDI(dev_priv)) {
		DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
			      pipe_config->dpll_hw_state.wrpll,
			      pipe_config->dpll_hw_state.spll);
	} else {
		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
			      "fp0: 0x%x, fp1: 0x%x\n",
			      pipe_config->dpll_hw_state.dpll,
			      pipe_config->dpll_hw_state.dpll_md,
			      pipe_config->dpll_hw_state.fp0,
			      pipe_config->dpll_hw_state.fp1);
	}

	DRM_DEBUG_KMS("planes on this crtc\n");
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		char *format_name;
		intel_plane = to_intel_plane(plane);
		if (intel_plane->pipe != crtc->pipe)
			continue;

		state = to_intel_plane_state(plane->state);
		fb = state->base.fb;
		if (!fb) {
			DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
				      plane->base.id, plane->name, state->scaler_id);
			continue;
		}

		/* drm_get_format_name() allocates; freed at the end of the loop. */
		format_name = drm_get_format_name(fb->pixel_format);

		DRM_DEBUG_KMS("[PLANE:%d:%s] enabled",
			      plane->base.id, plane->name);
		DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = %s",
			      fb->base.id, fb->width, fb->height, format_name);
		DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
			      state->scaler_id,
			      state->base.src.x1 >> 16,
			      state->base.src.y1 >> 16,
			      drm_rect_width(&state->base.src) >> 16,
			      drm_rect_height(&state->base.src) >> 16,
			      state->base.dst.x1, state->base.dst.y1,
			      drm_rect_width(&state->base.dst),
			      drm_rect_height(&state->base.dst));

		kfree(format_name);
	}
}
12901
/*
 * check_digital_port_conflicts - reject configs that double-book a port
 *
 * Returns false if the same digital port would be used by more than one
 * SST/HDMI encoder, or by both MST and SST/HDMI at once.
 */
static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_connector *connector;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_for_each_connector(connector, dev) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Prefer the state in @state; fall back to the current one. */
		connector_state = drm_atomic_get_existing_connector_state(state, connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
			unsigned int port_mask;
		case INTEL_OUTPUT_UNKNOWN:
			if (WARN_ON(!HAS_DDI(to_i915(dev))))
				break;
			/* fall through - unknown DDI encoders occupy a port too */
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << enc_to_dig_port(&encoder->base)->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				return false;

			used_ports |= port_mask;
			break;
		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |=
				1 << enc_to_mst(&encoder->base)->primary->port;
			break;
		default:
			break;
		}
	}

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return true;
}
12960
12961 static void
12962 clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
12963 {
12964         struct drm_crtc_state tmp_state;
12965         struct intel_crtc_scaler_state scaler_state;
12966         struct intel_dpll_hw_state dpll_hw_state;
12967         struct intel_shared_dpll *shared_dpll;
12968         bool force_thru;
12969
12970         /* FIXME: before the switch to atomic started, a new pipe_config was
12971          * kzalloc'd. Code that depends on any field being zero should be
12972          * fixed, so that the crtc_state can be safely duplicated. For now,
12973          * only fields that are know to not cause problems are preserved. */
12974
12975         tmp_state = crtc_state->base;
12976         scaler_state = crtc_state->scaler_state;
12977         shared_dpll = crtc_state->shared_dpll;
12978         dpll_hw_state = crtc_state->dpll_hw_state;
12979         force_thru = crtc_state->pch_pfit.force_thru;
12980
12981         memset(crtc_state, 0, sizeof *crtc_state);
12982
12983         crtc_state->base = tmp_state;
12984         crtc_state->scaler_state = scaler_state;
12985         crtc_state->shared_dpll = shared_dpll;
12986         crtc_state->dpll_hw_state = dpll_hw_state;
12987         crtc_state->pch_pfit.force_thru = force_thru;
12988 }
12989
12990 static int
12991 intel_modeset_pipe_config(struct drm_crtc *crtc,
12992                           struct intel_crtc_state *pipe_config)
12993 {
12994         struct drm_atomic_state *state = pipe_config->base.state;
12995         struct intel_encoder *encoder;
12996         struct drm_connector *connector;
12997         struct drm_connector_state *connector_state;
12998         int base_bpp, ret = -EINVAL;
12999         int i;
13000         bool retry = true;
13001
13002         clear_intel_crtc_state(pipe_config);
13003
13004         pipe_config->cpu_transcoder =
13005                 (enum transcoder) to_intel_crtc(crtc)->pipe;
13006
13007         /*
13008          * Sanitize sync polarity flags based on requested ones. If neither
13009          * positive or negative polarity is requested, treat this as meaning
13010          * negative polarity.
13011          */
13012         if (!(pipe_config->base.adjusted_mode.flags &
13013               (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
13014                 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
13015
13016         if (!(pipe_config->base.adjusted_mode.flags &
13017               (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
13018                 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
13019
13020         base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
13021                                              pipe_config);
13022         if (base_bpp < 0)
13023                 goto fail;
13024
13025         /*
13026          * Determine the real pipe dimensions. Note that stereo modes can
13027          * increase the actual pipe size due to the frame doubling and
13028          * insertion of additional space for blanks between the frame. This
13029          * is stored in the crtc timings. We use the requested mode to do this
13030          * computation to clearly distinguish it from the adjusted mode, which
13031          * can be changed by the connectors in the below retry loop.
13032          */
13033         drm_crtc_get_hv_timing(&pipe_config->base.mode,
13034                                &pipe_config->pipe_src_w,
13035                                &pipe_config->pipe_src_h);
13036
13037         for_each_connector_in_state(state, connector, connector_state, i) {
13038                 if (connector_state->crtc != crtc)
13039                         continue;
13040
13041                 encoder = to_intel_encoder(connector_state->best_encoder);
13042
13043                 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
13044                         DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
13045                         goto fail;
13046                 }
13047
13048                 /*
13049                  * Determine output_types before calling the .compute_config()
13050                  * hooks so that the hooks can use this information safely.
13051                  */
13052                 pipe_config->output_types |= 1 << encoder->type;
13053         }
13054
13055 encoder_retry:
13056         /* Ensure the port clock defaults are reset when retrying. */
13057         pipe_config->port_clock = 0;
13058         pipe_config->pixel_multiplier = 1;
13059
13060         /* Fill in default crtc timings, allow encoders to overwrite them. */
13061         drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
13062                               CRTC_STEREO_DOUBLE);
13063
13064         /* Pass our mode to the connectors and the CRTC to give them a chance to
13065          * adjust it according to limitations or connector properties, and also
13066          * a chance to reject the mode entirely.
13067          */
13068         for_each_connector_in_state(state, connector, connector_state, i) {
13069                 if (connector_state->crtc != crtc)
13070                         continue;
13071
13072                 encoder = to_intel_encoder(connector_state->best_encoder);
13073
13074                 if (!(encoder->compute_config(encoder, pipe_config, connector_state))) {
13075                         DRM_DEBUG_KMS("Encoder config failure\n");
13076                         goto fail;
13077                 }
13078         }
13079
13080         /* Set default port clock if not overwritten by the encoder. Needs to be
13081          * done afterwards in case the encoder adjusts the mode. */
13082         if (!pipe_config->port_clock)
13083                 pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
13084                         * pipe_config->pixel_multiplier;
13085
13086         ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
13087         if (ret < 0) {
13088                 DRM_DEBUG_KMS("CRTC fixup failed\n");
13089                 goto fail;
13090         }
13091
13092         if (ret == RETRY) {
13093                 if (WARN(!retry, "loop in pipe configuration computation\n")) {
13094                         ret = -EINVAL;
13095                         goto fail;
13096                 }
13097
13098                 DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
13099                 retry = false;
13100                 goto encoder_retry;
13101         }
13102
13103         /* Dithering seems to not pass-through bits correctly when it should, so
13104          * only enable it on 6bpc panels. */
13105         pipe_config->dither = pipe_config->pipe_bpp == 6*3;
13106         DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
13107                       base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
13108
13109 fail:
13110         return ret;
13111 }
13112
13113 static void
13114 intel_modeset_update_crtc_state(struct drm_atomic_state *state)
13115 {
13116         struct drm_crtc *crtc;
13117         struct drm_crtc_state *crtc_state;
13118         int i;
13119
13120         /* Double check state. */
13121         for_each_crtc_in_state(state, crtc, crtc_state, i) {
13122                 to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
13123
13124                 /* Update hwmode for vblank functions */
13125                 if (crtc->state->active)
13126                         crtc->hwmode = crtc->state->adjusted_mode;
13127                 else
13128                         crtc->hwmode.crtc_clock = 0;
13129
13130                 /*
13131                  * Update legacy state to satisfy fbc code. This can
13132                  * be removed when fbc uses the atomic state.
13133                  */
13134                 if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
13135                         struct drm_plane_state *plane_state = crtc->primary->state;
13136
13137                         crtc->primary->fb = plane_state->fb;
13138                         crtc->x = plane_state->src_x >> 16;
13139                         crtc->y = plane_state->src_y >> 16;
13140                 }
13141         }
13142 }
13143
/*
 * Fuzzy comparison of two clock values (in kHz).  Accepts an exact
 * match, rejects any pairing with a zero, and otherwise accepts the
 * pair when the difference is below roughly 5% of the combined clocks
 * (same integer arithmetic as the historical open-coded check).
 */
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int delta, sum;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	delta = abs(clock1 - clock2);
	sum = clock1 + clock2;

	return ((delta + sum) * 100) / sum < 105;
}
13161
/*
 * Compare two M/N link ratios, either exactly or fuzzily.
 *
 * M/N pairs can encode the same ratio at different scales, so for a
 * fuzzy comparison the pair with the smaller N is scaled up by powers
 * of two until the N values line up, after which the M values are
 * compared with the fuzzy clock check.
 *
 * Returns true when the two ratios are considered equal.
 */
static bool
intel_compare_m_n(unsigned int m, unsigned int n,
		  unsigned int m2, unsigned int n2,
		  bool exact)
{
	/* Identical pairs always match, even in exact mode. */
	if (m == m2 && n == n2)
		return true;

	/* Exact compare already failed above; zeros can't be rescaled. */
	if (exact || !m || !n || !m2 || !n2)
		return false;

	/* M/N register values are bounded by DATA_LINK_M_N_MASK, which
	 * must fit in an int for the doubling below to stay sane. */
	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);

	if (n > n2) {
		/* Scale the second pair up until its N catches up. */
		while (n > n2) {
			m2 <<= 1;
			n2 <<= 1;
		}
	} else if (n < n2) {
		/* Scale the first pair up until its N catches up. */
		while (n < n2) {
			m <<= 1;
			n <<= 1;
		}
	}

	/* If the doubling overshot, the Ns aren't a power-of-two apart
	 * and the ratios can't be compared this way. */
	if (n != n2)
		return false;

	/* Ns now match; accept if the Ms are within the fuzzy margin. */
	return intel_fuzzy_clock_check(m, m2);
}
13192
13193 static bool
13194 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
13195                        struct intel_link_m_n *m2_n2,
13196                        bool adjust)
13197 {
13198         if (m_n->tu == m2_n2->tu &&
13199             intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
13200                               m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
13201             intel_compare_m_n(m_n->link_m, m_n->link_n,
13202                               m2_n2->link_m, m2_n2->link_n, !adjust)) {
13203                 if (adjust)
13204                         *m2_n2 = *m_n;
13205
13206                 return true;
13207         }
13208
13209         return false;
13210 }
13211
13212 static bool
13213 intel_pipe_config_compare(struct drm_device *dev,
13214                           struct intel_crtc_state *current_config,
13215                           struct intel_crtc_state *pipe_config,
13216                           bool adjust)
13217 {
13218         struct drm_i915_private *dev_priv = to_i915(dev);
13219         bool ret = true;
13220
13221 #define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
13222         do { \
13223                 if (!adjust) \
13224                         DRM_ERROR(fmt, ##__VA_ARGS__); \
13225                 else \
13226                         DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \
13227         } while (0)
13228
13229 #define PIPE_CONF_CHECK_X(name) \
13230         if (current_config->name != pipe_config->name) { \
13231                 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
13232                           "(expected 0x%08x, found 0x%08x)\n", \
13233                           current_config->name, \
13234                           pipe_config->name); \
13235                 ret = false; \
13236         }
13237
13238 #define PIPE_CONF_CHECK_I(name) \
13239         if (current_config->name != pipe_config->name) { \
13240                 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
13241                           "(expected %i, found %i)\n", \
13242                           current_config->name, \
13243                           pipe_config->name); \
13244                 ret = false; \
13245         }
13246
13247 #define PIPE_CONF_CHECK_P(name) \
13248         if (current_config->name != pipe_config->name) { \
13249                 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
13250                           "(expected %p, found %p)\n", \
13251                           current_config->name, \
13252                           pipe_config->name); \
13253                 ret = false; \
13254         }
13255
13256 #define PIPE_CONF_CHECK_M_N(name) \
13257         if (!intel_compare_link_m_n(&current_config->name, \
13258                                     &pipe_config->name,\
13259                                     adjust)) { \
13260                 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
13261                           "(expected tu %i gmch %i/%i link %i/%i, " \
13262                           "found tu %i, gmch %i/%i link %i/%i)\n", \
13263                           current_config->name.tu, \
13264                           current_config->name.gmch_m, \
13265                           current_config->name.gmch_n, \
13266                           current_config->name.link_m, \
13267                           current_config->name.link_n, \
13268                           pipe_config->name.tu, \
13269                           pipe_config->name.gmch_m, \
13270                           pipe_config->name.gmch_n, \
13271                           pipe_config->name.link_m, \
13272                           pipe_config->name.link_n); \
13273                 ret = false; \
13274         }
13275
13276 /* This is required for BDW+ where there is only one set of registers for
13277  * switching between high and low RR.
13278  * This macro can be used whenever a comparison has to be made between one
13279  * hw state and multiple sw state variables.
13280  */
13281 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
13282         if (!intel_compare_link_m_n(&current_config->name, \
13283                                     &pipe_config->name, adjust) && \
13284             !intel_compare_link_m_n(&current_config->alt_name, \
13285                                     &pipe_config->name, adjust)) { \
13286                 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
13287                           "(expected tu %i gmch %i/%i link %i/%i, " \
13288                           "or tu %i gmch %i/%i link %i/%i, " \
13289                           "found tu %i, gmch %i/%i link %i/%i)\n", \
13290                           current_config->name.tu, \
13291                           current_config->name.gmch_m, \
13292                           current_config->name.gmch_n, \
13293                           current_config->name.link_m, \
13294                           current_config->name.link_n, \
13295                           current_config->alt_name.tu, \
13296                           current_config->alt_name.gmch_m, \
13297                           current_config->alt_name.gmch_n, \
13298                           current_config->alt_name.link_m, \
13299                           current_config->alt_name.link_n, \
13300                           pipe_config->name.tu, \
13301                           pipe_config->name.gmch_m, \
13302                           pipe_config->name.gmch_n, \
13303                           pipe_config->name.link_m, \
13304                           pipe_config->name.link_n); \
13305                 ret = false; \
13306         }
13307
13308 #define PIPE_CONF_CHECK_FLAGS(name, mask)       \
13309         if ((current_config->name ^ pipe_config->name) & (mask)) { \
13310                 INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
13311                           "(expected %i, found %i)\n", \
13312                           current_config->name & (mask), \
13313                           pipe_config->name & (mask)); \
13314                 ret = false; \
13315         }
13316
13317 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
13318         if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
13319                 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
13320                           "(expected %i, found %i)\n", \
13321                           current_config->name, \
13322                           pipe_config->name); \
13323                 ret = false; \
13324         }
13325
13326 #define PIPE_CONF_QUIRK(quirk)  \
13327         ((current_config->quirks | pipe_config->quirks) & (quirk))
13328
13329         PIPE_CONF_CHECK_I(cpu_transcoder);
13330
13331         PIPE_CONF_CHECK_I(has_pch_encoder);
13332         PIPE_CONF_CHECK_I(fdi_lanes);
13333         PIPE_CONF_CHECK_M_N(fdi_m_n);
13334
13335         PIPE_CONF_CHECK_I(lane_count);
13336         PIPE_CONF_CHECK_X(lane_lat_optim_mask);
13337
13338         if (INTEL_INFO(dev)->gen < 8) {
13339                 PIPE_CONF_CHECK_M_N(dp_m_n);
13340
13341                 if (current_config->has_drrs)
13342                         PIPE_CONF_CHECK_M_N(dp_m2_n2);
13343         } else
13344                 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
13345
13346         PIPE_CONF_CHECK_X(output_types);
13347
13348         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
13349         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
13350         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
13351         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
13352         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
13353         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
13354
13355         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
13356         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
13357         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
13358         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
13359         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
13360         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
13361
13362         PIPE_CONF_CHECK_I(pixel_multiplier);
13363         PIPE_CONF_CHECK_I(has_hdmi_sink);
13364         if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
13365             IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
13366                 PIPE_CONF_CHECK_I(limited_color_range);
13367         PIPE_CONF_CHECK_I(has_infoframe);
13368
13369         PIPE_CONF_CHECK_I(has_audio);
13370
13371         PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
13372                               DRM_MODE_FLAG_INTERLACE);
13373
13374         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
13375                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
13376                                       DRM_MODE_FLAG_PHSYNC);
13377                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
13378                                       DRM_MODE_FLAG_NHSYNC);
13379                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
13380                                       DRM_MODE_FLAG_PVSYNC);
13381                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
13382                                       DRM_MODE_FLAG_NVSYNC);
13383         }
13384
13385         PIPE_CONF_CHECK_X(gmch_pfit.control);
13386         /* pfit ratios are autocomputed by the hw on gen4+ */
13387         if (INTEL_INFO(dev)->gen < 4)
13388                 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
13389         PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
13390
13391         if (!adjust) {
13392                 PIPE_CONF_CHECK_I(pipe_src_w);
13393                 PIPE_CONF_CHECK_I(pipe_src_h);
13394
13395                 PIPE_CONF_CHECK_I(pch_pfit.enabled);
13396                 if (current_config->pch_pfit.enabled) {
13397                         PIPE_CONF_CHECK_X(pch_pfit.pos);
13398                         PIPE_CONF_CHECK_X(pch_pfit.size);
13399                 }
13400
13401                 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
13402         }
13403
13404         /* BDW+ don't expose a synchronous way to read the state */
13405         if (IS_HASWELL(dev_priv))
13406                 PIPE_CONF_CHECK_I(ips_enabled);
13407
13408         PIPE_CONF_CHECK_I(double_wide);
13409
13410         PIPE_CONF_CHECK_P(shared_dpll);
13411         PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
13412         PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
13413         PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
13414         PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
13415         PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
13416         PIPE_CONF_CHECK_X(dpll_hw_state.spll);
13417         PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
13418         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
13419         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
13420
13421         PIPE_CONF_CHECK_X(dsi_pll.ctrl);
13422         PIPE_CONF_CHECK_X(dsi_pll.div);
13423
13424         if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
13425                 PIPE_CONF_CHECK_I(pipe_bpp);
13426
13427         PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
13428         PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
13429
13430 #undef PIPE_CONF_CHECK_X
13431 #undef PIPE_CONF_CHECK_I
13432 #undef PIPE_CONF_CHECK_P
13433 #undef PIPE_CONF_CHECK_FLAGS
13434 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
13435 #undef PIPE_CONF_QUIRK
13436 #undef INTEL_ERR_OR_DBG_KMS
13437
13438         return ret;
13439 }
13440
13441 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
13442                                            const struct intel_crtc_state *pipe_config)
13443 {
13444         if (pipe_config->has_pch_encoder) {
13445                 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
13446                                                             &pipe_config->fdi_m_n);
13447                 int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
13448
13449                 /*
13450                  * FDI already provided one idea for the dotclock.
13451                  * Yell if the encoder disagrees.
13452                  */
13453                 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
13454                      "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
13455                      fdi_dotclock, dotclock);
13456         }
13457 }
13458
/*
 * Cross-check the software-tracked SKL+ watermark and DDB state for
 * @crtc against what is currently programmed in the hardware.  Only
 * runs on gen9+ and only for active crtcs; any mismatch is reported
 * via DRM_ERROR.
 */
static void verify_wm_state(struct drm_crtc *crtc,
			    struct drm_crtc_state *new_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct skl_ddb_allocation hw_ddb, *sw_ddb;
	struct skl_pipe_wm hw_wm, *sw_wm;
	struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const enum pipe pipe = intel_crtc->pipe;
	int plane, level, max_level = ilk_wm_max_level(dev_priv);

	if (INTEL_INFO(dev)->gen < 9 || !new_state->active)
		return;

	/* Read back the watermark and DDB state the hardware is using. */
	skl_pipe_wm_get_hw_state(crtc, &hw_wm);
	sw_wm = &intel_crtc->wm.active.skl;

	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
	sw_ddb = &dev_priv->wm.skl_hw.ddb;

	/* planes */
	for_each_plane(dev_priv, pipe, plane) {
		hw_plane_wm = &hw_wm.planes[plane];
		sw_plane_wm = &sw_wm->planes[plane];

		/* Watermarks: every level must match between hw and sw. */
		for (level = 0; level <= max_level; level++) {
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]))
				continue;

			DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), plane + 1, level,
				  sw_plane_wm->wm[level].plane_en,
				  sw_plane_wm->wm[level].plane_res_b,
				  sw_plane_wm->wm[level].plane_res_l,
				  hw_plane_wm->wm[level].plane_en,
				  hw_plane_wm->wm[level].plane_res_b,
				  hw_plane_wm->wm[level].plane_res_l);
		}

		/* Transition watermark for this plane. */
		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), plane + 1,
				  sw_plane_wm->trans_wm.plane_en,
				  sw_plane_wm->trans_wm.plane_res_b,
				  sw_plane_wm->trans_wm.plane_res_l,
				  hw_plane_wm->trans_wm.plane_en,
				  hw_plane_wm->trans_wm.plane_res_b,
				  hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw_ddb.plane[pipe][plane];
		sw_ddb_entry = &sw_ddb->plane[pipe][plane];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe), plane + 1,
				  sw_ddb_entry->start, sw_ddb_entry->end,
				  hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	/*
	 * cursor
	 * If the cursor plane isn't active, we may not have updated its ddb
	 * allocation. In that case since the ddb allocation will be updated
	 * once the plane becomes visible, we can skip this check
	 */
	if (intel_crtc->cursor_addr) {
		hw_plane_wm = &hw_wm.planes[PLANE_CURSOR];
		sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];

		/* Watermarks: same per-level check as the regular planes. */
		for (level = 0; level <= max_level; level++) {
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]))
				continue;

			DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), level,
				  sw_plane_wm->wm[level].plane_en,
				  sw_plane_wm->wm[level].plane_res_b,
				  sw_plane_wm->wm[level].plane_res_l,
				  hw_plane_wm->wm[level].plane_en,
				  hw_plane_wm->wm[level].plane_res_b,
				  hw_plane_wm->wm[level].plane_res_l);
		}

		/* Transition watermark for the cursor plane. */
		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe),
				  sw_plane_wm->trans_wm.plane_en,
				  sw_plane_wm->trans_wm.plane_res_b,
				  sw_plane_wm->trans_wm.plane_res_l,
				  hw_plane_wm->trans_wm.plane_en,
				  hw_plane_wm->trans_wm.plane_res_b,
				  hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
		sw_ddb_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe),
				  sw_ddb_entry->start, sw_ddb_entry->end,
				  hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}
}
13576
13577 static void
13578 verify_connector_state(struct drm_device *dev, struct drm_crtc *crtc)
13579 {
13580         struct drm_connector *connector;
13581
13582         drm_for_each_connector(connector, dev) {
13583                 struct drm_encoder *encoder = connector->encoder;
13584                 struct drm_connector_state *state = connector->state;
13585
13586                 if (state->crtc != crtc)
13587                         continue;
13588
13589                 intel_connector_verify_state(to_intel_connector(connector));
13590
13591                 I915_STATE_WARN(state->best_encoder != encoder,
13592                      "connector's atomic encoder doesn't match legacy encoder\n");
13593         }
13594 }
13595
13596 static void
13597 verify_encoder_state(struct drm_device *dev)
13598 {
13599         struct intel_encoder *encoder;
13600         struct intel_connector *connector;
13601
13602         for_each_intel_encoder(dev, encoder) {
13603                 bool enabled = false;
13604                 enum pipe pipe;
13605
13606                 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
13607                               encoder->base.base.id,
13608                               encoder->base.name);
13609
13610                 for_each_intel_connector(dev, connector) {
13611                         if (connector->base.state->best_encoder != &encoder->base)
13612                                 continue;
13613                         enabled = true;
13614
13615                         I915_STATE_WARN(connector->base.state->crtc !=
13616                                         encoder->base.crtc,
13617                              "connector's crtc doesn't match encoder crtc\n");
13618                 }
13619
13620                 I915_STATE_WARN(!!encoder->base.crtc != enabled,
13621                      "encoder's enabled state mismatch "
13622                      "(expected %i, found %i)\n",
13623                      !!encoder->base.crtc, enabled);
13624
13625                 if (!encoder->base.crtc) {
13626                         bool active;
13627
13628                         active = encoder->get_hw_state(encoder, &pipe);
13629                         I915_STATE_WARN(active,
13630                              "encoder detached but still enabled on pipe %c.\n",
13631                              pipe_name(pipe));
13632                 }
13633         }
13634 }
13635
/*
 * Verify the committed state of one crtc against a fresh hardware
 * readout: active state, encoder routing, and a full field-by-field
 * pipe config compare.
 */
static void
verify_crtc_state(struct drm_crtc *crtc,
		  struct drm_crtc_state *old_crtc_state,
		  struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config, *sw_config;
	struct drm_atomic_state *old_state;
	bool active;

	/* The old crtc state is no longer needed at this point, so free
	 * its internals and reuse its memory as scratch space for the
	 * hardware readout (keeping the state backpointer alive). */
	old_state = old_crtc_state->state;
	__drm_atomic_helper_crtc_destroy_state(old_crtc_state);
	pipe_config = to_intel_crtc_state(old_crtc_state);
	memset(pipe_config, 0, sizeof(*pipe_config));
	pipe_config->base.crtc = crtc;
	pipe_config->base.state = old_state;

	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);

	active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);

	/* hw state is inconsistent with the pipe quirk */
	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		active = new_crtc_state->active;

	I915_STATE_WARN(new_crtc_state->active != active,
	     "crtc active state doesn't match with hw state "
	     "(expected %i, found %i)\n", new_crtc_state->active, active);

	I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
	     "transitional active state does not match atomic hw state "
	     "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		enum pipe pipe;

		/* Each encoder on the crtc must agree on active state and
		 * be wired to this crtc's pipe. */
		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->active,
			"[ENCODER:%i] active %i with crtc active %i\n",
			encoder->base.base.id, active, new_crtc_state->active);

		I915_STATE_WARN(active && intel_crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		/* Let active encoders fill in their part of the readout. */
		if (active) {
			pipe_config->output_types |= 1 << encoder->type;
			encoder->get_config(encoder, pipe_config);
		}
	}

	/* Nothing more to compare for an inactive crtc. */
	if (!new_crtc_state->active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	/* Strict (non-fastset) compare of sw state vs. hw readout. */
	sw_config = to_intel_crtc_state(crtc->state);
	if (!intel_pipe_config_compare(dev, sw_config,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(intel_crtc, pipe_config,
				       "[hw state]");
		intel_dump_pipe_config(intel_crtc, sw_config,
				       "[sw state]");
	}
}
13706
/*
 * Verify one shared DPLL against the hardware.  With @crtc non-NULL
 * the pll is also checked against that crtc's new state; with @crtc
 * NULL only the pll's global bookkeeping is verified.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct drm_crtc *crtc,
			 struct drm_crtc_state *new_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	unsigned crtc_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	DRM_DEBUG_KMS("%s\n", pll->name);

	/* Read the pll's current enable state and config from hardware. */
	active = pll->funcs.get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on plls have no meaningful on/off tracking to verify. */
	if (!(pll->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
		     "pll is on but not used by any active crtc\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);
	}

	/* Without a crtc, only check global refcounting consistency. */
	if (!crtc) {
		I915_STATE_WARN(pll->active_mask & ~pll->config.crtc_mask,
				"more active pll users than references: %x vs %x\n",
				pll->active_mask, pll->config.crtc_mask);

		return;
	}

	crtc_mask = 1 << drm_crtc_index(crtc);

	/* An active crtc must be in the pll's active mask; an inactive
	 * one must not. */
	if (new_state->active)
		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);

	I915_STATE_WARN(!(pll->config.crtc_mask & crtc_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
			crtc_mask, pll->config.crtc_mask);

	/* While the pll is on, the cached hw state must still match what
	 * the hardware actually reports. */
	I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
13761
13762 static void
13763 verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
13764                          struct drm_crtc_state *old_crtc_state,
13765                          struct drm_crtc_state *new_crtc_state)
13766 {
13767         struct drm_i915_private *dev_priv = to_i915(dev);
13768         struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
13769         struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
13770
13771         if (new_state->shared_dpll)
13772                 verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
13773
13774         if (old_state->shared_dpll &&
13775             old_state->shared_dpll != new_state->shared_dpll) {
13776                 unsigned crtc_mask = 1 << drm_crtc_index(crtc);
13777                 struct intel_shared_dpll *pll = old_state->shared_dpll;
13778
13779                 I915_STATE_WARN(pll->active_mask & crtc_mask,
13780                                 "pll active mismatch (didn't expect pipe %c in active mask)\n",
13781                                 pipe_name(drm_crtc_index(crtc)));
13782                 I915_STATE_WARN(pll->config.crtc_mask & crtc_mask,
13783                                 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
13784                                 pipe_name(drm_crtc_index(crtc)));
13785         }
13786 }
13787
13788 static void
13789 intel_modeset_verify_crtc(struct drm_crtc *crtc,
13790                          struct drm_crtc_state *old_state,
13791                          struct drm_crtc_state *new_state)
13792 {
13793         if (!needs_modeset(new_state) &&
13794             !to_intel_crtc_state(new_state)->update_pipe)
13795                 return;
13796
13797         verify_wm_state(crtc, new_state);
13798         verify_connector_state(crtc->dev, crtc);
13799         verify_crtc_state(crtc, old_state, new_state);
13800         verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
13801 }
13802
13803 static void
13804 verify_disabled_dpll_state(struct drm_device *dev)
13805 {
13806         struct drm_i915_private *dev_priv = to_i915(dev);
13807         int i;
13808
13809         for (i = 0; i < dev_priv->num_shared_dpll; i++)
13810                 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
13811 }
13812
/*
 * Verify the parts of the modeset state that are not tied to a specific
 * CRTC: encoder and connector bookkeeping for the whole device, plus the
 * invariants of every shared DPLL (with no CRTC to check against).
 */
static void
intel_modeset_verify_disabled(struct drm_device *dev)
{
	verify_encoder_state(dev);
	verify_connector_state(dev, NULL);
	verify_disabled_dpll_state(dev);
}
13820
13821 static void update_scanline_offset(struct intel_crtc *crtc)
13822 {
13823         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13824
13825         /*
13826          * The scanline counter increments at the leading edge of hsync.
13827          *
13828          * On most platforms it starts counting from vtotal-1 on the
13829          * first active line. That means the scanline counter value is
13830          * always one less than what we would expect. Ie. just after
13831          * start of vblank, which also occurs at start of hsync (on the
13832          * last active line), the scanline counter will read vblank_start-1.
13833          *
13834          * On gen2 the scanline counter starts counting from 1 instead
13835          * of vtotal-1, so we have to subtract one (or rather add vtotal-1
13836          * to keep the value positive), instead of adding one.
13837          *
13838          * On HSW+ the behaviour of the scanline counter depends on the output
13839          * type. For DP ports it behaves like most other platforms, but on HDMI
13840          * there's an extra 1 line difference. So we need to add two instead of
13841          * one to the value.
13842          */
13843         if (IS_GEN2(dev_priv)) {
13844                 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
13845                 int vtotal;
13846
13847                 vtotal = adjusted_mode->crtc_vtotal;
13848                 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
13849                         vtotal /= 2;
13850
13851                 crtc->scanline_offset = vtotal - 1;
13852         } else if (HAS_DDI(dev_priv) &&
13853                    intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
13854                 crtc->scanline_offset = 2;
13855         } else
13856                 crtc->scanline_offset = 1;
13857 }
13858
13859 static void intel_modeset_clear_plls(struct drm_atomic_state *state)
13860 {
13861         struct drm_device *dev = state->dev;
13862         struct drm_i915_private *dev_priv = to_i915(dev);
13863         struct intel_shared_dpll_config *shared_dpll = NULL;
13864         struct drm_crtc *crtc;
13865         struct drm_crtc_state *crtc_state;
13866         int i;
13867
13868         if (!dev_priv->display.crtc_compute_clock)
13869                 return;
13870
13871         for_each_crtc_in_state(state, crtc, crtc_state, i) {
13872                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13873                 struct intel_shared_dpll *old_dpll =
13874                         to_intel_crtc_state(crtc->state)->shared_dpll;
13875
13876                 if (!needs_modeset(crtc_state))
13877                         continue;
13878
13879                 to_intel_crtc_state(crtc_state)->shared_dpll = NULL;
13880
13881                 if (!old_dpll)
13882                         continue;
13883
13884                 if (!shared_dpll)
13885                         shared_dpll = intel_atomic_get_shared_dpll_state(state);
13886
13887                 intel_shared_dpll_config_put(shared_dpll, old_dpll, intel_crtc);
13888         }
13889 }
13890
13891 /*
13892  * This implements the workaround described in the "notes" section of the mode
13893  * set sequence documentation. When going from no pipes or single pipe to
13894  * multiple pipes, and planes are enabled after the pipe, we need to wait at
13895  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
13896  */
13897 static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
13898 {
13899         struct drm_crtc_state *crtc_state;
13900         struct intel_crtc *intel_crtc;
13901         struct drm_crtc *crtc;
13902         struct intel_crtc_state *first_crtc_state = NULL;
13903         struct intel_crtc_state *other_crtc_state = NULL;
13904         enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
13905         int i;
13906
13907         /* look at all crtc's that are going to be enabled in during modeset */
13908         for_each_crtc_in_state(state, crtc, crtc_state, i) {
13909                 intel_crtc = to_intel_crtc(crtc);
13910
13911                 if (!crtc_state->active || !needs_modeset(crtc_state))
13912                         continue;
13913
13914                 if (first_crtc_state) {
13915                         other_crtc_state = to_intel_crtc_state(crtc_state);
13916                         break;
13917                 } else {
13918                         first_crtc_state = to_intel_crtc_state(crtc_state);
13919                         first_pipe = intel_crtc->pipe;
13920                 }
13921         }
13922
13923         /* No workaround needed? */
13924         if (!first_crtc_state)
13925                 return 0;
13926
13927         /* w/a possibly needed, check how many crtc's are already enabled. */
13928         for_each_intel_crtc(state->dev, intel_crtc) {
13929                 struct intel_crtc_state *pipe_config;
13930
13931                 pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
13932                 if (IS_ERR(pipe_config))
13933                         return PTR_ERR(pipe_config);
13934
13935                 pipe_config->hsw_workaround_pipe = INVALID_PIPE;
13936
13937                 if (!pipe_config->base.active ||
13938                     needs_modeset(&pipe_config->base))
13939                         continue;
13940
13941                 /* 2 or more enabled crtcs means no need for w/a */
13942                 if (enabled_pipe != INVALID_PIPE)
13943                         return 0;
13944
13945                 enabled_pipe = intel_crtc->pipe;
13946         }
13947
13948         if (enabled_pipe != INVALID_PIPE)
13949                 first_crtc_state->hsw_workaround_pipe = enabled_pipe;
13950         else if (other_crtc_state)
13951                 other_crtc_state->hsw_workaround_pipe = first_pipe;
13952
13953         return 0;
13954 }
13955
13956 static int intel_modeset_all_pipes(struct drm_atomic_state *state)
13957 {
13958         struct drm_crtc *crtc;
13959         struct drm_crtc_state *crtc_state;
13960         int ret = 0;
13961
13962         /* add all active pipes to the state */
13963         for_each_crtc(state->dev, crtc) {
13964                 crtc_state = drm_atomic_get_crtc_state(state, crtc);
13965                 if (IS_ERR(crtc_state))
13966                         return PTR_ERR(crtc_state);
13967
13968                 if (!crtc_state->active || needs_modeset(crtc_state))
13969                         continue;
13970
13971                 crtc_state->mode_changed = true;
13972
13973                 ret = drm_atomic_add_affected_connectors(state, crtc);
13974                 if (ret)
13975                         break;
13976
13977                 ret = drm_atomic_add_affected_planes(state, crtc);
13978                 if (ret)
13979                         break;
13980         }
13981
13982         return ret;
13983 }
13984
/*
 * Perform the global (non-per-crtc) checks for a state that contains at
 * least one modeset: reject conflicting digital ports, recompute the
 * active-crtc bitmask, recalculate cdclk, release shared DPLLs of
 * modesetting pipes, and apply the HSW planes workaround.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_modeset_checks(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret = 0, i;

	if (!check_digital_port_conflicts(state)) {
		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
		return -EINVAL;
	}

	intel_state->modeset = true;
	/* Start from the current mask and fold in this state's changes. */
	intel_state->active_crtcs = dev_priv->active_crtcs;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		if (crtc_state->active)
			intel_state->active_crtcs |= 1 << i;
		else
			intel_state->active_crtcs &= ~(1 << i);

		if (crtc_state->active != crtc->state->active)
			intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
	}

	/*
	 * See if the config requires any additional preparation, e.g.
	 * to adjust global state with pipes off.  We need to do this
	 * here so we can get the modeset_pipe updated config for the new
	 * mode set on this crtc.  For other crtcs we need to use the
	 * adjusted_mode bits in the crtc directly.
	 */
	if (dev_priv->display.modeset_calc_cdclk) {
		/* Fall back to the current VCO, then the preferred one. */
		if (!intel_state->cdclk_pll_vco)
			intel_state->cdclk_pll_vco = dev_priv->cdclk_pll.vco;
		if (!intel_state->cdclk_pll_vco)
			intel_state->cdclk_pll_vco = dev_priv->skl_preferred_vco_freq;

		ret = dev_priv->display.modeset_calc_cdclk(state);
		if (ret < 0)
			return ret;

		/*
		 * A cdclk or VCO change affects every pipe, so all active
		 * pipes must be modeset along with it.
		 */
		if (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
		    intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco)
			ret = intel_modeset_all_pipes(state);

		if (ret < 0)
			return ret;

		DRM_DEBUG_KMS("New cdclk calculated to be atomic %u, actual %u\n",
			      intel_state->cdclk, intel_state->dev_cdclk);
	} else
		to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq;

	intel_modeset_clear_plls(state);

	if (IS_HASWELL(dev_priv))
		return haswell_mode_set_planes_workaround(state);

	return 0;
}
14047
14048 /*
14049  * Handle calculation of various watermark data at the end of the atomic check
14050  * phase.  The code here should be run after the per-crtc and per-plane 'check'
14051  * handlers to ensure that all derived state has been updated.
14052  */
14053 static int calc_watermark_data(struct drm_atomic_state *state)
14054 {
14055         struct drm_device *dev = state->dev;
14056         struct drm_i915_private *dev_priv = to_i915(dev);
14057
14058         /* Is there platform-specific watermark information to calculate? */
14059         if (dev_priv->display.compute_global_watermarks)
14060                 return dev_priv->display.compute_global_watermarks(state);
14061
14062         return 0;
14063 }
14064
14065 /**
14066  * intel_atomic_check - validate state object
14067  * @dev: drm device
14068  * @state: state to validate
14069  */
14070 static int intel_atomic_check(struct drm_device *dev,
14071                               struct drm_atomic_state *state)
14072 {
14073         struct drm_i915_private *dev_priv = to_i915(dev);
14074         struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
14075         struct drm_crtc *crtc;
14076         struct drm_crtc_state *crtc_state;
14077         int ret, i;
14078         bool any_ms = false;
14079
14080         ret = drm_atomic_helper_check_modeset(dev, state);
14081         if (ret)
14082                 return ret;
14083
14084         for_each_crtc_in_state(state, crtc, crtc_state, i) {
14085                 struct intel_crtc_state *pipe_config =
14086                         to_intel_crtc_state(crtc_state);
14087
14088                 /* Catch I915_MODE_FLAG_INHERITED */
14089                 if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
14090                         crtc_state->mode_changed = true;
14091
14092                 if (!needs_modeset(crtc_state))
14093                         continue;
14094
14095                 if (!crtc_state->enable) {
14096                         any_ms = true;
14097                         continue;
14098                 }
14099
14100                 /* FIXME: For only active_changed we shouldn't need to do any
14101                  * state recomputation at all. */
14102
14103                 ret = drm_atomic_add_affected_connectors(state, crtc);
14104                 if (ret)
14105                         return ret;
14106
14107                 ret = intel_modeset_pipe_config(crtc, pipe_config);
14108                 if (ret) {
14109                         intel_dump_pipe_config(to_intel_crtc(crtc),
14110                                                pipe_config, "[failed]");
14111                         return ret;
14112                 }
14113
14114                 if (i915.fastboot &&
14115                     intel_pipe_config_compare(dev,
14116                                         to_intel_crtc_state(crtc->state),
14117                                         pipe_config, true)) {
14118                         crtc_state->mode_changed = false;
14119                         to_intel_crtc_state(crtc_state)->update_pipe = true;
14120                 }
14121
14122                 if (needs_modeset(crtc_state))
14123                         any_ms = true;
14124
14125                 ret = drm_atomic_add_affected_planes(state, crtc);
14126                 if (ret)
14127                         return ret;
14128
14129                 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
14130                                        needs_modeset(crtc_state) ?
14131                                        "[modeset]" : "[fastset]");
14132         }
14133
14134         if (any_ms) {
14135                 ret = intel_modeset_checks(state);
14136
14137                 if (ret)
14138                         return ret;
14139         } else
14140                 intel_state->cdclk = dev_priv->cdclk_freq;
14141
14142         ret = drm_atomic_helper_check_planes(dev, state);
14143         if (ret)
14144                 return ret;
14145
14146         intel_fbc_choose_crtc(dev_priv, state);
14147         return calc_watermark_data(state);
14148 }
14149
14150 static int intel_atomic_prepare_commit(struct drm_device *dev,
14151                                        struct drm_atomic_state *state,
14152                                        bool nonblock)
14153 {
14154         struct drm_i915_private *dev_priv = to_i915(dev);
14155         struct drm_plane_state *plane_state;
14156         struct drm_crtc_state *crtc_state;
14157         struct drm_plane *plane;
14158         struct drm_crtc *crtc;
14159         int i, ret;
14160
14161         for_each_crtc_in_state(state, crtc, crtc_state, i) {
14162                 if (state->legacy_cursor_update)
14163                         continue;
14164
14165                 ret = intel_crtc_wait_for_pending_flips(crtc);
14166                 if (ret)
14167                         return ret;
14168
14169                 if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2)
14170                         flush_workqueue(dev_priv->wq);
14171         }
14172
14173         ret = mutex_lock_interruptible(&dev->struct_mutex);
14174         if (ret)
14175                 return ret;
14176
14177         ret = drm_atomic_helper_prepare_planes(dev, state);
14178         mutex_unlock(&dev->struct_mutex);
14179
14180         if (!ret && !nonblock) {
14181                 for_each_plane_in_state(state, plane, plane_state, i) {
14182                         struct intel_plane_state *intel_plane_state =
14183                                 to_intel_plane_state(plane_state);
14184
14185                         if (!intel_plane_state->wait_req)
14186                                 continue;
14187
14188                         ret = i915_wait_request(intel_plane_state->wait_req,
14189                                                 I915_WAIT_INTERRUPTIBLE,
14190                                                 NULL, NULL);
14191                         if (ret) {
14192                                 /* Any hang should be swallowed by the wait */
14193                                 WARN_ON(ret == -EIO);
14194                                 mutex_lock(&dev->struct_mutex);
14195                                 drm_atomic_helper_cleanup_planes(dev, state);
14196                                 mutex_unlock(&dev->struct_mutex);
14197                                 break;
14198                         }
14199                 }
14200         }
14201
14202         return ret;
14203 }
14204
14205 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
14206 {
14207         struct drm_device *dev = crtc->base.dev;
14208
14209         if (!dev->max_vblank_count)
14210                 return drm_accurate_vblank_count(&crtc->base);
14211
14212         return dev->driver->get_vblank_counter(dev, crtc->pipe);
14213 }
14214
/*
 * Wait until every pipe in @crtc_mask has passed at least one vblank.
 * First sample all counters (holding a vblank reference per pipe), then
 * wait for each counter to move, so the waits overlap rather than
 * serialize. Pipes whose vblank reference can't be taken are dropped
 * from the mask. A stuck pipe only warns after a 50ms timeout.
 */
static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
					  struct drm_i915_private *dev_priv,
					  unsigned crtc_mask)
{
	unsigned last_vblank_count[I915_MAX_PIPES];
	enum pipe pipe;
	int ret;

	if (!crtc_mask)
		return;

	/* Pass 1: grab vblank references and snapshot the counters. */
	for_each_pipe(dev_priv, pipe) {
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];

		if (!((1 << pipe) & crtc_mask))
			continue;

		ret = drm_crtc_vblank_get(crtc);
		if (WARN_ON(ret != 0)) {
			/* No reference taken: don't wait on (or put) it. */
			crtc_mask &= ~(1 << pipe);
			continue;
		}

		last_vblank_count[pipe] = drm_crtc_vblank_count(crtc);
	}

	/* Pass 2: wait for each counter to advance, then drop the ref. */
	for_each_pipe(dev_priv, pipe) {
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		long lret;

		if (!((1 << pipe) & crtc_mask))
			continue;

		lret = wait_event_timeout(dev->vblank[pipe].queue,
				last_vblank_count[pipe] !=
					drm_crtc_vblank_count(crtc),
				msecs_to_jiffies(50));

		WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe));

		drm_crtc_vblank_put(crtc);
	}
}
14258
14259 static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
14260 {
14261         /* fb updated, need to unpin old fb */
14262         if (crtc_state->fb_changed)
14263                 return true;
14264
14265         /* wm changes, need vblank before final wm's */
14266         if (crtc_state->update_wm_post)
14267                 return true;
14268
14269         /*
14270          * cxsr is re-enabled after vblank.
14271          * This is already handled by crtc_state->update_wm_post,
14272          * but added for clarity.
14273          */
14274         if (crtc_state->disable_cxsr)
14275                 return true;
14276
14277         return false;
14278 }
14279
/*
 * Commit the new state of one CRTC during the atomic commit tail: enable
 * the pipe (modeset) or run the pre-plane update (fastset/plane update),
 * enable FBC if the primary plane is part of the update, commit the
 * planes, and record the pipe in @crtc_vblank_mask if cleanup must wait
 * for a vblank. Note crtc->state has already been swapped to the new
 * state here; @old_crtc_state holds the previous one.
 */
static void intel_update_crtc(struct drm_crtc *crtc,
			      struct drm_atomic_state *state,
			      struct drm_crtc_state *old_crtc_state,
			      unsigned int *crtc_vblank_mask)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc->state);
	bool modeset = needs_modeset(crtc->state);

	if (modeset) {
		/* Scanline offset depends on the mode; refresh it first. */
		update_scanline_offset(intel_crtc);
		dev_priv->display.crtc_enable(pipe_config, state);
	} else {
		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
	}

	/* Only touch FBC when the primary plane is part of this update. */
	if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
		intel_fbc_enable(
		    intel_crtc, pipe_config,
		    to_intel_plane_state(crtc->primary->state));
	}

	drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);

	if (needs_vblank_wait(pipe_config))
		*crtc_vblank_mask |= drm_crtc_mask(crtc);
}
14309
14310 static void intel_update_crtcs(struct drm_atomic_state *state,
14311                                unsigned int *crtc_vblank_mask)
14312 {
14313         struct drm_crtc *crtc;
14314         struct drm_crtc_state *old_crtc_state;
14315         int i;
14316
14317         for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
14318                 if (!crtc->state->active)
14319                         continue;
14320
14321                 intel_update_crtc(crtc, state, old_crtc_state,
14322                                   crtc_vblank_mask);
14323         }
14324 }
14325
/*
 * SKL+ variant of intel_update_crtcs(): commits CRTCs in an order that
 * keeps their DDB allocations from ever overlapping in between updates.
 * Repeatedly sweeps the state, updating only pipes whose new DDB no
 * longer overlaps any not-yet-updated pipe, until all are done. The
 * fixed-point loop terminates because each sweep that makes no progress
 * would imply a cyclic overlap, which the allocator doesn't produce.
 */
static void skl_update_crtcs(struct drm_atomic_state *state,
			     unsigned int *crtc_vblank_mask)
{
	struct drm_device *dev = state->dev;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	struct drm_crtc_state *old_crtc_state;
	struct intel_crtc_state *cstate;
	unsigned int updated = 0;	/* mask of pipes already committed */
	bool progress;
	enum pipe pipe;

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with eachother inbetween CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 */
	do {
		int i;
		progress = false;

		for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
			bool vbl_wait = false;
			unsigned int cmask = drm_crtc_mask(crtc);

			intel_crtc = to_intel_crtc(crtc);
			cstate = to_intel_crtc_state(crtc->state);
			pipe = intel_crtc->pipe;

			if (updated & cmask || !crtc->state->active)
				continue;
			/* Defer pipes whose new DDB still overlaps others. */
			if (skl_ddb_allocation_overlaps(state, intel_crtc))
				continue;

			updated |= cmask;

			/*
			 * If this is an already active pipe, it's DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
						 &intel_crtc->hw_ddb) &&
			    !crtc->state->active_changed &&
			    intel_state->wm_results.dirty_pipes != updated)
				vbl_wait = true;

			intel_update_crtc(crtc, state, old_crtc_state,
					  crtc_vblank_mask);

			if (vbl_wait)
				intel_wait_for_vblank(dev, pipe);

			progress = true;
		}
	} while (progress);
}
14386
14387 static void intel_atomic_commit_tail(struct drm_atomic_state *state)
14388 {
14389         struct drm_device *dev = state->dev;
14390         struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
14391         struct drm_i915_private *dev_priv = to_i915(dev);
14392         struct drm_crtc_state *old_crtc_state;
14393         struct drm_crtc *crtc;
14394         struct intel_crtc_state *intel_cstate;
14395         struct drm_plane *plane;
14396         struct drm_plane_state *plane_state;
14397         bool hw_check = intel_state->modeset;
14398         unsigned long put_domains[I915_MAX_PIPES] = {};
14399         unsigned crtc_vblank_mask = 0;
14400         int i, ret;
14401
14402         for_each_plane_in_state(state, plane, plane_state, i) {
14403                 struct intel_plane_state *intel_plane_state =
14404                         to_intel_plane_state(plane_state);
14405
14406                 if (!intel_plane_state->wait_req)
14407                         continue;
14408
14409                 ret = i915_wait_request(intel_plane_state->wait_req,
14410                                         0, NULL, NULL);
14411                 /* EIO should be eaten, and we can't get interrupted in the
14412                  * worker, and blocking commits have waited already. */
14413                 WARN_ON(ret);
14414         }
14415
14416         drm_atomic_helper_wait_for_dependencies(state);
14417
14418         if (intel_state->modeset) {
14419                 memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
14420                        sizeof(intel_state->min_pixclk));
14421                 dev_priv->active_crtcs = intel_state->active_crtcs;
14422                 dev_priv->atomic_cdclk_freq = intel_state->cdclk;
14423
14424                 intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
14425         }
14426
14427         for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
14428                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14429
14430                 if (needs_modeset(crtc->state) ||
14431                     to_intel_crtc_state(crtc->state)->update_pipe) {
14432                         hw_check = true;
14433
14434                         put_domains[to_intel_crtc(crtc)->pipe] =
14435                                 modeset_get_crtc_power_domains(crtc,
14436                                         to_intel_crtc_state(crtc->state));
14437                 }
14438
14439                 if (!needs_modeset(crtc->state))
14440                         continue;
14441
14442                 intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
14443
14444                 if (old_crtc_state->active) {
14445                         intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
14446                         dev_priv->display.crtc_disable(to_intel_crtc_state(old_crtc_state), state);
14447                         intel_crtc->active = false;
14448                         intel_fbc_disable(intel_crtc);
14449                         intel_disable_shared_dpll(intel_crtc);
14450
14451                         /*
14452                          * Underruns don't always raise
14453                          * interrupts, so check manually.
14454                          */
14455                         intel_check_cpu_fifo_underruns(dev_priv);
14456                         intel_check_pch_fifo_underruns(dev_priv);
14457
14458                         if (!crtc->state->active)
14459                                 intel_update_watermarks(crtc);
14460                 }
14461         }
14462
14463         /* Only after disabling all output pipelines that will be changed can we
14464          * update the the output configuration. */
14465         intel_modeset_update_crtc_state(state);
14466
14467         if (intel_state->modeset) {
14468                 drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
14469
14470                 if (dev_priv->display.modeset_commit_cdclk &&
14471                     (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
14472                      intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco))
14473                         dev_priv->display.modeset_commit_cdclk(state);
14474
14475                 /*
14476                  * SKL workaround: bspec recommends we disable the SAGV when we
14477                  * have more then one pipe enabled
14478                  */
14479                 if (!intel_can_enable_sagv(state))
14480                         intel_disable_sagv(dev_priv);
14481
14482                 intel_modeset_verify_disabled(dev);
14483         }
14484
14485         /* Complete the events for pipes that have now been disabled */
14486         for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
14487                 bool modeset = needs_modeset(crtc->state);
14488
14489                 /* Complete events for now disabled pipes here. */
14490                 if (modeset && !crtc->state->active && crtc->state->event) {
14491                         spin_lock_irq(&dev->event_lock);
14492                         drm_crtc_send_vblank_event(crtc, crtc->state->event);
14493                         spin_unlock_irq(&dev->event_lock);
14494
14495                         crtc->state->event = NULL;
14496                 }
14497         }
14498
14499         /* Now enable the clocks, plane, pipe, and connectors that we set up. */
14500         dev_priv->display.update_crtcs(state, &crtc_vblank_mask);
14501
14502         /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
14503          * already, but still need the state for the delayed optimization. To
14504          * fix this:
14505          * - wrap the optimization/post_plane_update stuff into a per-crtc work.
14506          * - schedule that vblank worker _before_ calling hw_done
14507          * - at the start of commit_tail, cancel it _synchronously_
14508          * - switch over to the vblank wait helper in the core after that since
14509          *   we don't need our special handling any more.
14510          */
14511         if (!state->legacy_cursor_update)
14512                 intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);
14513
14514         /*
14515          * Now that the vblank has passed, we can go ahead and program the
14516          * optimal watermarks on platforms that need two-step watermark
14517          * programming.
14518          *
14519          * TODO: Move this (and other cleanup) to an async worker eventually.
14520          */
14521         for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
14522                 intel_cstate = to_intel_crtc_state(crtc->state);
14523
14524                 if (dev_priv->display.optimize_watermarks)
14525                         dev_priv->display.optimize_watermarks(intel_cstate);
14526         }
14527
14528         for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
14529                 intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
14530
14531                 if (put_domains[i])
14532                         modeset_put_power_domains(dev_priv, put_domains[i]);
14533
14534                 intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
14535         }
14536
14537         if (intel_state->modeset && intel_can_enable_sagv(state))
14538                 intel_enable_sagv(dev_priv);
14539
14540         drm_atomic_helper_commit_hw_done(state);
14541
14542         if (intel_state->modeset)
14543                 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
14544
14545         mutex_lock(&dev->struct_mutex);
14546         drm_atomic_helper_cleanup_planes(dev, state);
14547         mutex_unlock(&dev->struct_mutex);
14548
14549         drm_atomic_helper_commit_cleanup_done(state);
14550
14551         drm_atomic_state_free(state);
14552
14553         /* As one of the primary mmio accessors, KMS has a high likelihood
14554          * of triggering bugs in unclaimed access. After we finish
14555          * modesetting, see if an error has been flagged, and if so
14556          * enable debugging for the next modeset - and hope we catch
14557          * the culprit.
14558          *
14559          * XXX note that we assume display power is on at this point.
14560          * This might hold true now but we need to add pm helper to check
14561          * unclaimed only when the hardware is on, as atomic commits
14562          * can happen also when the device is completely off.
14563          */
14564         intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
14565 }
14566
14567 static void intel_atomic_commit_work(struct work_struct *work)
14568 {
14569         struct drm_atomic_state *state = container_of(work,
14570                                                       struct drm_atomic_state,
14571                                                       commit_work);
14572         intel_atomic_commit_tail(state);
14573 }
14574
/*
 * Move the frontbuffer tracking bit of every plane in @state from the old
 * framebuffer's GEM object to the new one.  Called after
 * drm_atomic_helper_swap_state() (see intel_atomic_commit()), so
 * plane->state already points at the new state while the iterator hands
 * back the old plane state.  Either fb (and hence object) may be NULL.
 */
static void intel_atomic_track_fbs(struct drm_atomic_state *state)
{
	struct drm_plane_state *old_plane_state;
	struct drm_plane *plane;
	int i;

	for_each_plane_in_state(state, plane, old_plane_state, i)
		i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
				  intel_fb_obj(plane->state->fb),
				  to_intel_plane(plane)->frontbuffer_bit);
}
14586
/**
 * intel_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the top-level driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a top-level state object that has been validated
 * with drm_atomic_helper_check().
 *
 * FIXME:  Atomic modeset support for i915 is not yet complete.  At the moment
 * nonblocking commits are only safe for pure plane updates. Everything else
 * should work though.
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *state,
			       bool nonblock)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	/* Nonblocking full modesets are not implemented yet (see FIXME above). */
	if (intel_state->modeset && nonblock) {
		DRM_DEBUG_KMS("nonblocking commit for modeset not yet implemented.\n");
		return -EINVAL;
	}

	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		return ret;

	INIT_WORK(&state->commit_work, intel_atomic_commit_work);

	/* Pin framebuffers and wait for any pending rendering/flips. */
	ret = intel_atomic_prepare_commit(dev, state, nonblock);
	if (ret) {
		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
		return ret;
	}

	/*
	 * Point of no return: after the swap, crtc/plane->state hold the
	 * new software state and the commit must be carried through.
	 */
	drm_atomic_helper_swap_state(state, true);
	dev_priv->wm.distrust_bios_wm = false;
	/* Latch the computed SKL watermarks for the hardware programming step. */
	dev_priv->wm.skl_results = intel_state->wm_results;
	intel_shared_dpll_commit(state);
	intel_atomic_track_fbs(state);

	/* Hardware programming happens in the tail, possibly from a worker. */
	if (nonblock)
		queue_work(system_unbound_wq, &state->commit_work);
	else
		intel_atomic_commit_tail(state);

	return 0;
}
14641
/*
 * Force a full modeset on @crtc re-using its currently-committed mode.
 * Errors other than -EDEADLK (which is retried after backing off) are
 * dropped silently apart from freeing the state; there is no caller to
 * report them to.
 */
void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_atomic_state *state;
	struct drm_crtc_state *crtc_state;
	int ret;

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		DRM_DEBUG_KMS("[CRTC:%d:%s] crtc restore failed, out of memory",
			      crtc->base.id, crtc->name);
		return;
	}

	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);

retry:
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (!ret) {
		/* Nothing to restore on an inactive crtc. */
		if (!crtc_state->active)
			goto out;

		/* Request a full modeset even though nothing has changed. */
		crtc_state->mode_changed = true;
		ret = drm_atomic_commit(state);
	}

	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(state->acquire_ctx);
		goto retry;
	}

	/*
	 * NOTE: the "out" label sits on the free so that the early
	 * "goto out" above always frees the state, while a successful
	 * commit (ret == 0, state consumed by drm_atomic_commit())
	 * skips it.
	 */
	if (ret)
out:
		drm_atomic_state_free(state);
}
14679
14680 /*
14681  * FIXME: Remove this once i915 is fully DRIVER_ATOMIC by calling
14682  *        drm_atomic_helper_legacy_gamma_set() directly.
14683  */
14684 static int intel_atomic_legacy_gamma_set(struct drm_crtc *crtc,
14685                                          u16 *red, u16 *green, u16 *blue,
14686                                          uint32_t size)
14687 {
14688         struct drm_device *dev = crtc->dev;
14689         struct drm_mode_config *config = &dev->mode_config;
14690         struct drm_crtc_state *state;
14691         int ret;
14692
14693         ret = drm_atomic_helper_legacy_gamma_set(crtc, red, green, blue, size);
14694         if (ret)
14695                 return ret;
14696
14697         /*
14698          * Make sure we update the legacy properties so this works when
14699          * atomic is not enabled.
14700          */
14701
14702         state = crtc->state;
14703
14704         drm_object_property_set_value(&crtc->base,
14705                                       config->degamma_lut_property,
14706                                       (state->degamma_lut) ?
14707                                       state->degamma_lut->base.id : 0);
14708
14709         drm_object_property_set_value(&crtc->base,
14710                                       config->ctm_property,
14711                                       (state->ctm) ?
14712                                       state->ctm->base.id : 0);
14713
14714         drm_object_property_set_value(&crtc->base,
14715                                       config->gamma_lut_property,
14716                                       (state->gamma_lut) ?
14717                                       state->gamma_lut->base.id : 0);
14718
14719         return 0;
14720 }
14721
/* CRTC vtable: atomic helpers plus i915-specific gamma/flip/destroy hooks. */
static const struct drm_crtc_funcs intel_crtc_funcs = {
	.gamma_set = intel_atomic_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.set_property = drm_atomic_helper_crtc_set_property,
	.destroy = intel_crtc_destroy,
	.page_flip = intel_crtc_page_flip,
	.atomic_duplicate_state = intel_crtc_duplicate_state,
	.atomic_destroy_state = intel_crtc_destroy_state,
};
14731
/**
 * intel_prepare_plane_fb - Prepare fb for usage on plane
 * @plane: drm plane to prepare for
 * @new_state: the plane state being prepared
 *
 * Prepares a framebuffer for usage on a display plane.  Generally this
 * involves pinning the underlying object and updating the frontbuffer tracking
 * bits.  Some older platforms need special physical address handling for
 * cursor planes.
 *
 * Must be called with struct_mutex held.
 *
 * Returns 0 on success, negative error code on failure.
 */
int
intel_prepare_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *new_state)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_framebuffer *fb = new_state->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
	struct reservation_object *resv;
	int ret = 0;

	/* Nothing to pin or wait on when neither state has a framebuffer. */
	if (!obj && !old_obj)
		return 0;

	if (old_obj) {
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);

		/* Big Hammer, we also need to ensure that any pending
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
		 * current scanout is retired before unpinning the old
		 * framebuffer. Note that we rely on userspace rendering
		 * into the buffer attached to the pipe they are waiting
		 * on. If not, userspace generates a GPU hang with IPEHR
		 * point to the MI_WAIT_FOR_EVENT.
		 *
		 * This should only fail upon a hung GPU, in which case we
		 * can safely continue.
		 */
		if (needs_modeset(crtc_state))
			ret = i915_gem_object_wait_rendering(old_obj, true);
		if (ret) {
			/* GPU hangs should have been swallowed by the wait */
			WARN_ON(ret == -EIO);
			return ret;
		}
	}

	/* Plane is being disabled (no new fb): nothing left to prepare. */
	if (!obj)
		return 0;

	/* For framebuffer backed by dmabuf, wait for fence */
	resv = i915_gem_object_get_dmabuf_resv(obj);
	if (resv) {
		long lret;

		lret = reservation_object_wait_timeout_rcu(resv, false, true,
							   MAX_SCHEDULE_TIMEOUT);
		/* Only a signal aborts the prepare; other errors just warn. */
		if (lret == -ERESTARTSYS)
			return lret;

		WARN(lret < 0, "waiting returns %li\n", lret);
	}

	if (plane->type == DRM_PLANE_TYPE_CURSOR &&
	    INTEL_INFO(dev)->cursor_needs_physical) {
		/* I830 needs a larger alignment for its physical cursor. */
		int align = IS_I830(dev_priv) ? 16 * 1024 : 256;
		ret = i915_gem_object_attach_phys(obj, align);
		if (ret)
			DRM_DEBUG_KMS("failed to attach phys object\n");
	} else {
		struct i915_vma *vma;

		vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
		if (IS_ERR(vma))
			ret = PTR_ERR(vma);
	}

	if (ret == 0) {
		/* Capture the pending write so the commit can wait for it. */
		to_intel_plane_state(new_state)->wait_req =
			i915_gem_active_get(&obj->last_write,
					    &obj->base.dev->struct_mutex);
	}

	return ret;
}
14823
/**
 * intel_cleanup_plane_fb - Cleans up an fb after plane use
 * @plane: drm plane to clean up for
 * @old_state: the state from the previous modeset, carrying the old fb
 *
 * Cleans up a framebuffer that has just been removed from a plane.
 *
 * Must be called with struct_mutex held.
 */
void
intel_cleanup_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *old_state)
{
	struct drm_device *dev = plane->dev;
	struct intel_plane_state *old_intel_state;
	struct intel_plane_state *intel_state = to_intel_plane_state(plane->state);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
	struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);

	old_intel_state = to_intel_plane_state(old_state);

	if (!obj && !old_obj)
		return;

	/*
	 * Unpin the old fb, except for physically-addressed cursors which
	 * were never pinned through intel_pin_and_fence_fb_obj().
	 */
	if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
	    !INTEL_INFO(dev)->cursor_needs_physical))
		intel_unpin_fb_obj(old_state->fb, old_state->rotation);

	/* Drop the wait requests captured in intel_prepare_plane_fb(). */
	i915_gem_request_assign(&intel_state->wait_req, NULL);
	i915_gem_request_assign(&old_intel_state->wait_req, NULL);
}
14855
14856 int
14857 skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
14858 {
14859         int max_scale;
14860         int crtc_clock, cdclk;
14861
14862         if (!intel_crtc || !crtc_state->base.enable)
14863                 return DRM_PLANE_HELPER_NO_SCALING;
14864
14865         crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
14866         cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
14867
14868         if (WARN_ON_ONCE(!crtc_clock || cdclk < crtc_clock))
14869                 return DRM_PLANE_HELPER_NO_SCALING;
14870
14871         /*
14872          * skl max scale is lower of:
14873          *    close to 3 but not 3, -1 is for that purpose
14874          *            or
14875          *    cdclk/crtc_clock
14876          */
14877         max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk << 8) / crtc_clock));
14878
14879         return max_scale;
14880 }
14881
14882 static int
14883 intel_check_primary_plane(struct drm_plane *plane,
14884                           struct intel_crtc_state *crtc_state,
14885                           struct intel_plane_state *state)
14886 {
14887         struct drm_i915_private *dev_priv = to_i915(plane->dev);
14888         struct drm_crtc *crtc = state->base.crtc;
14889         int min_scale = DRM_PLANE_HELPER_NO_SCALING;
14890         int max_scale = DRM_PLANE_HELPER_NO_SCALING;
14891         bool can_position = false;
14892         int ret;
14893
14894         if (INTEL_GEN(dev_priv) >= 9) {
14895                 /* use scaler when colorkey is not required */
14896                 if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
14897                         min_scale = 1;
14898                         max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
14899                 }
14900                 can_position = true;
14901         }
14902
14903         ret = drm_plane_helper_check_state(&state->base,
14904                                            &state->clip,
14905                                            min_scale, max_scale,
14906                                            can_position, true);
14907         if (ret)
14908                 return ret;
14909
14910         if (!state->base.fb)
14911                 return 0;
14912
14913         if (INTEL_GEN(dev_priv) >= 9) {
14914                 ret = skl_check_plane_surface(state);
14915                 if (ret)
14916                         return ret;
14917         }
14918
14919         return 0;
14920 }
14921
/*
 * Per-crtc atomic begin hook: opens the vblank-evasion critical section
 * and, for fastsets/plane-only updates, programs the pipe-level state
 * (color management, pipe config, scalers, linetime) that must land in
 * the same frame as the plane updates.
 */
static void intel_begin_crtc_commit(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *intel_cstate =
		to_intel_crtc_state(crtc->state);
	struct intel_crtc_state *old_intel_state =
		to_intel_crtc_state(old_crtc_state);
	bool modeset = needs_modeset(crtc->state);
	enum pipe pipe = intel_crtc->pipe;

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(intel_crtc);

	/* Full modesets program the pipe elsewhere; only the evasion applies. */
	if (modeset)
		return;

	if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
		intel_color_set_csc(crtc->state);
		intel_color_load_luts(crtc->state);
	}

	if (intel_cstate->update_pipe) {
		/* Fastset: reprogram pipe timings/size without a modeset. */
		intel_update_pipe_config(intel_crtc, old_intel_state);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(intel_crtc);

		I915_WRITE(PIPE_WM_LINETIME(pipe),
			   intel_cstate->wm.skl.optimal.linetime);
	}
}
14955
14956 static void intel_finish_crtc_commit(struct drm_crtc *crtc,
14957                                      struct drm_crtc_state *old_crtc_state)
14958 {
14959         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14960
14961         intel_pipe_update_end(intel_crtc, NULL);
14962 }
14963
/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy, may be NULL (in which case nothing happens)
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite).
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	if (plane) {
		drm_plane_cleanup(plane);
		kfree(to_intel_plane(plane));
	}
}
14979
/* Plane vtable shared by primary, sprite and cursor planes. */
const struct drm_plane_funcs intel_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.set_property = drm_atomic_helper_plane_set_property,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,

};
14991
/*
 * Allocate and register the primary plane for @pipe, selecting the
 * supported format table, update/disable hooks and plane name per
 * hardware generation.  Returns NULL on any allocation or init failure.
 */
static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
						    int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *primary = NULL;
	struct intel_plane_state *state = NULL;
	const uint32_t *intel_primary_formats;
	unsigned int num_formats;
	int ret;

	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
	if (!primary)
		goto fail;

	state = intel_create_plane_state(&primary->base);
	if (!state)
		goto fail;
	primary->base.state = &state->base;

	primary->can_scale = false;
	primary->max_downscale = 1;
	/* Only gen9+ primaries can use a pipe scaler; none assigned yet. */
	if (INTEL_INFO(dev)->gen >= 9) {
		primary->can_scale = true;
		state->scaler_id = -1;
	}
	primary->pipe = pipe;
	primary->plane = pipe;
	primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
	primary->check_plane = intel_check_primary_plane;
	/* Pre-gen4 FBC hardware ties pipes to the opposite plane. */
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
		primary->plane = !pipe;

	/* Pick the format table and plane hooks for this generation. */
	if (INTEL_INFO(dev)->gen >= 9) {
		intel_primary_formats = skl_primary_formats;
		num_formats = ARRAY_SIZE(skl_primary_formats);

		primary->update_plane = skylake_update_primary_plane;
		primary->disable_plane = skylake_disable_primary_plane;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		intel_primary_formats = i965_primary_formats;
		num_formats = ARRAY_SIZE(i965_primary_formats);

		primary->update_plane = ironlake_update_primary_plane;
		primary->disable_plane = i9xx_disable_primary_plane;
	} else if (INTEL_INFO(dev)->gen >= 4) {
		intel_primary_formats = i965_primary_formats;
		num_formats = ARRAY_SIZE(i965_primary_formats);

		primary->update_plane = i9xx_update_primary_plane;
		primary->disable_plane = i9xx_disable_primary_plane;
	} else {
		intel_primary_formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);

		primary->update_plane = i9xx_update_primary_plane;
		primary->disable_plane = i9xx_disable_primary_plane;
	}

	/* The debug name scheme differs per generation. */
	if (INTEL_INFO(dev)->gen >= 9)
		ret = drm_universal_plane_init(dev, &primary->base, 0,
					       &intel_plane_funcs,
					       intel_primary_formats, num_formats,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane 1%c", pipe_name(pipe));
	else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		ret = drm_universal_plane_init(dev, &primary->base, 0,
					       &intel_plane_funcs,
					       intel_primary_formats, num_formats,
					       DRM_PLANE_TYPE_PRIMARY,
					       "primary %c", pipe_name(pipe));
	else
		ret = drm_universal_plane_init(dev, &primary->base, 0,
					       &intel_plane_funcs,
					       intel_primary_formats, num_formats,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane %c", plane_name(primary->plane));
	if (ret)
		goto fail;

	if (INTEL_INFO(dev)->gen >= 4)
		intel_create_rotation_property(dev, primary);

	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);

	return &primary->base;

fail:
	kfree(state);
	kfree(primary);

	return NULL;
}
15084
15085 void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
15086 {
15087         if (!dev->mode_config.rotation_property) {
15088                 unsigned long flags = DRM_ROTATE_0 |
15089                         DRM_ROTATE_180;
15090
15091                 if (INTEL_INFO(dev)->gen >= 9)
15092                         flags |= DRM_ROTATE_90 | DRM_ROTATE_270;
15093
15094                 dev->mode_config.rotation_property =
15095                         drm_mode_create_rotation_property(dev, flags);
15096         }
15097         if (dev->mode_config.rotation_property)
15098                 drm_object_attach_property(&plane->base.base,
15099                                 dev->mode_config.rotation_property,
15100                                 plane->base.state->rotation);
15101 }
15102
/*
 * Atomic check hook for cursor planes: clips the state (no scaling
 * allowed, positioning allowed) and validates cursor size, backing
 * buffer size, tiling and the CHV pipe C placement restriction.
 */
static int
intel_check_cursor_plane(struct drm_plane *plane,
			 struct intel_crtc_state *crtc_state,
			 struct intel_plane_state *state)
{
	struct drm_framebuffer *fb = state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	enum pipe pipe = to_intel_plane(plane)->pipe;
	unsigned stride;
	int ret;

	ret = drm_plane_helper_check_state(&state->base,
					   &state->clip,
					   DRM_PLANE_HELPER_NO_SCALING,
					   DRM_PLANE_HELPER_NO_SCALING,
					   true, true);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!obj)
		return 0;

	/* Check for which cursor types we support */
	if (!cursor_size_ok(to_i915(plane->dev), state->base.crtc_w,
			    state->base.crtc_h)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  state->base.crtc_w, state->base.crtc_h);
		return -EINVAL;
	}

	/* 4 bytes/pixel with a power-of-two stride must fit in the object. */
	stride = roundup_pow_of_two(state->base.crtc_w) * 4;
	if (obj->base.size < stride * state->base.crtc_h) {
		DRM_DEBUG_KMS("buffer is too small\n");
		return -ENOMEM;
	}

	if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse to put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(to_i915(plane->dev)) && pipe == PIPE_C &&
	    state->base.visible && state->base.crtc_x < 0) {
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	return 0;
}
15163
15164 static void
15165 intel_disable_cursor_plane(struct drm_plane *plane,
15166                            struct drm_crtc *crtc)
15167 {
15168         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
15169
15170         intel_crtc->cursor_addr = 0;
15171         intel_crtc_update_cursor(crtc, NULL);
15172 }
15173
15174 static void
15175 intel_update_cursor_plane(struct drm_plane *plane,
15176                           const struct intel_crtc_state *crtc_state,
15177                           const struct intel_plane_state *state)
15178 {
15179         struct drm_crtc *crtc = crtc_state->base.crtc;
15180         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
15181         struct drm_device *dev = plane->dev;
15182         struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
15183         uint32_t addr;
15184
15185         if (!obj)
15186                 addr = 0;
15187         else if (!INTEL_INFO(dev)->cursor_needs_physical)
15188                 addr = i915_gem_object_ggtt_offset(obj, NULL);
15189         else
15190                 addr = obj->phys_handle->busaddr;
15191
15192         intel_crtc->cursor_addr = addr;
15193         intel_crtc_update_cursor(crtc, state);
15194 }
15195
15196 static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
15197                                                    int pipe)
15198 {
15199         struct intel_plane *cursor = NULL;
15200         struct intel_plane_state *state = NULL;
15201         int ret;
15202
15203         cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
15204         if (!cursor)
15205                 goto fail;
15206
15207         state = intel_create_plane_state(&cursor->base);
15208         if (!state)
15209                 goto fail;
15210         cursor->base.state = &state->base;
15211
15212         cursor->can_scale = false;
15213         cursor->max_downscale = 1;
15214         cursor->pipe = pipe;
15215         cursor->plane = pipe;
15216         cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
15217         cursor->check_plane = intel_check_cursor_plane;
15218         cursor->update_plane = intel_update_cursor_plane;
15219         cursor->disable_plane = intel_disable_cursor_plane;
15220
15221         ret = drm_universal_plane_init(dev, &cursor->base, 0,
15222                                        &intel_plane_funcs,
15223                                        intel_cursor_formats,
15224                                        ARRAY_SIZE(intel_cursor_formats),
15225                                        DRM_PLANE_TYPE_CURSOR,
15226                                        "cursor %c", pipe_name(pipe));
15227         if (ret)
15228                 goto fail;
15229
15230         if (INTEL_INFO(dev)->gen >= 4) {
15231                 if (!dev->mode_config.rotation_property)
15232                         dev->mode_config.rotation_property =
15233                                 drm_mode_create_rotation_property(dev,
15234                                                         DRM_ROTATE_0 |
15235                                                         DRM_ROTATE_180);
15236                 if (dev->mode_config.rotation_property)
15237                         drm_object_attach_property(&cursor->base.base,
15238                                 dev->mode_config.rotation_property,
15239                                 state->base.rotation);
15240         }
15241
15242         if (INTEL_INFO(dev)->gen >=9)
15243                 state->scaler_id = -1;
15244
15245         drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
15246
15247         return &cursor->base;
15248
15249 fail:
15250         kfree(state);
15251         kfree(cursor);
15252
15253         return NULL;
15254 }
15255
15256 static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
15257         struct intel_crtc_state *crtc_state)
15258 {
15259         int i;
15260         struct intel_scaler *intel_scaler;
15261         struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
15262
15263         for (i = 0; i < intel_crtc->num_scalers; i++) {
15264                 intel_scaler = &scaler_state->scalers[i];
15265                 intel_scaler->in_use = 0;
15266                 intel_scaler->mode = PS_SCALER_MODE_DYN;
15267         }
15268
15269         scaler_state->scaler_id = -1;
15270 }
15271
/*
 * Allocate and register the crtc for @pipe, together with its primary and
 * cursor planes. On any failure the partially constructed objects are torn
 * down and the pipe is simply left without a crtc (no error is reported to
 * the caller).
 */
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state = NULL;
	struct drm_plane *primary = NULL;
	struct drm_plane *cursor = NULL;
	int ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (intel_crtc == NULL)
		return;

	/* Initial atomic state; linked both ways (crtc <-> state). */
	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state)
		goto fail;
	intel_crtc->config = crtc_state;
	intel_crtc->base.state = &crtc_state->base;
	crtc_state->base.crtc = &intel_crtc->base;

	/* initialize shared scalers */
	if (INTEL_INFO(dev)->gen >= 9) {
		/* Pipe C gets fewer scalers than the other pipes. */
		if (pipe == PIPE_C)
			intel_crtc->num_scalers = 1;
		else
			intel_crtc->num_scalers = SKL_NUM_SCALERS;

		skl_init_scalers(dev, intel_crtc, crtc_state);
	}

	primary = intel_primary_plane_create(dev, pipe);
	if (!primary)
		goto fail;

	cursor = intel_cursor_plane_create(dev, pipe);
	if (!cursor)
		goto fail;

	ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
					cursor, &intel_crtc_funcs,
					"pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	/*
	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
	 * is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	intel_crtc->pipe = pipe;
	intel_crtc->plane = pipe;
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
		intel_crtc->plane = !pipe;
	}

	/* Force the first cursor update to program the registers. */
	intel_crtc->cursor_base = ~0;
	intel_crtc->cursor_cntl = ~0;
	intel_crtc->cursor_size = ~0;

	intel_crtc->wm.cxsr_allowed = true;

	/* Catch an out-of-range plane or a mapping already claimed by another crtc. */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_color_init(&intel_crtc->base);

	/* Much of the driver assumes drm crtc index == pipe. */
	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
	return;

fail:
	intel_plane_destroy(primary);
	intel_plane_destroy(cursor);
	kfree(crtc_state);
	kfree(intel_crtc);
}
15351
15352 enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
15353 {
15354         struct drm_encoder *encoder = connector->base.encoder;
15355         struct drm_device *dev = connector->base.dev;
15356
15357         WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
15358
15359         if (!encoder || WARN_ON(!encoder->crtc))
15360                 return INVALID_PIPE;
15361
15362         return to_intel_crtc(encoder->crtc)->pipe;
15363 }
15364
15365 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
15366                                 struct drm_file *file)
15367 {
15368         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
15369         struct drm_crtc *drmmode_crtc;
15370         struct intel_crtc *crtc;
15371
15372         drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
15373         if (!drmmode_crtc)
15374                 return -ENOENT;
15375
15376         crtc = to_intel_crtc(drmmode_crtc);
15377         pipe_from_crtc_id->pipe = crtc->pipe;
15378
15379         return 0;
15380 }
15381
15382 static int intel_encoder_clones(struct intel_encoder *encoder)
15383 {
15384         struct drm_device *dev = encoder->base.dev;
15385         struct intel_encoder *source_encoder;
15386         int index_mask = 0;
15387         int entry = 0;
15388
15389         for_each_intel_encoder(dev, source_encoder) {
15390                 if (encoders_cloneable(encoder, source_encoder))
15391                         index_mask |= (1 << entry);
15392
15393                 entry++;
15394         }
15395
15396         return index_mask;
15397 }
15398
15399 static bool has_edp_a(struct drm_device *dev)
15400 {
15401         struct drm_i915_private *dev_priv = to_i915(dev);
15402
15403         if (!IS_MOBILE(dev))
15404                 return false;
15405
15406         if ((I915_READ(DP_A) & DP_DETECTED) == 0)
15407                 return false;
15408
15409         if (IS_GEN5(dev_priv) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
15410                 return false;
15411
15412         return true;
15413 }
15414
15415 static bool intel_crt_present(struct drm_device *dev)
15416 {
15417         struct drm_i915_private *dev_priv = to_i915(dev);
15418
15419         if (INTEL_INFO(dev)->gen >= 9)
15420                 return false;
15421
15422         if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
15423                 return false;
15424
15425         if (IS_CHERRYVIEW(dev_priv))
15426                 return false;
15427
15428         if (HAS_PCH_LPT_H(dev_priv) &&
15429             I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
15430                 return false;
15431
15432         /* DDI E can't be used if DDI A requires 4 lanes */
15433         if (HAS_DDI(dev_priv) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
15434                 return false;
15435
15436         if (!dev_priv->vbt.int_crt_support)
15437                 return false;
15438
15439         return true;
15440 }
15441
15442 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
15443 {
15444         int pps_num;
15445         int pps_idx;
15446
15447         if (HAS_DDI(dev_priv))
15448                 return;
15449         /*
15450          * This w/a is needed at least on CPT/PPT, but to be sure apply it
15451          * everywhere where registers can be write protected.
15452          */
15453         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15454                 pps_num = 2;
15455         else
15456                 pps_num = 1;
15457
15458         for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
15459                 u32 val = I915_READ(PP_CONTROL(pps_idx));
15460
15461                 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
15462                 I915_WRITE(PP_CONTROL(pps_idx), val);
15463         }
15464 }
15465
15466 static void intel_pps_init(struct drm_i915_private *dev_priv)
15467 {
15468         if (HAS_PCH_SPLIT(dev_priv) || IS_BROXTON(dev_priv))
15469                 dev_priv->pps_mmio_base = PCH_PPS_BASE;
15470         else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15471                 dev_priv->pps_mmio_base = VLV_PPS_BASE;
15472         else
15473                 dev_priv->pps_mmio_base = PPS_BASE;
15474
15475         intel_pps_unlock_regs_wa(dev_priv);
15476 }
15477
/*
 * Probe and register all display outputs (encoders/connectors) for the
 * device. The order matters: PPS must be set up before LVDS/eDP, LVDS must
 * be registered before eDP, and per-platform strap registers are consulted
 * to decide which ports to register. Finally the possible_crtcs/clones
 * masks are filled in for every encoder that was created.
 */
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_init(dev_priv);

	/*
	 * intel_edp_init_connector() depends on this completing first, to
	 * prevent the registeration of both eDP and LVDS and the incorrect
	 * sharing of the PPS.
	 */
	intel_lvds_init(dev);

	if (intel_crt_present(dev))
		intel_crt_init(dev);

	if (IS_BROXTON(dev_priv)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev, PORT_A);
		intel_ddi_init(dev, PORT_B);
		intel_ddi_init(dev, PORT_C);

		intel_dsi_init(dev);
	} else if (HAS_DDI(dev_priv)) {
		int found;

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
			intel_ddi_init(dev, PORT_A);

		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev, PORT_D);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
		    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
			intel_ddi_init(dev, PORT_E);

	} else if (HAS_PCH_SPLIT(dev_priv)) {
		int found;
		/* Must be checked before the HDMI D probe below. */
		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_edp(dev, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
		/* Only register HDMI when no eDP connector claimed the port. */
		if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_edp(dev, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
				intel_dp_init(dev, CHV_DP_D, PORT_D);
			if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
				intel_hdmi_init(dev, CHV_HDMID, PORT_D);
		}

		intel_dsi_init(dev);
	} else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) {
		bool found = false;

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				intel_dp_init(dev, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, GEN3_SDVOC, PORT_C);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev_priv)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				intel_dp_init(dev, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev, DP_D, PORT_D);
	} else if (IS_GEN2(dev_priv))
		intel_dvo_init(dev);

	if (SUPPORTS_TV(dev))
		intel_tv_init(dev);

	intel_psr_init(dev);

	/* With all encoders registered, fill in their crtc/clone masks. */
	for_each_intel_encoder(dev, encoder) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev);

	drm_helper_move_panel_connectors_to_head(dev);
}
15663
15664 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
15665 {
15666         struct drm_device *dev = fb->dev;
15667         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
15668
15669         drm_framebuffer_cleanup(fb);
15670         mutex_lock(&dev->struct_mutex);
15671         WARN_ON(!intel_fb->obj->framebuffer_references--);
15672         i915_gem_object_put(intel_fb->obj);
15673         mutex_unlock(&dev->struct_mutex);
15674         kfree(intel_fb);
15675 }
15676
15677 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
15678                                                 struct drm_file *file,
15679                                                 unsigned int *handle)
15680 {
15681         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
15682         struct drm_i915_gem_object *obj = intel_fb->obj;
15683
15684         if (obj->userptr.mm) {
15685                 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
15686                 return -EINVAL;
15687         }
15688
15689         return drm_gem_handle_create(file, &obj->base, handle);
15690 }
15691
15692 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
15693                                         struct drm_file *file,
15694                                         unsigned flags, unsigned color,
15695                                         struct drm_clip_rect *clips,
15696                                         unsigned num_clips)
15697 {
15698         struct drm_device *dev = fb->dev;
15699         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
15700         struct drm_i915_gem_object *obj = intel_fb->obj;
15701
15702         mutex_lock(&dev->struct_mutex);
15703         intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
15704         mutex_unlock(&dev->struct_mutex);
15705
15706         return 0;
15707 }
15708
/* vfuncs for framebuffers created by intel_framebuffer_init() */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
15714
15715 static
15716 u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
15717                          uint64_t fb_modifier, uint32_t pixel_format)
15718 {
15719         u32 gen = INTEL_INFO(dev_priv)->gen;
15720
15721         if (gen >= 9) {
15722                 int cpp = drm_format_plane_cpp(pixel_format, 0);
15723
15724                 /* "The stride in bytes must not exceed the of the size of 8K
15725                  *  pixels and 32K bytes."
15726                  */
15727                 return min(8192 * cpp, 32768);
15728         } else if (gen >= 5 && !IS_VALLEYVIEW(dev_priv) &&
15729                    !IS_CHERRYVIEW(dev_priv)) {
15730                 return 32*1024;
15731         } else if (gen >= 4) {
15732                 if (fb_modifier == I915_FORMAT_MOD_X_TILED)
15733                         return 16*1024;
15734                 else
15735                         return 32*1024;
15736         } else if (gen >= 3) {
15737                 if (fb_modifier == I915_FORMAT_MOD_X_TILED)
15738                         return 8*1024;
15739                 else
15740                         return 16*1024;
15741         } else {
15742                 /* XXX DSPC is limited to 4k tiled */
15743                 return 8*1024;
15744         }
15745 }
15746
/*
 * Validate @mode_cmd against the hardware's tiling/stride/format limits and,
 * on success, initialize @intel_fb around @obj and register it with the drm
 * core. Returns 0 or a negative error code; the caller keeps ownership of
 * @obj on failure. Must be called with struct_mutex held.
 */
static int intel_framebuffer_init(struct drm_device *dev,
                                  struct intel_framebuffer *intel_fb,
                                  struct drm_mode_fb_cmd2 *mode_cmd,
                                  struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        unsigned int tiling = i915_gem_object_get_tiling(obj);
        int ret;
        u32 pitch_limit, stride_alignment;
        char *format_name;

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
                /*
                 * If there's a fence, enforce that
                 * the fb modifier and tiling mode match.
                 */
                if (tiling != I915_TILING_NONE &&
                    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
                        DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
                        return -EINVAL;
                }
        } else {
                /* Legacy addfb: derive the modifier from the object's tiling. */
                if (tiling == I915_TILING_X) {
                        mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
                } else if (tiling == I915_TILING_Y) {
                        DRM_DEBUG("No Y tiling for legacy addfb\n");
                        return -EINVAL;
                }
        }

        /* Passed in modifier sanity checking. */
        switch (mode_cmd->modifier[0]) {
        case I915_FORMAT_MOD_Y_TILED:
        case I915_FORMAT_MOD_Yf_TILED:
                /* Y/Yf tiling is gen9+ only. */
                if (INTEL_INFO(dev)->gen < 9) {
                        DRM_DEBUG("Unsupported tiling 0x%llx!\n",
                                  mode_cmd->modifier[0]);
                        return -EINVAL;
                }
                /* fall through - valid on gen9+, same as X tiled/linear */
        case DRM_FORMAT_MOD_NONE:
        case I915_FORMAT_MOD_X_TILED:
                break;
        default:
                DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
                          mode_cmd->modifier[0]);
                return -EINVAL;
        }

        /*
         * gen2/3 display engine uses the fence if present,
         * so the tiling mode must match the fb modifier exactly.
         */
        if (INTEL_INFO(dev_priv)->gen < 4 &&
            tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
                DRM_DEBUG("tiling_mode must match fb modifier exactly on gen2/3\n");
                return -EINVAL;
        }

        /* The pitch must be a multiple of the modifier's alignment. */
        stride_alignment = intel_fb_stride_alignment(dev_priv,
                                                     mode_cmd->modifier[0],
                                                     mode_cmd->pixel_format);
        if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
                DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
                          mode_cmd->pitches[0], stride_alignment);
                return -EINVAL;
        }

        /* ...and within the hardware's maximum. */
        pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0],
                                           mode_cmd->pixel_format);
        if (mode_cmd->pitches[0] > pitch_limit) {
                DRM_DEBUG("%s pitch (%u) must be at less than %d\n",
                          mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
                          "tiled" : "linear",
                          mode_cmd->pitches[0], pitch_limit);
                return -EINVAL;
        }

        /*
         * If there's a fence, enforce that
         * the fb pitch and fence stride match.
         */
        if (tiling != I915_TILING_NONE &&
            mode_cmd->pitches[0] != i915_gem_object_get_stride(obj)) {
                DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
                          mode_cmd->pitches[0],
                          i915_gem_object_get_stride(obj));
                return -EINVAL;
        }

        /* Reject formats not supported by any plane early. */
        switch (mode_cmd->pixel_format) {
        case DRM_FORMAT_C8:
        case DRM_FORMAT_RGB565:
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_ARGB8888:
                break;
        case DRM_FORMAT_XRGB1555:
                /* gen2/3 only */
                if (INTEL_INFO(dev)->gen > 3) {
                        format_name = drm_get_format_name(mode_cmd->pixel_format);
                        DRM_DEBUG("unsupported pixel format: %s\n", format_name);
                        kfree(format_name);
                        return -EINVAL;
                }
                break;
        case DRM_FORMAT_ABGR8888:
                /* vlv/chv/gen9+ only */
                if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
                    INTEL_INFO(dev)->gen < 9) {
                        format_name = drm_get_format_name(mode_cmd->pixel_format);
                        DRM_DEBUG("unsupported pixel format: %s\n", format_name);
                        kfree(format_name);
                        return -EINVAL;
                }
                break;
        case DRM_FORMAT_XBGR8888:
        case DRM_FORMAT_XRGB2101010:
        case DRM_FORMAT_XBGR2101010:
                /* gen4+ only */
                if (INTEL_INFO(dev)->gen < 4) {
                        format_name = drm_get_format_name(mode_cmd->pixel_format);
                        DRM_DEBUG("unsupported pixel format: %s\n", format_name);
                        kfree(format_name);
                        return -EINVAL;
                }
                break;
        case DRM_FORMAT_ABGR2101010:
                /* vlv/chv only */
                if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
                        format_name = drm_get_format_name(mode_cmd->pixel_format);
                        DRM_DEBUG("unsupported pixel format: %s\n", format_name);
                        kfree(format_name);
                        return -EINVAL;
                }
                break;
        case DRM_FORMAT_YUYV:
        case DRM_FORMAT_UYVY:
        case DRM_FORMAT_YVYU:
        case DRM_FORMAT_VYUY:
                /* packed YUV is gen5+ only */
                if (INTEL_INFO(dev)->gen < 5) {
                        format_name = drm_get_format_name(mode_cmd->pixel_format);
                        DRM_DEBUG("unsupported pixel format: %s\n", format_name);
                        kfree(format_name);
                        return -EINVAL;
                }
                break;
        default:
                format_name = drm_get_format_name(mode_cmd->pixel_format);
                DRM_DEBUG("unsupported pixel format: %s\n", format_name);
                kfree(format_name);
                return -EINVAL;
        }

        /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
        if (mode_cmd->offsets[0] != 0)
                return -EINVAL;

        drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
        intel_fb->obj = obj;

        ret = intel_fill_fb_info(dev_priv, &intel_fb->base);
        if (ret)
                return ret;

        ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
        if (ret) {
                DRM_ERROR("framebuffer init failed %d\n", ret);
                return ret;
        }

        /* Object now backs a framebuffer; released in the fb destroy hook. */
        intel_fb->obj->framebuffer_references++;

        return 0;
}
15919
15920 static struct drm_framebuffer *
15921 intel_user_framebuffer_create(struct drm_device *dev,
15922                               struct drm_file *filp,
15923                               const struct drm_mode_fb_cmd2 *user_mode_cmd)
15924 {
15925         struct drm_framebuffer *fb;
15926         struct drm_i915_gem_object *obj;
15927         struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
15928
15929         obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
15930         if (!obj)
15931                 return ERR_PTR(-ENOENT);
15932
15933         fb = intel_framebuffer_create(dev, &mode_cmd, obj);
15934         if (IS_ERR(fb))
15935                 i915_gem_object_put_unlocked(obj);
15936
15937         return fb;
15938 }
15939
/* Mode config vfuncs: framebuffer creation and atomic modeset entry points. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
};
15948
15949 /**
15950  * intel_init_display_hooks - initialize the display modesetting hooks
15951  * @dev_priv: device private
15952  */
15953 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
15954 {
15955         if (INTEL_INFO(dev_priv)->gen >= 9) {
15956                 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
15957                 dev_priv->display.get_initial_plane_config =
15958                         skylake_get_initial_plane_config;
15959                 dev_priv->display.crtc_compute_clock =
15960                         haswell_crtc_compute_clock;
15961                 dev_priv->display.crtc_enable = haswell_crtc_enable;
15962                 dev_priv->display.crtc_disable = haswell_crtc_disable;
15963         } else if (HAS_DDI(dev_priv)) {
15964                 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
15965                 dev_priv->display.get_initial_plane_config =
15966                         ironlake_get_initial_plane_config;
15967                 dev_priv->display.crtc_compute_clock =
15968                         haswell_crtc_compute_clock;
15969                 dev_priv->display.crtc_enable = haswell_crtc_enable;
15970                 dev_priv->display.crtc_disable = haswell_crtc_disable;
15971         } else if (HAS_PCH_SPLIT(dev_priv)) {
15972                 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
15973                 dev_priv->display.get_initial_plane_config =
15974                         ironlake_get_initial_plane_config;
15975                 dev_priv->display.crtc_compute_clock =
15976                         ironlake_crtc_compute_clock;
15977                 dev_priv->display.crtc_enable = ironlake_crtc_enable;
15978                 dev_priv->display.crtc_disable = ironlake_crtc_disable;
15979         } else if (IS_CHERRYVIEW(dev_priv)) {
15980                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15981                 dev_priv->display.get_initial_plane_config =
15982                         i9xx_get_initial_plane_config;
15983                 dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
15984                 dev_priv->display.crtc_enable = valleyview_crtc_enable;
15985                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15986         } else if (IS_VALLEYVIEW(dev_priv)) {
15987                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15988                 dev_priv->display.get_initial_plane_config =
15989                         i9xx_get_initial_plane_config;
15990                 dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
15991                 dev_priv->display.crtc_enable = valleyview_crtc_enable;
15992                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15993         } else if (IS_G4X(dev_priv)) {
15994                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15995                 dev_priv->display.get_initial_plane_config =
15996                         i9xx_get_initial_plane_config;
15997                 dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
15998                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15999                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16000         } else if (IS_PINEVIEW(dev_priv)) {
16001                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16002                 dev_priv->display.get_initial_plane_config =
16003                         i9xx_get_initial_plane_config;
16004                 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
16005                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
16006                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16007         } else if (!IS_GEN2(dev_priv)) {
16008                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16009                 dev_priv->display.get_initial_plane_config =
16010                         i9xx_get_initial_plane_config;
16011                 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
16012                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
16013                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16014         } else {
16015                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16016                 dev_priv->display.get_initial_plane_config =
16017                         i9xx_get_initial_plane_config;
16018                 dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
16019                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
16020                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16021         }
16022
16023         /* Returns the core display clock speed */
16024         if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
16025                 dev_priv->display.get_display_clock_speed =
16026                         skylake_get_display_clock_speed;
16027         else if (IS_BROXTON(dev_priv))
16028                 dev_priv->display.get_display_clock_speed =
16029                         broxton_get_display_clock_speed;
16030         else if (IS_BROADWELL(dev_priv))
16031                 dev_priv->display.get_display_clock_speed =
16032                         broadwell_get_display_clock_speed;
16033         else if (IS_HASWELL(dev_priv))
16034                 dev_priv->display.get_display_clock_speed =
16035                         haswell_get_display_clock_speed;
16036         else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
16037                 dev_priv->display.get_display_clock_speed =
16038                         valleyview_get_display_clock_speed;
16039         else if (IS_GEN5(dev_priv))
16040                 dev_priv->display.get_display_clock_speed =
16041                         ilk_get_display_clock_speed;
16042         else if (IS_I945G(dev_priv) || IS_BROADWATER(dev_priv) ||
16043                  IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
16044                 dev_priv->display.get_display_clock_speed =
16045                         i945_get_display_clock_speed;
16046         else if (IS_GM45(dev_priv))
16047                 dev_priv->display.get_display_clock_speed =
16048                         gm45_get_display_clock_speed;
16049         else if (IS_CRESTLINE(dev_priv))
16050                 dev_priv->display.get_display_clock_speed =
16051                         i965gm_get_display_clock_speed;
16052         else if (IS_PINEVIEW(dev_priv))
16053                 dev_priv->display.get_display_clock_speed =
16054                         pnv_get_display_clock_speed;
16055         else if (IS_G33(dev_priv) || IS_G4X(dev_priv))
16056                 dev_priv->display.get_display_clock_speed =
16057                         g33_get_display_clock_speed;
16058         else if (IS_I915G(dev_priv))
16059                 dev_priv->display.get_display_clock_speed =
16060                         i915_get_display_clock_speed;
16061         else if (IS_I945GM(dev_priv) || IS_845G(dev_priv))
16062                 dev_priv->display.get_display_clock_speed =
16063                         i9xx_misc_get_display_clock_speed;
16064         else if (IS_I915GM(dev_priv))
16065                 dev_priv->display.get_display_clock_speed =
16066                         i915gm_get_display_clock_speed;
16067         else if (IS_I865G(dev_priv))
16068                 dev_priv->display.get_display_clock_speed =
16069                         i865_get_display_clock_speed;
16070         else if (IS_I85X(dev_priv))
16071                 dev_priv->display.get_display_clock_speed =
16072                         i85x_get_display_clock_speed;
16073         else { /* 830 */
16074                 WARN(!IS_I830(dev_priv), "Unknown platform. Assuming 133 MHz CDCLK\n");
16075                 dev_priv->display.get_display_clock_speed =
16076                         i830_get_display_clock_speed;
16077         }
16078
16079         if (IS_GEN5(dev_priv)) {
16080                 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
16081         } else if (IS_GEN6(dev_priv)) {
16082                 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
16083         } else if (IS_IVYBRIDGE(dev_priv)) {
16084                 /* FIXME: detect B0+ stepping and use auto training */
16085                 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
16086         } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
16087                 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
16088         }
16089
16090         if (IS_BROADWELL(dev_priv)) {
16091                 dev_priv->display.modeset_commit_cdclk =
16092                         broadwell_modeset_commit_cdclk;
16093                 dev_priv->display.modeset_calc_cdclk =
16094                         broadwell_modeset_calc_cdclk;
16095         } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
16096                 dev_priv->display.modeset_commit_cdclk =
16097                         valleyview_modeset_commit_cdclk;
16098                 dev_priv->display.modeset_calc_cdclk =
16099                         valleyview_modeset_calc_cdclk;
16100         } else if (IS_BROXTON(dev_priv)) {
16101                 dev_priv->display.modeset_commit_cdclk =
16102                         bxt_modeset_commit_cdclk;
16103                 dev_priv->display.modeset_calc_cdclk =
16104                         bxt_modeset_calc_cdclk;
16105         } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
16106                 dev_priv->display.modeset_commit_cdclk =
16107                         skl_modeset_commit_cdclk;
16108                 dev_priv->display.modeset_calc_cdclk =
16109                         skl_modeset_calc_cdclk;
16110         }
16111
16112         if (dev_priv->info.gen >= 9)
16113                 dev_priv->display.update_crtcs = skl_update_crtcs;
16114         else
16115                 dev_priv->display.update_crtcs = intel_update_crtcs;
16116
16117         switch (INTEL_INFO(dev_priv)->gen) {
16118         case 2:
16119                 dev_priv->display.queue_flip = intel_gen2_queue_flip;
16120                 break;
16121
16122         case 3:
16123                 dev_priv->display.queue_flip = intel_gen3_queue_flip;
16124                 break;
16125
16126         case 4:
16127         case 5:
16128                 dev_priv->display.queue_flip = intel_gen4_queue_flip;
16129                 break;
16130
16131         case 6:
16132                 dev_priv->display.queue_flip = intel_gen6_queue_flip;
16133                 break;
16134         case 7:
16135         case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
16136                 dev_priv->display.queue_flip = intel_gen7_queue_flip;
16137                 break;
16138         case 9:
16139                 /* Drop through - unsupported since execlist only. */
16140         default:
16141                 /* Default just returns -ENODEV to indicate unsupported */
16142                 dev_priv->display.queue_flip = intel_default_queue_flip;
16143         }
16144 }
16145
16146 /*
16147  * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
16148  * resume, or other times.  This quirk makes sure that's the case for
16149  * affected systems.
16150  */
16151 static void quirk_pipea_force(struct drm_device *dev)
16152 {
16153         struct drm_i915_private *dev_priv = to_i915(dev);
16154
16155         dev_priv->quirks |= QUIRK_PIPEA_FORCE;
16156         DRM_INFO("applying pipe a force quirk\n");
16157 }
16158
16159 static void quirk_pipeb_force(struct drm_device *dev)
16160 {
16161         struct drm_i915_private *dev_priv = to_i915(dev);
16162
16163         dev_priv->quirks |= QUIRK_PIPEB_FORCE;
16164         DRM_INFO("applying pipe b force quirk\n");
16165 }
16166
16167 /*
16168  * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
16169  */
16170 static void quirk_ssc_force_disable(struct drm_device *dev)
16171 {
16172         struct drm_i915_private *dev_priv = to_i915(dev);
16173         dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
16174         DRM_INFO("applying lvds SSC disable quirk\n");
16175 }
16176
16177 /*
16178  * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
16179  * brightness value
16180  */
16181 static void quirk_invert_brightness(struct drm_device *dev)
16182 {
16183         struct drm_i915_private *dev_priv = to_i915(dev);
16184         dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
16185         DRM_INFO("applying inverted panel brightness quirk\n");
16186 }
16187
16188 /* Some VBT's incorrectly indicate no backlight is present */
16189 static void quirk_backlight_present(struct drm_device *dev)
16190 {
16191         struct drm_i915_private *dev_priv = to_i915(dev);
16192         dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
16193         DRM_INFO("applying backlight present quirk\n");
16194 }
16195
/* A PCI-ID keyed quirk entry; matched in intel_init_quirks(). */
struct intel_quirk {
	int device;		/* PCI device ID */
	int subsystem_vendor;	/* PCI subsystem vendor ID, or PCI_ANY_ID */
	int subsystem_device;	/* PCI subsystem device ID, or PCI_ANY_ID */
	void (*hook)(struct drm_device *dev);	/* applied on a match */
};
16202
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);	/* applied on a DMI match */
	const struct dmi_system_id (*dmi_id_list)[];	/* NULL-terminated DMI table */
};
16208
/*
 * DMI match callback: log which system matched.  The non-zero return
 * tells dmi_check_system() to stop scanning further table entries.
 */
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
	return 1;
}
16214
/* DMI-matched quirks, applied by intel_init_quirks(). */
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				/* Empty product name matches any product from this vendor. */
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};
16230
/*
 * PCI-ID keyed quirk table; entries are matched against the device and
 * subsystem IDs in intel_init_quirks().  PCI_ANY_ID wildcards a
 * subsystem field.
 */
static struct intel_quirk intel_quirks[] = {
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* 830 needs to leave pipe A & dpll A up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

	/* 830 needs to leave pipe B & dpll B up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },

	/* Acer Aspire 5336 */
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },

	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },

	/* Acer C720 Chromebook (Core i3 4005U) */
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },

	/* Apple Macbook 2,1 (Core 2 T7400) */
	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },

	/* Apple Macbook 4,1 */
	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },

	/* Toshiba CB35 Chromebook (Celeron 2955U) */
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },

	/* HP Chromebook 14 (Celeron 2955U) */
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },

	/* Dell Chromebook 11 */
	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },

	/* Dell Chromebook 11 (2015 version) */
	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
};
16292
16293 static void intel_init_quirks(struct drm_device *dev)
16294 {
16295         struct pci_dev *d = dev->pdev;
16296         int i;
16297
16298         for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
16299                 struct intel_quirk *q = &intel_quirks[i];
16300
16301                 if (d->device == q->device &&
16302                     (d->subsystem_vendor == q->subsystem_vendor ||
16303                      q->subsystem_vendor == PCI_ANY_ID) &&
16304                     (d->subsystem_device == q->subsystem_device ||
16305                      q->subsystem_device == PCI_ANY_ID))
16306                         q->hook(dev);
16307         }
16308         for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
16309                 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
16310                         intel_dmi_quirks[i].hook(dev);
16311         }
16312 }
16313
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	u8 sr1;
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
	/*
	 * Set bit 5 ("screen off") in VGA sequencer register SR01 via the
	 * legacy index/data port pair, read-modify-write.
	 */
	outb(SR01, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1<<5, VGA_SR_DATA);
	vga_put(pdev, VGA_RSRC_LEGACY_IO);
	/* Short settle delay before disabling the plane itself. */
	udelay(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);	/* flush the register write */
}
16333
/*
 * Re-initialize display hardware state that does not survive e.g. a
 * suspend/resume cycle: refresh the cached CDCLK value and reapply the
 * clock gating workarounds.
 */
void intel_modeset_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_update_cdclk(dev);

	/* Seed the atomic cdclk bookkeeping with the current hw value. */
	dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;

	intel_init_clock_gating(dev);
}
16344
16345 /*
16346  * Calculate what we think the watermarks should be for the state we've read
16347  * out of the hardware and then immediately program those watermarks so that
16348  * we ensure the hardware settings match our internal state.
16349  *
16350  * We can calculate what we think WM's should be by creating a duplicate of the
16351  * current state (which was constructed during hardware readout) and running it
16352  * through the atomic check code to calculate new watermark values in the
16353  * state object.
16354  */
16355 static void sanitize_watermarks(struct drm_device *dev)
16356 {
16357         struct drm_i915_private *dev_priv = to_i915(dev);
16358         struct drm_atomic_state *state;
16359         struct drm_crtc *crtc;
16360         struct drm_crtc_state *cstate;
16361         struct drm_modeset_acquire_ctx ctx;
16362         int ret;
16363         int i;
16364
16365         /* Only supported on platforms that use atomic watermark design */
16366         if (!dev_priv->display.optimize_watermarks)
16367                 return;
16368
16369         /*
16370          * We need to hold connection_mutex before calling duplicate_state so
16371          * that the connector loop is protected.
16372          */
16373         drm_modeset_acquire_init(&ctx, 0);
16374 retry:
16375         ret = drm_modeset_lock_all_ctx(dev, &ctx);
16376         if (ret == -EDEADLK) {
16377                 drm_modeset_backoff(&ctx);
16378                 goto retry;
16379         } else if (WARN_ON(ret)) {
16380                 goto fail;
16381         }
16382
16383         state = drm_atomic_helper_duplicate_state(dev, &ctx);
16384         if (WARN_ON(IS_ERR(state)))
16385                 goto fail;
16386
16387         /*
16388          * Hardware readout is the only time we don't want to calculate
16389          * intermediate watermarks (since we don't trust the current
16390          * watermarks).
16391          */
16392         to_intel_atomic_state(state)->skip_intermediate_wm = true;
16393
16394         ret = intel_atomic_check(dev, state);
16395         if (ret) {
16396                 /*
16397                  * If we fail here, it means that the hardware appears to be
16398                  * programmed in a way that shouldn't be possible, given our
16399                  * understanding of watermark requirements.  This might mean a
16400                  * mistake in the hardware readout code or a mistake in the
16401                  * watermark calculations for a given platform.  Raise a WARN
16402                  * so that this is noticeable.
16403                  *
16404                  * If this actually happens, we'll have to just leave the
16405                  * BIOS-programmed watermarks untouched and hope for the best.
16406                  */
16407                 WARN(true, "Could not determine valid watermarks for inherited state\n");
16408                 goto fail;
16409         }
16410
16411         /* Write calculated watermark values back */
16412         for_each_crtc_in_state(state, crtc, cstate, i) {
16413                 struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
16414
16415                 cs->wm.need_postvbl_update = true;
16416                 dev_priv->display.optimize_watermarks(cs);
16417         }
16418
16419         drm_atomic_state_free(state);
16420 fail:
16421         drm_modeset_drop_locks(&ctx);
16422         drm_modeset_acquire_fini(&ctx);
16423 }
16424
/*
 * One-time modeset initialization at driver load: sets up the DRM mode
 * config, applies quirks, creates CRTCs/planes/outputs, reads out the
 * BIOS-programmed hardware state and sanitizes the watermarks.
 */
void intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int sprite, ret;
	enum pipe pipe;
	struct intel_crtc *crtc;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.allow_fb_modifiers = true;

	dev->mode_config.funcs = &intel_mode_funcs;

	intel_init_quirks(dev);

	intel_init_pm(dev);

	/* Nothing more to do on display-less hardware. */
	if (INTEL_INFO(dev)->num_pipes == 0)
		return;

	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker.  Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
					    DREF_SSC1_ENABLE);

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
				     bios_lvds_use_ssc ? "en" : "dis",
				     dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}

	/* Maximum framebuffer dimensions per generation. */
	if (IS_GEN2(dev_priv)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev_priv)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}

	/* Maximum cursor dimensions per generation. */
	if (IS_845G(dev_priv) || IS_I865G(dev_priv)) {
		dev->mode_config.cursor_width = IS_845G(dev_priv) ? 64 : 512;
		dev->mode_config.cursor_height = 1023;
	} else if (IS_GEN2(dev_priv)) {
		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
	} else {
		dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
	}

	dev->mode_config.fb_base = ggtt->mappable_base;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_INFO(dev)->num_pipes,
		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");

	/* Create a CRTC for each pipe, with its sprite planes. */
	for_each_pipe(dev_priv, pipe) {
		intel_crtc_init(dev, pipe);
		for_each_sprite(dev_priv, pipe, sprite) {
			ret = intel_plane_init(dev, pipe, sprite);
			if (ret)
				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
					      pipe_name(pipe), sprite_name(pipe, sprite), ret);
		}
	}

	intel_update_czclk(dev_priv);
	intel_update_cdclk(dev);

	intel_shared_dpll_init(dev);

	if (dev_priv->max_cdclk_freq == 0)
		intel_update_max_cdclk(dev);

	/* Just disable it once at startup */
	i915_disable_vga(dev);
	intel_setup_outputs(dev);

	/* Read out the BIOS-programmed hardware state under all locks. */
	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top.  This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		dev_priv->display.get_initial_plane_config(crtc,
							   &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	sanitize_watermarks(dev);
}
16554
16555 static void intel_enable_pipe_a(struct drm_device *dev)
16556 {
16557         struct intel_connector *connector;
16558         struct drm_connector *crt = NULL;
16559         struct intel_load_detect_pipe load_detect_temp;
16560         struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
16561
16562         /* We can't just switch on the pipe A, we need to set things up with a
16563          * proper mode and output configuration. As a gross hack, enable pipe A
16564          * by enabling the load detect pipe once. */
16565         for_each_intel_connector(dev, connector) {
16566                 if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
16567                         crt = &connector->base;
16568                         break;
16569                 }
16570         }
16571
16572         if (!crt)
16573                 return;
16574
16575         if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
16576                 intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
16577 }
16578
/*
 * Check that the primary plane attached to this crtc actually scans out
 * this crtc's pipe.  Returns false when the *other* plane is enabled and
 * selects this pipe, i.e. the BIOS left a crossed plane->pipe mapping.
 */
static bool
intel_check_plane_mapping(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val;

	/* With a single pipe there is nothing to cross-map. */
	if (INTEL_INFO(dev)->num_pipes == 1)
		return true;

	/* Read the control register of the other primary plane. */
	val = I915_READ(DSPCNTR(!crtc->plane));

	/*
	 * !! collapses the pipe-select field to 0/1, so this comparison
	 * only distinguishes two pipes — presumably fine since the caller
	 * only runs this on gen < 4; verify if reused elsewhere.
	 */
	if ((val & DISPLAY_PLANE_ENABLE) &&
	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
		return false;

	return true;
}
16597
16598 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
16599 {
16600         struct drm_device *dev = crtc->base.dev;
16601         struct intel_encoder *encoder;
16602
16603         for_each_encoder_on_crtc(dev, &crtc->base, encoder)
16604                 return true;
16605
16606         return false;
16607 }
16608
16609 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
16610 {
16611         struct drm_device *dev = encoder->base.dev;
16612         struct intel_connector *connector;
16613
16614         for_each_connector_on_encoder(dev, &encoder->base, connector)
16615                 return connector;
16616
16617         return NULL;
16618 }
16619
16620 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
16621                               enum transcoder pch_transcoder)
16622 {
16623         return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
16624                 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A);
16625 }
16626
16627 static void intel_sanitize_crtc(struct intel_crtc *crtc)
16628 {
16629         struct drm_device *dev = crtc->base.dev;
16630         struct drm_i915_private *dev_priv = to_i915(dev);
16631         enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
16632
16633         /* Clear any frame start delays used for debugging left by the BIOS */
16634         if (!transcoder_is_dsi(cpu_transcoder)) {
16635                 i915_reg_t reg = PIPECONF(cpu_transcoder);
16636
16637                 I915_WRITE(reg,
16638                            I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
16639         }
16640
16641         /* restore vblank interrupts to correct state */
16642         drm_crtc_vblank_reset(&crtc->base);
16643         if (crtc->active) {
16644                 struct intel_plane *plane;
16645
16646                 drm_crtc_vblank_on(&crtc->base);
16647
16648                 /* Disable everything but the primary plane */
16649                 for_each_intel_plane_on_crtc(dev, crtc, plane) {
16650                         if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
16651                                 continue;
16652
16653                         plane->disable_plane(&plane->base, &crtc->base);
16654                 }
16655         }
16656
16657         /* We need to sanitize the plane -> pipe mapping first because this will
16658          * disable the crtc (and hence change the state) if it is wrong. Note
16659          * that gen4+ has a fixed plane -> pipe mapping.  */
16660         if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
16661                 bool plane;
16662
16663                 DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n",
16664                               crtc->base.base.id, crtc->base.name);
16665
16666                 /* Pipe has the wrong plane attached and the plane is active.
16667                  * Temporarily change the plane mapping and disable everything
16668                  * ...  */
16669                 plane = crtc->plane;
16670                 to_intel_plane_state(crtc->base.primary->state)->base.visible = true;
16671                 crtc->plane = !plane;
16672                 intel_crtc_disable_noatomic(&crtc->base);
16673                 crtc->plane = plane;
16674         }
16675
16676         if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
16677             crtc->pipe == PIPE_A && !crtc->active) {
16678                 /* BIOS forgot to enable pipe A, this mostly happens after
16679                  * resume. Force-enable the pipe to fix this, the update_dpms
16680                  * call below we restore the pipe to the right state, but leave
16681                  * the required bits on. */
16682                 intel_enable_pipe_a(dev);
16683         }
16684
16685         /* Adjust the state of the output pipe according to whether we
16686          * have active connectors/encoders. */
16687         if (crtc->active && !intel_crtc_has_encoders(crtc))
16688                 intel_crtc_disable_noatomic(&crtc->base);
16689
16690         if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
16691                 /*
16692                  * We start out with underrun reporting disabled to avoid races.
16693                  * For correct bookkeeping mark this on active crtcs.
16694                  *
16695                  * Also on gmch platforms we dont have any hardware bits to
16696                  * disable the underrun reporting. Which means we need to start
16697                  * out with underrun reporting disabled also on inactive pipes,
16698                  * since otherwise we'll complain about the garbage we read when
16699                  * e.g. coming up after runtime pm.
16700                  *
16701                  * No protection against concurrent access is required - at
16702                  * worst a fifo underrun happens which also sets this to false.
16703                  */
16704                 crtc->cpu_fifo_underrun_disabled = true;
16705                 /*
16706                  * We track the PCH trancoder underrun reporting state
16707                  * within the crtc. With crtc for pipe A housing the underrun
16708                  * reporting state for PCH transcoder A, crtc for pipe B housing
16709                  * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
16710                  * and marking underrun reporting as disabled for the non-existing
16711                  * PCH transcoders B and C would prevent enabling the south
16712                  * error interrupt (see cpt_can_enable_serr_int()).
16713                  */
16714                 if (has_pch_trancoder(dev_priv, (enum transcoder)crtc->pipe))
16715                         crtc->pch_fifo_underrun_disabled = true;
16716         }
16717 }
16718
16719 static void intel_sanitize_encoder(struct intel_encoder *encoder)
16720 {
16721         struct intel_connector *connector;
16722
16723         /* We need to check both for a crtc link (meaning that the
16724          * encoder is active and trying to read from a pipe) and the
16725          * pipe itself being active. */
16726         bool has_active_crtc = encoder->base.crtc &&
16727                 to_intel_crtc(encoder->base.crtc)->active;
16728
16729         connector = intel_encoder_find_connector(encoder);
16730         if (connector && !has_active_crtc) {
16731                 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
16732                               encoder->base.base.id,
16733                               encoder->base.name);
16734
16735                 /* Connector is active, but has no active pipe. This is
16736                  * fallout from our resume register restoring. Disable
16737                  * the encoder manually again. */
16738                 if (encoder->base.crtc) {
16739                         struct drm_crtc_state *crtc_state = encoder->base.crtc->state;
16740
16741                         DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
16742                                       encoder->base.base.id,
16743                                       encoder->base.name);
16744                         encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
16745                         if (encoder->post_disable)
16746                                 encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
16747                 }
16748                 encoder->base.crtc = NULL;
16749
16750                 /* Inconsistent output/port/pipe state happens presumably due to
16751                  * a bug in one of the get_hw_state functions. Or someplace else
16752                  * in our code, like the register restore mess on resume. Clamp
16753                  * things to off as a safer default. */
16754
16755                 connector->base.dpms = DRM_MODE_DPMS_OFF;
16756                 connector->base.encoder = NULL;
16757         }
16758         /* Enabled encoders without active connectors will be fixed in
16759          * the crtc fixup. */
16760 }
16761
16762 void i915_redisable_vga_power_on(struct drm_device *dev)
16763 {
16764         struct drm_i915_private *dev_priv = to_i915(dev);
16765         i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
16766
16767         if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
16768                 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
16769                 i915_disable_vga(dev);
16770         }
16771 }
16772
16773 void i915_redisable_vga(struct drm_device *dev)
16774 {
16775         struct drm_i915_private *dev_priv = to_i915(dev);
16776
16777         /* This function can be called both from intel_modeset_setup_hw_state or
16778          * at a very early point in our resume sequence, where the power well
16779          * structures are not yet restored. Since this function is at a very
16780          * paranoid "someone might have enabled VGA while we were not looking"
16781          * level, just check if the power well is enabled instead of trying to
16782          * follow the "don't touch the power well if we don't need it" policy
16783          * the rest of the driver uses. */
16784         if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
16785                 return;
16786
16787         i915_redisable_vga_power_on(dev);
16788
16789         intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
16790 }
16791
16792 static bool primary_get_hw_state(struct intel_plane *plane)
16793 {
16794         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
16795
16796         return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
16797 }
16798
16799 /* FIXME read out full plane state for all planes */
16800 static void readout_plane_state(struct intel_crtc *crtc)
16801 {
16802         struct drm_plane *primary = crtc->base.primary;
16803         struct intel_plane_state *plane_state =
16804                 to_intel_plane_state(primary->state);
16805
16806         plane_state->base.visible = crtc->active &&
16807                 primary_get_hw_state(to_intel_plane(primary));
16808
16809         if (plane_state->base.visible)
16810                 crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
16811 }
16812
/*
 * Read the current display hardware state into the software state
 * structures: per-crtc pipe config and plane visibility, shared DPLL
 * state, encoder->crtc links, connector->encoder links, and finally
 * the drm-level modes. Only register reads; no hardware is changed.
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe;
        struct intel_crtc *crtc;
        struct intel_encoder *encoder;
        struct intel_connector *connector;
        int i;

        dev_priv->active_crtcs = 0;

        for_each_intel_crtc(dev, crtc) {
                struct intel_crtc_state *crtc_state = crtc->config;
                int pixclk = 0;

                /* Throw away any stale software state before readout. */
                __drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
                memset(crtc_state, 0, sizeof(*crtc_state));
                crtc_state->base.crtc = &crtc->base;

                crtc_state->base.active = crtc_state->base.enable =
                        dev_priv->display.get_pipe_config(crtc, crtc_state);

                crtc->base.enabled = crtc_state->base.enable;
                crtc->active = crtc_state->base.active;

                if (crtc_state->base.active) {
                        dev_priv->active_crtcs |= 1 << crtc->pipe;

                        /*
                         * Compute the pixel clock used for min-cdclk
                         * bookkeeping; other platforms are expected to
                         * have no modeset_calc_cdclk hook at all.
                         */
                        if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
                                pixclk = ilk_pipe_pixel_rate(crtc_state);
                        else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                                pixclk = crtc_state->base.adjusted_mode.crtc_clock;
                        else
                                WARN_ON(dev_priv->display.modeset_calc_cdclk);

                        /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
                        if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
                                pixclk = DIV_ROUND_UP(pixclk * 100, 95);
                }

                dev_priv->min_pixclk[crtc->pipe] = pixclk;

                readout_plane_state(crtc);

                DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
                              crtc->base.base.id, crtc->base.name,
                              crtc->active ? "enabled" : "disabled");
        }

        /* Read out shared DPLL state and rebuild each PLL's crtc mask. */
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

                pll->on = pll->funcs.get_hw_state(dev_priv, pll,
                                                  &pll->config.hw_state);
                pll->config.crtc_mask = 0;
                for_each_intel_crtc(dev, crtc) {
                        if (crtc->active && crtc->config->shared_dpll == pll)
                                pll->config.crtc_mask |= 1 << crtc->pipe;
                }
                pll->active_mask = pll->config.crtc_mask;

                DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
                              pll->name, pll->config.crtc_mask, pll->on);
        }

        /* Link each active encoder to the crtc feeding it. */
        for_each_intel_encoder(dev, encoder) {
                pipe = 0;

                if (encoder->get_hw_state(encoder, &pipe)) {
                        crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
                        encoder->base.crtc = &crtc->base;
                        crtc->config->output_types |= 1 << encoder->type;
                        encoder->get_config(encoder, crtc->config);
                } else {
                        encoder->base.crtc = NULL;
                }

                DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
                              encoder->base.base.id,
                              encoder->base.name,
                              encoder->base.crtc ? "enabled" : "disabled",
                              pipe_name(pipe));
        }

        /* Link each connector to its encoder and update the crtc masks. */
        for_each_intel_connector(dev, connector) {
                if (connector->get_hw_state(connector)) {
                        connector->base.dpms = DRM_MODE_DPMS_ON;

                        encoder = connector->encoder;
                        connector->base.encoder = &encoder->base;

                        if (encoder->base.crtc &&
                            encoder->base.crtc->state->active) {
                                /*
                                 * This has to be done during hardware readout
                                 * because anything calling .crtc_disable may
                                 * rely on the connector_mask being accurate.
                                 */
                                encoder->base.crtc->state->connector_mask |=
                                        1 << drm_connector_index(&connector->base);
                                encoder->base.crtc->state->encoder_mask |=
                                        1 << drm_encoder_index(&encoder->base);
                        }

                } else {
                        connector->base.dpms = DRM_MODE_DPMS_OFF;
                        connector->base.encoder = NULL;
                }
                DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
                              connector->base.base.id,
                              connector->base.name,
                              connector->base.encoder ? "enabled" : "disabled");
        }

        /* Derive the drm-level modes from the read-out pipe configs. */
        for_each_intel_crtc(dev, crtc) {
                crtc->base.hwmode = crtc->config->base.adjusted_mode;

                memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
                if (crtc->base.state->active) {
                        intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
                        intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
                        WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));

                        /*
                         * The initial mode needs to be set in order to keep
                         * the atomic core happy. It wants a valid mode if the
                         * crtc's enabled, so we do the above call.
                         *
                         * At this point some state updated by the connectors
                         * in their ->detect() callback has not run yet, so
                         * no recalculation can be done yet.
                         *
                         * Even if we could do a recalculation and modeset
                         * right now it would cause a double modeset if
                         * fbdev or userspace chooses a different initial mode.
                         *
                         * If that happens, someone indicated they wanted a
                         * mode change, which means it's safe to do a full
                         * recalculation.
                         */
                        crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;

                        drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
                        update_scanline_offset(crtc);
                }

                intel_pipe_config_sanity_check(dev_priv, crtc->config);
        }
}
16962
/*
 * Scan out the current hw modeset state and sanitize it so the
 * software state matches what the hardware is actually doing.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe;
        struct intel_crtc *crtc;
        struct intel_encoder *encoder;
        int i;

        intel_modeset_readout_hw_state(dev);

        /* HW state is read out, now we need to sanitize this mess. */
        for_each_intel_encoder(dev, encoder) {
                intel_sanitize_encoder(encoder);
        }

        /* Sanitize crtcs after the encoders so encoder fixups are seen. */
        for_each_pipe(dev_priv, pipe) {
                crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
                intel_sanitize_crtc(crtc);
                intel_dump_pipe_config(crtc, crtc->config,
                                       "[setup_hw_state]");
        }

        intel_modeset_update_connector_atomic_state(dev);

        /* Turn off any DPLL the BIOS left running that nothing uses. */
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

                if (!pll->on || pll->active_mask)
                        continue;

                DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);

                pll->funcs.disable(dev_priv, pll);
                pll->on = false;
        }

        /* Read out watermark state with the platform-specific helper. */
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                vlv_wm_get_hw_state(dev);
        else if (IS_GEN9(dev_priv))
                skl_wm_get_hw_state(dev);
        else if (HAS_PCH_SPLIT(dev_priv))
                ilk_wm_get_hw_state(dev);

        /*
         * Grab the power domains each active crtc needs; the WARN fires
         * if a domain was not already held (i.e. the readout and the
         * power bookkeeping disagree), in which case drop it again.
         */
        for_each_intel_crtc(dev, crtc) {
                unsigned long put_domains;

                put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
                if (WARN_ON(put_domains))
                        modeset_put_power_domains(dev_priv, put_domains);
        }
        intel_display_set_init_power(dev_priv, false);

        intel_fbc_init_pipe_state(dev_priv);
}
17021
/*
 * Restore the modeset state saved in dev_priv->modeset_restore_state
 * (stashed at suspend/reset time); __intel_display_resume() handles a
 * NULL state by recommitting the current one.
 */
void intel_display_resume(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_atomic_state *state = dev_priv->modeset_restore_state;
        struct drm_modeset_acquire_ctx ctx;
        int ret;

        dev_priv->modeset_restore_state = NULL;
        if (state)
                state->acquire_ctx = &ctx;

        /*
         * This is a kludge because with real atomic modeset mode_config.mutex
         * won't be taken. Unfortunately some probed state like
         * audio_codec_enable is still protected by mode_config.mutex, so lock
         * it here for now.
         */
        mutex_lock(&dev->mode_config.mutex);
        drm_modeset_acquire_init(&ctx, 0);

        /* Retry the lock acquisition until we stop hitting deadlocks. */
        while (1) {
                ret = drm_modeset_lock_all_ctx(dev, &ctx);
                if (ret != -EDEADLK)
                        break;

                drm_modeset_backoff(&ctx);
        }

        if (!ret)
                ret = __intel_display_resume(dev, state);

        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);
        mutex_unlock(&dev->mode_config.mutex);

        if (ret) {
                /* On failure the saved state will never be committed; free it. */
                DRM_ERROR("Restoring old state failed with %i\n", ret);
                drm_atomic_state_free(state);
        }
}
17062
/*
 * Display initialization that needs GEM up: GT powersave, hardware
 * init, overlay setup, and pinning of any boot framebuffers that were
 * allocated before pinning was possible.
 */
void intel_modeset_gem_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *c;
        struct drm_i915_gem_object *obj;

        intel_init_gt_powersave(dev_priv);

        intel_modeset_init_hw(dev);

        intel_setup_overlay(dev_priv);

        /*
         * Make sure any fbs we allocated at startup are properly
         * pinned & fenced.  When we do the allocation it's too early
         * for this.
         */
        for_each_crtc(dev, c) {
                struct i915_vma *vma;

                obj = intel_fb_obj(c->primary->fb);
                if (obj == NULL)
                        continue;

                mutex_lock(&dev->struct_mutex);
                vma = intel_pin_and_fence_fb_obj(c->primary->fb,
                                                 c->primary->state->rotation);
                mutex_unlock(&dev->struct_mutex);
                if (IS_ERR(vma)) {
                        /*
                         * Pinning failed: drop the fb reference and
                         * detach it from the primary plane (both legacy
                         * and atomic state) so we never scan out an
                         * unpinned buffer.
                         */
                        DRM_ERROR("failed to pin boot fb on pipe %d\n",
                                  to_intel_crtc(c)->pipe);
                        drm_framebuffer_unreference(c->primary->fb);
                        c->primary->fb = NULL;
                        c->primary->crtc = c->primary->state->crtc = NULL;
                        update_state_fb(c->primary);
                        c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
                }
        }
}
17102
/*
 * Late connector registration work; currently only registers the
 * backlight sysfs device for the connector's panel (a no-op for
 * connectors without a backlight).
 *
 * Returns 0 on success or a negative error code.
 */
int intel_connector_register(struct drm_connector *connector)
{
        struct intel_connector *intel_connector = to_intel_connector(connector);

        /*
         * There is only one registration step, so the previous
         * "goto err" scaffolding collapsed to a direct return.
         */
        return intel_backlight_device_register(intel_connector);
}
17117
void intel_connector_unregister(struct drm_connector *connector)
{
        /*
         * Undo intel_connector_register(): drop the backlight sysfs
         * device, then free the panel backlight state itself.
         */
        intel_backlight_device_unregister(to_intel_connector(connector));
        intel_panel_destroy_backlight(connector);
}
17125
/*
 * Tear down the modeset side of the driver. The ordering below is
 * deliberate: interrupts and polling go first so nothing re-arms work
 * while the rest is dismantled.
 */
void intel_modeset_cleanup(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);

        intel_disable_gt_powersave(dev_priv);

        /*
         * Interrupts and polling as the first thing to avoid creating havoc.
         * Too much stuff here (turning of connectors, ...) would
         * experience fancy races otherwise.
         */
        intel_irq_uninstall(dev_priv);

        /*
         * Due to the hpd irq storm handling the hotplug work can re-arm the
         * poll handlers. Hence disable polling after hpd handling is shut down.
         */
        drm_kms_helper_poll_fini(dev);

        intel_unregister_dsm_handler();

        intel_fbc_global_disable(dev_priv);

        /* flush any delayed tasks or pending work */
        flush_scheduled_work();

        drm_mode_config_cleanup(dev);

        intel_cleanup_overlay(dev_priv);

        intel_cleanup_gt_powersave(dev_priv);

        /* GMBUS last: nothing above should still need i2c after this. */
        intel_teardown_gmbus(dev);
}
17160
17161 void intel_connector_attach_encoder(struct intel_connector *connector,
17162                                     struct intel_encoder *encoder)
17163 {
17164         connector->encoder = encoder;
17165         drm_mode_connector_attach_encoder(&connector->base,
17166                                           &encoder->base);
17167 }
17168
17169 /*
17170  * set vga decode state - true == enable VGA decode
17171  */
17172 int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
17173 {
17174         struct drm_i915_private *dev_priv = to_i915(dev);
17175         unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
17176         u16 gmch_ctrl;
17177
17178         if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
17179                 DRM_ERROR("failed to read control word\n");
17180                 return -EIO;
17181         }
17182
17183         if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
17184                 return 0;
17185
17186         if (state)
17187                 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
17188         else
17189                 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
17190
17191         if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
17192                 DRM_ERROR("failed to write control word\n");
17193                 return -EIO;
17194         }
17195
17196         return 0;
17197 }
17198
17199 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
17200
/*
 * Snapshot of display registers taken at GPU error time, captured by
 * intel_display_capture_error_state() and formatted by
 * intel_display_print_error_state().
 */
struct intel_display_error_state {

        u32 power_well_driver;

        /* Number of valid entries in transcoder[] below. */
        int num_transcoders;

        struct intel_cursor_error_state {
                u32 control;
                u32 position;
                u32 base;
                u32 size;
        } cursor[I915_MAX_PIPES];

        struct intel_pipe_error_state {
                /* False when the pipe's power domain was off at capture
                 * time; the register fields below are then left zero. */
                bool power_domain_on;
                u32 source;
                u32 stat;
        } pipe[I915_MAX_PIPES];

        struct intel_plane_error_state {
                u32 control;
                u32 stride;
                u32 size;
                u32 pos;
                u32 addr;
                u32 surface;
                u32 tile_offset;
        } plane[I915_MAX_PIPES];

        struct intel_transcoder_error_state {
                /* As above: registers are only captured when powered. */
                bool power_domain_on;
                enum transcoder cpu_transcoder;

                u32 conf;

                /* Transcoder timing registers. */
                u32 htotal;
                u32 hblank;
                u32 hsync;
                u32 vtotal;
                u32 vblank;
                u32 vsync;
        } transcoder[4];
};
17244
/*
 * Capture the display register state for the error state dump. Skips
 * anything whose power domain is down (reading it would be invalid).
 * Returns NULL when there are no pipes or allocation fails; caller
 * owns the returned buffer.
 */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv)
{
        struct intel_display_error_state *error;
        int transcoders[] = {
                TRANSCODER_A,
                TRANSCODER_B,
                TRANSCODER_C,
                TRANSCODER_EDP,
        };
        int i;

        if (INTEL_INFO(dev_priv)->num_pipes == 0)
                return NULL;

        /* GFP_ATOMIC: may be called from the error handling path. */
        error = kzalloc(sizeof(*error), GFP_ATOMIC);
        if (error == NULL)
                return NULL;

        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);

        for_each_pipe(dev_priv, i) {
                error->pipe[i].power_domain_on =
                        __intel_display_power_is_enabled(dev_priv,
                                                         POWER_DOMAIN_PIPE(i));
                if (!error->pipe[i].power_domain_on)
                        continue;

                error->cursor[i].control = I915_READ(CURCNTR(i));
                error->cursor[i].position = I915_READ(CURPOS(i));
                error->cursor[i].base = I915_READ(CURBASE(i));

                error->plane[i].control = I915_READ(DSPCNTR(i));
                error->plane[i].stride = I915_READ(DSPSTRIDE(i));
                /* Only read the plane registers that exist on this gen. */
                if (INTEL_GEN(dev_priv) <= 3) {
                        error->plane[i].size = I915_READ(DSPSIZE(i));
                        error->plane[i].pos = I915_READ(DSPPOS(i));
                }
                if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
                        error->plane[i].addr = I915_READ(DSPADDR(i));
                if (INTEL_GEN(dev_priv) >= 4) {
                        error->plane[i].surface = I915_READ(DSPSURF(i));
                        error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
                }

                error->pipe[i].source = I915_READ(PIPESRC(i));

                if (HAS_GMCH_DISPLAY(dev_priv))
                        error->pipe[i].stat = I915_READ(PIPESTAT(i));
        }

        /* Note: this does not include DSI transcoders. */
        error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
        if (HAS_DDI(dev_priv))
                error->num_transcoders++; /* Account for eDP. */

        for (i = 0; i < error->num_transcoders; i++) {
                enum transcoder cpu_transcoder = transcoders[i];

                error->transcoder[i].power_domain_on =
                        __intel_display_power_is_enabled(dev_priv,
                                POWER_DOMAIN_TRANSCODER(cpu_transcoder));
                if (!error->transcoder[i].power_domain_on)
                        continue;

                error->transcoder[i].cpu_transcoder = cpu_transcoder;

                error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
                error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
                error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
                error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
                error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
                error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
                error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
        }

        return error;
}
17324
/* Shorthand for appending a formatted line to the error state buffer. */
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
17326
17327 void
17328 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
17329                                 struct drm_device *dev,
17330                                 struct intel_display_error_state *error)
17331 {
17332         struct drm_i915_private *dev_priv = to_i915(dev);
17333         int i;
17334
17335         if (!error)
17336                 return;
17337
17338         err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
17339         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
17340                 err_printf(m, "PWR_WELL_CTL2: %08x\n",
17341                            error->power_well_driver);
17342         for_each_pipe(dev_priv, i) {
17343                 err_printf(m, "Pipe [%d]:\n", i);
17344                 err_printf(m, "  Power: %s\n",
17345                            onoff(error->pipe[i].power_domain_on));
17346                 err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
17347                 err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
17348
17349                 err_printf(m, "Plane [%d]:\n", i);
17350                 err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
17351                 err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
17352                 if (INTEL_INFO(dev)->gen <= 3) {
17353                         err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
17354                         err_printf(m, "  POS: %08x\n", error->plane[i].pos);
17355                 }
17356                 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
17357                         err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
17358                 if (INTEL_INFO(dev)->gen >= 4) {
17359                         err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
17360                         err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
17361                 }
17362
17363                 err_printf(m, "Cursor [%d]:\n", i);
17364                 err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
17365                 err_printf(m, "  POS: %08x\n", error->cursor[i].position);
17366                 err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
17367         }
17368
17369         for (i = 0; i < error->num_transcoders; i++) {
17370                 err_printf(m, "CPU transcoder: %s\n",
17371                            transcoder_name(error->transcoder[i].cpu_transcoder));
17372                 err_printf(m, "  Power: %s\n",
17373                            onoff(error->transcoder[i].power_domain_on));
17374                 err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
17375                 err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
17376                 err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
17377                 err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
17378                 err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
17379                 err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
17380                 err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
17381         }
17382 }
17383
17384 #endif