drm/i915: re-add locking around hw state readout
profile/ivi/kernel-x86-ivi.git: drivers/gpu/drm/i915/intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/dmi.h>
28 #include <linux/module.h>
29 #include <linux/input.h>
30 #include <linux/i2c.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/vgaarb.h>
34 #include <drm/drm_edid.h>
35 #include <drm/drmP.h>
36 #include "intel_drv.h"
37 #include <drm/i915_drm.h>
38 #include "i915_drv.h"
39 #include "i915_trace.h"
40 #include <drm/drm_dp_helper.h>
41 #include <drm/drm_crtc_helper.h>
42 #include <linux/dma_remapping.h>
43
44 static void intel_increase_pllclock(struct drm_crtc *crtc);
45 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
46
47 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
48                                 struct intel_crtc_config *pipe_config);
49 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
50                                    struct intel_crtc_config *pipe_config);
51
52 static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
53                           int x, int y, struct drm_framebuffer *old_fb);
54 int intel_framebuffer_init(struct drm_device *dev,
55                                   struct intel_framebuffer *ifb,
56                                   struct drm_mode_fb_cmd2 *mode_cmd,
57                                   struct drm_i915_gem_object *obj);
58
59 typedef struct {
60         int     min, max;
61 } intel_range_t;
62
63 typedef struct {
64         int     dot_limit;
65         int     p2_slow, p2_fast;
66 } intel_p2_t;
67
68 typedef struct intel_limit intel_limit_t;
69 struct intel_limit {
70         intel_range_t   dot, vco, n, m, m1, m2, p, p1;
71         intel_p2_t          p2;
72 };
73
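/*
 * Read back the raw PCH reference clock frequency from PCH_RAWCLK_FREQ.
 * Only meaningful on PCH-split platforms, hence the WARN_ON below.
 */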
74 int
75 intel_pch_rawclk(struct drm_device *dev)
76 {
77         struct drm_i915_private *dev_priv = dev->dev_private;
78
79         WARN_ON(!HAS_PCH_SPLIT(dev));
80
81         return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
82 }
83
84 static inline u32 /* units of 100MHz */
85 intel_fdi_link_freq(struct drm_device *dev)
86 {
87         if (IS_GEN5(dev)) {
88                 struct drm_i915_private *dev_priv = dev->dev_private;
89                 return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
90         } else
91                 return 27;
92 }
93
94 static const intel_limit_t intel_limits_i8xx_dac = {
95         .dot = { .min = 25000, .max = 350000 },
96         .vco = { .min = 908000, .max = 1512000 },
97         .n = { .min = 2, .max = 16 },
98         .m = { .min = 96, .max = 140 },
99         .m1 = { .min = 18, .max = 26 },
100         .m2 = { .min = 6, .max = 16 },
101         .p = { .min = 4, .max = 128 },
102         .p1 = { .min = 2, .max = 33 },
103         .p2 = { .dot_limit = 165000,
104                 .p2_slow = 4, .p2_fast = 2 },
105 };
106
107 static const intel_limit_t intel_limits_i8xx_dvo = {
108         .dot = { .min = 25000, .max = 350000 },
109         .vco = { .min = 908000, .max = 1512000 },
110         .n = { .min = 2, .max = 16 },
111         .m = { .min = 96, .max = 140 },
112         .m1 = { .min = 18, .max = 26 },
113         .m2 = { .min = 6, .max = 16 },
114         .p = { .min = 4, .max = 128 },
115         .p1 = { .min = 2, .max = 33 },
116         .p2 = { .dot_limit = 165000,
117                 .p2_slow = 4, .p2_fast = 4 },
118 };
119
120 static const intel_limit_t intel_limits_i8xx_lvds = {
121         .dot = { .min = 25000, .max = 350000 },
122         .vco = { .min = 908000, .max = 1512000 },
123         .n = { .min = 2, .max = 16 },
124         .m = { .min = 96, .max = 140 },
125         .m1 = { .min = 18, .max = 26 },
126         .m2 = { .min = 6, .max = 16 },
127         .p = { .min = 4, .max = 128 },
128         .p1 = { .min = 1, .max = 6 },
129         .p2 = { .dot_limit = 165000,
130                 .p2_slow = 14, .p2_fast = 7 },
131 };
132
133 static const intel_limit_t intel_limits_i9xx_sdvo = {
134         .dot = { .min = 20000, .max = 400000 },
135         .vco = { .min = 1400000, .max = 2800000 },
136         .n = { .min = 1, .max = 6 },
137         .m = { .min = 70, .max = 120 },
138         .m1 = { .min = 8, .max = 18 },
139         .m2 = { .min = 3, .max = 7 },
140         .p = { .min = 5, .max = 80 },
141         .p1 = { .min = 1, .max = 8 },
142         .p2 = { .dot_limit = 200000,
143                 .p2_slow = 10, .p2_fast = 5 },
144 };
145
146 static const intel_limit_t intel_limits_i9xx_lvds = {
147         .dot = { .min = 20000, .max = 400000 },
148         .vco = { .min = 1400000, .max = 2800000 },
149         .n = { .min = 1, .max = 6 },
150         .m = { .min = 70, .max = 120 },
151         .m1 = { .min = 8, .max = 18 },
152         .m2 = { .min = 3, .max = 7 },
153         .p = { .min = 7, .max = 98 },
154         .p1 = { .min = 1, .max = 8 },
155         .p2 = { .dot_limit = 112000,
156                 .p2_slow = 14, .p2_fast = 7 },
157 };
158
159
160 static const intel_limit_t intel_limits_g4x_sdvo = {
161         .dot = { .min = 25000, .max = 270000 },
162         .vco = { .min = 1750000, .max = 3500000},
163         .n = { .min = 1, .max = 4 },
164         .m = { .min = 104, .max = 138 },
165         .m1 = { .min = 17, .max = 23 },
166         .m2 = { .min = 5, .max = 11 },
167         .p = { .min = 10, .max = 30 },
168         .p1 = { .min = 1, .max = 3},
169         .p2 = { .dot_limit = 270000,
170                 .p2_slow = 10,
171                 .p2_fast = 10
172         },
173 };
174
175 static const intel_limit_t intel_limits_g4x_hdmi = {
176         .dot = { .min = 22000, .max = 400000 },
177         .vco = { .min = 1750000, .max = 3500000},
178         .n = { .min = 1, .max = 4 },
179         .m = { .min = 104, .max = 138 },
180         .m1 = { .min = 16, .max = 23 },
181         .m2 = { .min = 5, .max = 11 },
182         .p = { .min = 5, .max = 80 },
183         .p1 = { .min = 1, .max = 8},
184         .p2 = { .dot_limit = 165000,
185                 .p2_slow = 10, .p2_fast = 5 },
186 };
187
188 static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
189         .dot = { .min = 20000, .max = 115000 },
190         .vco = { .min = 1750000, .max = 3500000 },
191         .n = { .min = 1, .max = 3 },
192         .m = { .min = 104, .max = 138 },
193         .m1 = { .min = 17, .max = 23 },
194         .m2 = { .min = 5, .max = 11 },
195         .p = { .min = 28, .max = 112 },
196         .p1 = { .min = 2, .max = 8 },
197         .p2 = { .dot_limit = 0,
198                 .p2_slow = 14, .p2_fast = 14
199         },
200 };
201
202 static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
203         .dot = { .min = 80000, .max = 224000 },
204         .vco = { .min = 1750000, .max = 3500000 },
205         .n = { .min = 1, .max = 3 },
206         .m = { .min = 104, .max = 138 },
207         .m1 = { .min = 17, .max = 23 },
208         .m2 = { .min = 5, .max = 11 },
209         .p = { .min = 14, .max = 42 },
210         .p1 = { .min = 2, .max = 6 },
211         .p2 = { .dot_limit = 0,
212                 .p2_slow = 7, .p2_fast = 7
213         },
214 };
215
216 static const intel_limit_t intel_limits_pineview_sdvo = {
217         .dot = { .min = 20000, .max = 400000},
218         .vco = { .min = 1700000, .max = 3500000 },
219         /* Pineview's Ncounter is a ring counter */
220         .n = { .min = 3, .max = 6 },
221         .m = { .min = 2, .max = 256 },
222         /* Pineview only has one combined m divider, which we treat as m2. */
223         .m1 = { .min = 0, .max = 0 },
224         .m2 = { .min = 0, .max = 254 },
225         .p = { .min = 5, .max = 80 },
226         .p1 = { .min = 1, .max = 8 },
227         .p2 = { .dot_limit = 200000,
228                 .p2_slow = 10, .p2_fast = 5 },
229 };
230
231 static const intel_limit_t intel_limits_pineview_lvds = {
232         .dot = { .min = 20000, .max = 400000 },
233         .vco = { .min = 1700000, .max = 3500000 },
234         .n = { .min = 3, .max = 6 },
235         .m = { .min = 2, .max = 256 },
236         .m1 = { .min = 0, .max = 0 },
237         .m2 = { .min = 0, .max = 254 },
238         .p = { .min = 7, .max = 112 },
239         .p1 = { .min = 1, .max = 8 },
240         .p2 = { .dot_limit = 112000,
241                 .p2_slow = 14, .p2_fast = 14 },
242 };
243
244 /* Ironlake / Sandybridge
245  *
246  * We calculate clock using (register_value + 2) for N/M1/M2, so here
247  * the range value for them is (actual_value - 2).
248  */
249 static const intel_limit_t intel_limits_ironlake_dac = {
250         .dot = { .min = 25000, .max = 350000 },
251         .vco = { .min = 1760000, .max = 3510000 },
252         .n = { .min = 1, .max = 5 },
253         .m = { .min = 79, .max = 127 },
254         .m1 = { .min = 12, .max = 22 },
255         .m2 = { .min = 5, .max = 9 },
256         .p = { .min = 5, .max = 80 },
257         .p1 = { .min = 1, .max = 8 },
258         .p2 = { .dot_limit = 225000,
259                 .p2_slow = 10, .p2_fast = 5 },
260 };
261
262 static const intel_limit_t intel_limits_ironlake_single_lvds = {
263         .dot = { .min = 25000, .max = 350000 },
264         .vco = { .min = 1760000, .max = 3510000 },
265         .n = { .min = 1, .max = 3 },
266         .m = { .min = 79, .max = 118 },
267         .m1 = { .min = 12, .max = 22 },
268         .m2 = { .min = 5, .max = 9 },
269         .p = { .min = 28, .max = 112 },
270         .p1 = { .min = 2, .max = 8 },
271         .p2 = { .dot_limit = 225000,
272                 .p2_slow = 14, .p2_fast = 14 },
273 };
274
275 static const intel_limit_t intel_limits_ironlake_dual_lvds = {
276         .dot = { .min = 25000, .max = 350000 },
277         .vco = { .min = 1760000, .max = 3510000 },
278         .n = { .min = 1, .max = 3 },
279         .m = { .min = 79, .max = 127 },
280         .m1 = { .min = 12, .max = 22 },
281         .m2 = { .min = 5, .max = 9 },
282         .p = { .min = 14, .max = 56 },
283         .p1 = { .min = 2, .max = 8 },
284         .p2 = { .dot_limit = 225000,
285                 .p2_slow = 7, .p2_fast = 7 },
286 };
287
288 /* LVDS 100MHz refclk limits. */
289 static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
290         .dot = { .min = 25000, .max = 350000 },
291         .vco = { .min = 1760000, .max = 3510000 },
292         .n = { .min = 1, .max = 2 },
293         .m = { .min = 79, .max = 126 },
294         .m1 = { .min = 12, .max = 22 },
295         .m2 = { .min = 5, .max = 9 },
296         .p = { .min = 28, .max = 112 },
297         .p1 = { .min = 2, .max = 8 },
298         .p2 = { .dot_limit = 225000,
299                 .p2_slow = 14, .p2_fast = 14 },
300 };
301
302 static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
303         .dot = { .min = 25000, .max = 350000 },
304         .vco = { .min = 1760000, .max = 3510000 },
305         .n = { .min = 1, .max = 3 },
306         .m = { .min = 79, .max = 126 },
307         .m1 = { .min = 12, .max = 22 },
308         .m2 = { .min = 5, .max = 9 },
309         .p = { .min = 14, .max = 42 },
310         .p1 = { .min = 2, .max = 6 },
311         .p2 = { .dot_limit = 225000,
312                 .p2_slow = 7, .p2_fast = 7 },
313 };
314
315 static const intel_limit_t intel_limits_vlv = {
316          /*
317           * These are the data rate limits (measured in fast clocks)
318           * since those are the strictest limits we have. The fast
319           * clock and actual rate limits are more relaxed, so checking
320           * them would make no difference.
321           */
322         .dot = { .min = 25000 * 5, .max = 270000 * 5 },
323         .vco = { .min = 4000000, .max = 6000000 },
324         .n = { .min = 1, .max = 7 },
325         .m1 = { .min = 2, .max = 3 },
326         .m2 = { .min = 11, .max = 156 },
327         .p1 = { .min = 2, .max = 3 },
328         .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
329 };
330
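/*
 * Fill in the composite m/p dividers and the resulting VCO and dot clock
 * from the individual divider fields, bailing out (with a WARN) if a
 * divider is zero.
 */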
331 static void vlv_clock(int refclk, intel_clock_t *clock)
332 {
333         clock->m = clock->m1 * clock->m2;
334         clock->p = clock->p1 * clock->p2;
335         if (WARN_ON(clock->n == 0 || clock->p == 0))
336                 return;
337         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
338         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
339 }
340
341 /**
342  * Returns whether any output on the specified pipe is of the specified type
343  */
344 static bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
345 {
346         struct drm_device *dev = crtc->dev;
347         struct intel_encoder *encoder;
348
349         for_each_encoder_on_crtc(dev, crtc, encoder)
350                 if (encoder->type == type)
351                         return true;
352
353         return false;
354 }
355
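/*
 * Pick the PLL limit table for Ironlake/PCH-split parts based on the
 * attached output: single vs. dual link LVDS, with separate tables for a
 * 100MHz refclk, falling back to the DAC limits for everything else.
 */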
356 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
357                                                 int refclk)
358 {
359         struct drm_device *dev = crtc->dev;
360         const intel_limit_t *limit;
361
362         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
363                 if (intel_is_dual_link_lvds(dev)) {
364                         if (refclk == 100000)
365                                 limit = &intel_limits_ironlake_dual_lvds_100m;
366                         else
367                                 limit = &intel_limits_ironlake_dual_lvds;
368                 } else {
369                         if (refclk == 100000)
370                                 limit = &intel_limits_ironlake_single_lvds_100m;
371                         else
372                                 limit = &intel_limits_ironlake_single_lvds;
373                 }
374         } else
375                 limit = &intel_limits_ironlake_dac;
376
377         return limit;
378 }
379
380 static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
381 {
382         struct drm_device *dev = crtc->dev;
383         const intel_limit_t *limit;
384
385         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
386                 if (intel_is_dual_link_lvds(dev))
387                         limit = &intel_limits_g4x_dual_channel_lvds;
388                 else
389                         limit = &intel_limits_g4x_single_channel_lvds;
390         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
391                    intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
392                 limit = &intel_limits_g4x_hdmi;
393         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
394                 limit = &intel_limits_g4x_sdvo;
395         } else /* The option is for other outputs */
396                 limit = &intel_limits_i9xx_sdvo;
397
398         return limit;
399 }
400
401 static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
402 {
403         struct drm_device *dev = crtc->dev;
404         const intel_limit_t *limit;
405
406         if (HAS_PCH_SPLIT(dev))
407                 limit = intel_ironlake_limit(crtc, refclk);
408         else if (IS_G4X(dev)) {
409                 limit = intel_g4x_limit(crtc);
410         } else if (IS_PINEVIEW(dev)) {
411                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
412                         limit = &intel_limits_pineview_lvds;
413                 else
414                         limit = &intel_limits_pineview_sdvo;
415         } else if (IS_VALLEYVIEW(dev)) {
416                 limit = &intel_limits_vlv;
417         } else if (!IS_GEN2(dev)) {
418                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
419                         limit = &intel_limits_i9xx_lvds;
420                 else
421                         limit = &intel_limits_i9xx_sdvo;
422         } else {
423                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
424                         limit = &intel_limits_i8xx_lvds;
425                 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO))
426                         limit = &intel_limits_i8xx_dvo;
427                 else
428                         limit = &intel_limits_i8xx_dac;
429         }
430         return limit;
431 }
432
433 /* m1 is reserved as 0 in Pineview, n is a ring counter */
434 static void pineview_clock(int refclk, intel_clock_t *clock)
435 {
436         clock->m = clock->m2 + 2;
437         clock->p = clock->p1 * clock->p2;
438         if (WARN_ON(clock->n == 0 || clock->p == 0))
439                 return;
440         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
441         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
442 }
443
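/*
 * i9xx-style DPLL registers encode N, M1 and M2 as (value - 2), so the
 * helpers below add the +2 bias back when reconstructing the feedback
 * divider, VCO and dot clock (see the Ironlake/Sandybridge comment above).
 */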
444 static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
445 {
446         return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
447 }
448
449 static void i9xx_clock(int refclk, intel_clock_t *clock)
450 {
451         clock->m = i9xx_dpll_compute_m(clock);
452         clock->p = clock->p1 * clock->p2;
453         if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
454                 return;
455         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
456         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
457 }
458
459 #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
460 /**
461  * Returns whether the given set of divisors is valid for a given refclk with
462  * the given connectors.
463  */
464
465 static bool intel_PLL_is_valid(struct drm_device *dev,
466                                const intel_limit_t *limit,
467                                const intel_clock_t *clock)
468 {
469         if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
470                 INTELPllInvalid("n out of range\n");
471         if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
472                 INTELPllInvalid("p1 out of range\n");
473         if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
474                 INTELPllInvalid("m2 out of range\n");
475         if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
476                 INTELPllInvalid("m1 out of range\n");
477
478         if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev))
479                 if (clock->m1 <= clock->m2)
480                         INTELPllInvalid("m1 <= m2\n");
481
482         if (!IS_VALLEYVIEW(dev)) {
483                 if (clock->p < limit->p.min || limit->p.max < clock->p)
484                         INTELPllInvalid("p out of range\n");
485                 if (clock->m < limit->m.min || limit->m.max < clock->m)
486                         INTELPllInvalid("m out of range\n");
487         }
488
489         if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
490                 INTELPllInvalid("vco out of range\n");
491         /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
492          * connector, etc., rather than just a single range.
493          */
494         if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
495                 INTELPllInvalid("dot out of range\n");
496
497         return true;
498 }
499
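/*
 * Exhaustively walk the divider ranges and keep the combination whose dot
 * clock lands closest to the target; p2 is fixed up front from the LVDS
 * channel mode or the dot clock limit. Returns true if any valid
 * combination was found.
 */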
500 static bool
501 i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
502                     int target, int refclk, intel_clock_t *match_clock,
503                     intel_clock_t *best_clock)
504 {
505         struct drm_device *dev = crtc->dev;
506         intel_clock_t clock;
507         int err = target;
508
509         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
510                 /*
511                  * For LVDS just rely on its current settings for dual-channel.
512                  * We haven't figured out how to reliably set up different
513                  * single/dual channel state, if we even can.
514                  */
515                 if (intel_is_dual_link_lvds(dev))
516                         clock.p2 = limit->p2.p2_fast;
517                 else
518                         clock.p2 = limit->p2.p2_slow;
519         } else {
520                 if (target < limit->p2.dot_limit)
521                         clock.p2 = limit->p2.p2_slow;
522                 else
523                         clock.p2 = limit->p2.p2_fast;
524         }
525
526         memset(best_clock, 0, sizeof(*best_clock));
527
528         for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
529              clock.m1++) {
530                 for (clock.m2 = limit->m2.min;
531                      clock.m2 <= limit->m2.max; clock.m2++) {
532                         if (clock.m2 >= clock.m1)
533                                 break;
534                         for (clock.n = limit->n.min;
535                              clock.n <= limit->n.max; clock.n++) {
536                                 for (clock.p1 = limit->p1.min;
537                                         clock.p1 <= limit->p1.max; clock.p1++) {
538                                         int this_err;
539
540                                         i9xx_clock(refclk, &clock);
541                                         if (!intel_PLL_is_valid(dev, limit,
542                                                                 &clock))
543                                                 continue;
544                                         if (match_clock &&
545                                             clock.p != match_clock->p)
546                                                 continue;
547
548                                         this_err = abs(clock.dot - target);
549                                         if (this_err < err) {
550                                                 *best_clock = clock;
551                                                 err = this_err;
552                                         }
553                                 }
554                         }
555                 }
556         }
557
558         return (err != target);
559 }
560
561 static bool
562 pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
563                    int target, int refclk, intel_clock_t *match_clock,
564                    intel_clock_t *best_clock)
565 {
566         struct drm_device *dev = crtc->dev;
567         intel_clock_t clock;
568         int err = target;
569
570         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
571                 /*
572                  * For LVDS just rely on its current settings for dual-channel.
573                  * We haven't figured out how to reliably set up different
574                  * single/dual channel state, if we even can.
575                  */
576                 if (intel_is_dual_link_lvds(dev))
577                         clock.p2 = limit->p2.p2_fast;
578                 else
579                         clock.p2 = limit->p2.p2_slow;
580         } else {
581                 if (target < limit->p2.dot_limit)
582                         clock.p2 = limit->p2.p2_slow;
583                 else
584                         clock.p2 = limit->p2.p2_fast;
585         }
586
587         memset(best_clock, 0, sizeof(*best_clock));
588
589         for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
590              clock.m1++) {
591                 for (clock.m2 = limit->m2.min;
592                      clock.m2 <= limit->m2.max; clock.m2++) {
593                         for (clock.n = limit->n.min;
594                              clock.n <= limit->n.max; clock.n++) {
595                                 for (clock.p1 = limit->p1.min;
596                                         clock.p1 <= limit->p1.max; clock.p1++) {
597                                         int this_err;
598
599                                         pineview_clock(refclk, &clock);
600                                         if (!intel_PLL_is_valid(dev, limit,
601                                                                 &clock))
602                                                 continue;
603                                         if (match_clock &&
604                                             clock.p != match_clock->p)
605                                                 continue;
606
607                                         this_err = abs(clock.dot - target);
608                                         if (this_err < err) {
609                                                 *best_clock = clock;
610                                                 err = this_err;
611                                         }
612                                 }
613                         }
614                 }
615         }
616
617         return (err != target);
618 }
619
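/*
 * g4x variant: instead of demanding an exact match it accepts roughly 0.6%
 * dot clock error (err_most), walks m1/m2/p1 from large to small, and
 * narrows max_n as better candidates are found.
 */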
620 static bool
621 g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
622                    int target, int refclk, intel_clock_t *match_clock,
623                    intel_clock_t *best_clock)
624 {
625         struct drm_device *dev = crtc->dev;
626         intel_clock_t clock;
627         int max_n;
628         bool found;
629         /* approximately equals target * 0.00585 */
630         int err_most = (target >> 8) + (target >> 9);
631         found = false;
632
633         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
634                 if (intel_is_dual_link_lvds(dev))
635                         clock.p2 = limit->p2.p2_fast;
636                 else
637                         clock.p2 = limit->p2.p2_slow;
638         } else {
639                 if (target < limit->p2.dot_limit)
640                         clock.p2 = limit->p2.p2_slow;
641                 else
642                         clock.p2 = limit->p2.p2_fast;
643         }
644
645         memset(best_clock, 0, sizeof(*best_clock));
646         max_n = limit->n.max;
647         /* based on hardware requirement, prefer smaller n for precision */
648         for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
649                 /* based on hardware requirement, prefer larger m1,m2 */
650                 for (clock.m1 = limit->m1.max;
651                      clock.m1 >= limit->m1.min; clock.m1--) {
652                         for (clock.m2 = limit->m2.max;
653                              clock.m2 >= limit->m2.min; clock.m2--) {
654                                 for (clock.p1 = limit->p1.max;
655                                      clock.p1 >= limit->p1.min; clock.p1--) {
656                                         int this_err;
657
658                                         i9xx_clock(refclk, &clock);
659                                         if (!intel_PLL_is_valid(dev, limit,
660                                                                 &clock))
661                                                 continue;
662
663                                         this_err = abs(clock.dot - target);
664                                         if (this_err < err_most) {
665                                                 *best_clock = clock;
666                                                 err_most = this_err;
667                                                 max_n = clock.n;
668                                                 found = true;
669                                         }
670                                 }
671                         }
672                 }
673         }
674         return found;
675 }
676
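/*
 * VLV works in the fast clock domain (5x the pixel clock): for each
 * n/p1/p2/m1 it computes m2 directly, then prefers candidates with a larger
 * post divider once the error is under 100 ppm, or otherwise a noticeably
 * lower ppm error than the best found so far.
 */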
677 static bool
678 vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
679                    int target, int refclk, intel_clock_t *match_clock,
680                    intel_clock_t *best_clock)
681 {
682         struct drm_device *dev = crtc->dev;
683         intel_clock_t clock;
684         unsigned int bestppm = 1000000;
685         /* min update 19.2 MHz */
686         int max_n = min(limit->n.max, refclk / 19200);
687         bool found = false;
688
689         target *= 5; /* fast clock */
690
691         memset(best_clock, 0, sizeof(*best_clock));
692
693         /* based on hardware requirement, prefer smaller n for precision */
694         for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
695                 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
696                         for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
697                              clock.p2 -= clock.p2 > 10 ? 2 : 1) {
698                                 clock.p = clock.p1 * clock.p2;
699                                 /* based on hardware requirement, prefer bigger m1,m2 values */
700                                 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
701                                         unsigned int ppm, diff;
702
703                                         clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
704                                                                      refclk * clock.m1);
705
706                                         vlv_clock(refclk, &clock);
707
708                                         if (!intel_PLL_is_valid(dev, limit,
709                                                                 &clock))
710                                                 continue;
711
712                                         diff = abs(clock.dot - target);
713                                         ppm = div_u64(1000000ULL * diff, target);
714
715                                         if (ppm < 100 && clock.p > best_clock->p) {
716                                                 bestppm = 0;
717                                                 *best_clock = clock;
718                                                 found = true;
719                                         }
720
721                                         if (bestppm >= 10 && ppm < bestppm - 10) {
722                                                 bestppm = ppm;
723                                                 *best_clock = clock;
724                                                 found = true;
725                                         }
726                                 }
727                         }
728                 }
729         }
730
731         return found;
732 }
733
734 bool intel_crtc_active(struct drm_crtc *crtc)
735 {
736         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
737
738         /* Be paranoid as we can arrive here with only partial
739          * state retrieved from the hardware during setup.
740          *
741          * We can ditch the adjusted_mode.crtc_clock check as soon
742          * as Haswell has gained clock readout/fastboot support.
743          *
744          * We can ditch the crtc->fb check as soon as we can
745          * properly reconstruct framebuffers.
746          */
747         return intel_crtc->active && crtc->fb &&
748                 intel_crtc->config.adjusted_mode.crtc_clock;
749 }
750
751 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
752                                              enum pipe pipe)
753 {
754         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
755         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
756
757         return intel_crtc->config.cpu_transcoder;
758 }
759
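/*
 * Wait (up to 50 ms) for the pipe frame counter to advance; used instead of
 * the PIPESTAT vblank polling on g4x and newer.
 */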
760 static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
761 {
762         struct drm_i915_private *dev_priv = dev->dev_private;
763         u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe);
764
765         frame = I915_READ(frame_reg);
766
767         if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
768                 DRM_DEBUG_KMS("vblank wait timed out\n");
769 }
770
771 /**
772  * intel_wait_for_vblank - wait for vblank on a given pipe
773  * @dev: drm device
774  * @pipe: pipe to wait for
775  *
776  * Wait for vblank to occur on a given pipe.  Needed for various bits of
777  * mode setting code.
778  */
779 void intel_wait_for_vblank(struct drm_device *dev, int pipe)
780 {
781         struct drm_i915_private *dev_priv = dev->dev_private;
782         int pipestat_reg = PIPESTAT(pipe);
783
784         if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
785                 g4x_wait_for_vblank(dev, pipe);
786                 return;
787         }
788
789         /* Clear existing vblank status. Note this will clear any other
790          * sticky status fields as well.
791          *
792          * This races with i915_driver_irq_handler() with the result
793          * that either function could miss a vblank event.  Here it is not
794          * fatal, as we will either wait upon the next vblank interrupt or
795          * timeout.  Generally speaking intel_wait_for_vblank() is only
796          * called during modeset at which time the GPU should be idle and
797          * should *not* be performing page flips and thus not waiting on
798          * vblanks...
799          * Currently, the result of us stealing a vblank from the irq
800          * handler is that a single frame will be skipped during swapbuffers.
801          */
802         I915_WRITE(pipestat_reg,
803                    I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
804
805         /* Wait for vblank interrupt bit to set */
806         if (wait_for(I915_READ(pipestat_reg) &
807                      PIPE_VBLANK_INTERRUPT_STATUS,
808                      50))
809                 DRM_DEBUG_KMS("vblank wait timed out\n");
810 }
811
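/*
 * Sample the pipe's display scan line register (PIPEDSL) twice, 5 ms apart;
 * if it has not moved the pipe has stopped scanning out.
 */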
812 static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
813 {
814         struct drm_i915_private *dev_priv = dev->dev_private;
815         u32 reg = PIPEDSL(pipe);
816         u32 line1, line2;
817         u32 line_mask;
818
819         if (IS_GEN2(dev))
820                 line_mask = DSL_LINEMASK_GEN2;
821         else
822                 line_mask = DSL_LINEMASK_GEN3;
823
824         line1 = I915_READ(reg) & line_mask;
825         mdelay(5);
826         line2 = I915_READ(reg) & line_mask;
827
828         return line1 == line2;
829 }
830
831 /*
832  * intel_wait_for_pipe_off - wait for pipe to turn off
833  * @dev: drm device
834  * @pipe: pipe to wait for
835  *
836  * After disabling a pipe, we can't wait for vblank in the usual way,
837  * spinning on the vblank interrupt status bit, since we won't actually
838  * see an interrupt when the pipe is disabled.
839  *
840  * On Gen4 and above:
841  *   wait for the pipe register state bit to turn off
842  *
843  * Otherwise:
844  *   wait for the display line value to settle (it usually
845  *   ends up stopping at the start of the next frame).
846  *
847  */
848 void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
849 {
850         struct drm_i915_private *dev_priv = dev->dev_private;
851         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
852                                                                       pipe);
853
854         if (INTEL_INFO(dev)->gen >= 4) {
855                 int reg = PIPECONF(cpu_transcoder);
856
857                 /* Wait for the Pipe State to go off */
858                 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
859                              100))
860                         WARN(1, "pipe_off wait timed out\n");
861         } else {
862                 /* Wait for the display line to settle */
863                 if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
864                         WARN(1, "pipe_off wait timed out\n");
865         }
866 }
867
868 /*
869  * ibx_digital_port_connected - is the specified port connected?
870  * @dev_priv: i915 private structure
871  * @port: the port to test
872  *
873  * Returns true if @port is connected, false otherwise.
874  */
875 bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
876                                 struct intel_digital_port *port)
877 {
878         u32 bit;
879
880         if (HAS_PCH_IBX(dev_priv->dev)) {
881                 switch(port->port) {
882                 case PORT_B:
883                         bit = SDE_PORTB_HOTPLUG;
884                         break;
885                 case PORT_C:
886                         bit = SDE_PORTC_HOTPLUG;
887                         break;
888                 case PORT_D:
889                         bit = SDE_PORTD_HOTPLUG;
890                         break;
891                 default:
892                         return true;
893                 }
894         } else {
895                 switch(port->port) {
896                 case PORT_B:
897                         bit = SDE_PORTB_HOTPLUG_CPT;
898                         break;
899                 case PORT_C:
900                         bit = SDE_PORTC_HOTPLUG_CPT;
901                         break;
902                 case PORT_D:
903                         bit = SDE_PORTD_HOTPLUG_CPT;
904                         break;
905                 default:
906                         return true;
907                 }
908         }
909
910         return I915_READ(SDEISR) & bit;
911 }
912
913 static const char *state_string(bool enabled)
914 {
915         return enabled ? "on" : "off";
916 }
917
918 /* Only for pre-ILK configs */
919 void assert_pll(struct drm_i915_private *dev_priv,
920                 enum pipe pipe, bool state)
921 {
922         int reg;
923         u32 val;
924         bool cur_state;
925
926         reg = DPLL(pipe);
927         val = I915_READ(reg);
928         cur_state = !!(val & DPLL_VCO_ENABLE);
929         WARN(cur_state != state,
930              "PLL state assertion failure (expected %s, current %s)\n",
931              state_string(state), state_string(cur_state));
932 }
933
934 /* XXX: the dsi pll is shared between MIPI DSI ports */
935 static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
936 {
937         u32 val;
938         bool cur_state;
939
940         mutex_lock(&dev_priv->dpio_lock);
941         val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
942         mutex_unlock(&dev_priv->dpio_lock);
943
944         cur_state = val & DSI_PLL_VCO_EN;
945         WARN(cur_state != state,
946              "DSI PLL state assertion failure (expected %s, current %s)\n",
947              state_string(state), state_string(cur_state));
948 }
949 #define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
950 #define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
951
952 struct intel_shared_dpll *
953 intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
954 {
955         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
956
957         if (crtc->config.shared_dpll < 0)
958                 return NULL;
959
960         return &dev_priv->shared_dplls[crtc->config.shared_dpll];
961 }
962
963 /* For ILK+ */
964 void assert_shared_dpll(struct drm_i915_private *dev_priv,
965                         struct intel_shared_dpll *pll,
966                         bool state)
967 {
968         bool cur_state;
969         struct intel_dpll_hw_state hw_state;
970
971         if (HAS_PCH_LPT(dev_priv->dev)) {
972                 DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
973                 return;
974         }
975
976         if (WARN (!pll,
977                   "asserting DPLL %s with no DPLL\n", state_string(state)))
978                 return;
979
980         cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
981         WARN(cur_state != state,
982              "%s assertion failure (expected %s, current %s)\n",
983              pll->name, state_string(state), state_string(cur_state));
984 }
985
986 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
987                           enum pipe pipe, bool state)
988 {
989         int reg;
990         u32 val;
991         bool cur_state;
992         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
993                                                                       pipe);
994
995         if (HAS_DDI(dev_priv->dev)) {
996                 /* DDI does not have a specific FDI_TX register */
997                 reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
998                 val = I915_READ(reg);
999                 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1000         } else {
1001                 reg = FDI_TX_CTL(pipe);
1002                 val = I915_READ(reg);
1003                 cur_state = !!(val & FDI_TX_ENABLE);
1004         }
1005         WARN(cur_state != state,
1006              "FDI TX state assertion failure (expected %s, current %s)\n",
1007              state_string(state), state_string(cur_state));
1008 }
1009 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1010 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1011
1012 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1013                           enum pipe pipe, bool state)
1014 {
1015         int reg;
1016         u32 val;
1017         bool cur_state;
1018
1019         reg = FDI_RX_CTL(pipe);
1020         val = I915_READ(reg);
1021         cur_state = !!(val & FDI_RX_ENABLE);
1022         WARN(cur_state != state,
1023              "FDI RX state assertion failure (expected %s, current %s)\n",
1024              state_string(state), state_string(cur_state));
1025 }
1026 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1027 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1028
1029 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1030                                       enum pipe pipe)
1031 {
1032         int reg;
1033         u32 val;
1034
1035         /* ILK FDI PLL is always enabled */
1036         if (dev_priv->info->gen == 5)
1037                 return;
1038
1039         /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1040         if (HAS_DDI(dev_priv->dev))
1041                 return;
1042
1043         reg = FDI_TX_CTL(pipe);
1044         val = I915_READ(reg);
1045         WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1046 }
1047
1048 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1049                        enum pipe pipe, bool state)
1050 {
1051         int reg;
1052         u32 val;
1053         bool cur_state;
1054
1055         reg = FDI_RX_CTL(pipe);
1056         val = I915_READ(reg);
1057         cur_state = !!(val & FDI_RX_PLL_ENABLE);
1058         WARN(cur_state != state,
1059              "FDI RX PLL assertion failure (expected %s, current %s)\n",
1060              state_string(state), state_string(cur_state));
1061 }
1062
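/*
 * Warn if the panel power sequencer still has its registers locked for the
 * pipe driving the panel; the PLL enable paths below rely on them being
 * writable ("PLL is protected by panel").
 */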
1063 static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1064                                   enum pipe pipe)
1065 {
1066         int pp_reg, lvds_reg;
1067         u32 val;
1068         enum pipe panel_pipe = PIPE_A;
1069         bool locked = true;
1070
1071         if (HAS_PCH_SPLIT(dev_priv->dev)) {
1072                 pp_reg = PCH_PP_CONTROL;
1073                 lvds_reg = PCH_LVDS;
1074         } else {
1075                 pp_reg = PP_CONTROL;
1076                 lvds_reg = LVDS;
1077         }
1078
1079         val = I915_READ(pp_reg);
1080         if (!(val & PANEL_POWER_ON) ||
1081             ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
1082                 locked = false;
1083
1084         if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
1085                 panel_pipe = PIPE_B;
1086
1087         WARN(panel_pipe == pipe && locked,
1088              "panel assertion failure, pipe %c regs locked\n",
1089              pipe_name(pipe));
1090 }
1091
1092 static void assert_cursor(struct drm_i915_private *dev_priv,
1093                           enum pipe pipe, bool state)
1094 {
1095         struct drm_device *dev = dev_priv->dev;
1096         bool cur_state;
1097
1098         if (IS_845G(dev) || IS_I865G(dev))
1099                 cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
1100         else if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev))
1101                 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
1102         else
1103                 cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
1104
1105         WARN(cur_state != state,
1106              "cursor on pipe %c assertion failure (expected %s, current %s)\n",
1107              pipe_name(pipe), state_string(state), state_string(cur_state));
1108 }
1109 #define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
1110 #define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1111
1112 void assert_pipe(struct drm_i915_private *dev_priv,
1113                  enum pipe pipe, bool state)
1114 {
1115         int reg;
1116         u32 val;
1117         bool cur_state;
1118         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1119                                                                       pipe);
1120
1121         /* if we need the pipe A quirk it must always be on */
1122         if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
1123                 state = true;
1124
1125         if (!intel_display_power_enabled(dev_priv->dev,
1126                                 POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
1127                 cur_state = false;
1128         } else {
1129                 reg = PIPECONF(cpu_transcoder);
1130                 val = I915_READ(reg);
1131                 cur_state = !!(val & PIPECONF_ENABLE);
1132         }
1133
1134         WARN(cur_state != state,
1135              "pipe %c assertion failure (expected %s, current %s)\n",
1136              pipe_name(pipe), state_string(state), state_string(cur_state));
1137 }
1138
1139 static void assert_plane(struct drm_i915_private *dev_priv,
1140                          enum plane plane, bool state)
1141 {
1142         int reg;
1143         u32 val;
1144         bool cur_state;
1145
1146         reg = DSPCNTR(plane);
1147         val = I915_READ(reg);
1148         cur_state = !!(val & DISPLAY_PLANE_ENABLE);
1149         WARN(cur_state != state,
1150              "plane %c assertion failure (expected %s, current %s)\n",
1151              plane_name(plane), state_string(state), state_string(cur_state));
1152 }
1153
1154 #define assert_plane_enabled(d, p) assert_plane(d, p, true)
1155 #define assert_plane_disabled(d, p) assert_plane(d, p, false)
1156
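/*
 * Check that no primary plane is still enabled on this pipe: on gen4+
 * planes are fixed to pipes, while older parts need every plane's
 * pipe-select field checked.
 */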
1157 static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1158                                    enum pipe pipe)
1159 {
1160         struct drm_device *dev = dev_priv->dev;
1161         int reg, i;
1162         u32 val;
1163         int cur_pipe;
1164
1165         /* Primary planes are fixed to pipes on gen4+ */
1166         if (INTEL_INFO(dev)->gen >= 4) {
1167                 reg = DSPCNTR(pipe);
1168                 val = I915_READ(reg);
1169                 WARN((val & DISPLAY_PLANE_ENABLE),
1170                      "plane %c assertion failure, should be disabled but is not\n",
1171                      plane_name(pipe));
1172                 return;
1173         }
1174
1175         /* Need to check both planes against the pipe */
1176         for_each_pipe(i) {
1177                 reg = DSPCNTR(i);
1178                 val = I915_READ(reg);
1179                 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1180                         DISPPLANE_SEL_PIPE_SHIFT;
1181                 WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1182                      "plane %c assertion failure, should be off on pipe %c but is still active\n",
1183                      plane_name(i), pipe_name(pipe));
1184         }
1185 }
1186
1187 static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1188                                     enum pipe pipe)
1189 {
1190         struct drm_device *dev = dev_priv->dev;
1191         int reg, i;
1192         u32 val;
1193
1194         if (IS_VALLEYVIEW(dev)) {
1195                 for (i = 0; i < dev_priv->num_plane; i++) {
1196                         reg = SPCNTR(pipe, i);
1197                         val = I915_READ(reg);
1198                         WARN((val & SP_ENABLE),
1199                              "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1200                              sprite_name(pipe, i), pipe_name(pipe));
1201                 }
1202         } else if (INTEL_INFO(dev)->gen >= 7) {
1203                 reg = SPRCTL(pipe);
1204                 val = I915_READ(reg);
1205                 WARN((val & SPRITE_ENABLE),
1206                      "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1207                      plane_name(pipe), pipe_name(pipe));
1208         } else if (INTEL_INFO(dev)->gen >= 5) {
1209                 reg = DVSCNTR(pipe);
1210                 val = I915_READ(reg);
1211                 WARN((val & DVS_ENABLE),
1212                      "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1213                      plane_name(pipe), pipe_name(pipe));
1214         }
1215 }
1216
1217 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1218 {
1219         u32 val;
1220         bool enabled;
1221
1222         WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
1223
1224         val = I915_READ(PCH_DREF_CONTROL);
1225         enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1226                             DREF_SUPERSPREAD_SOURCE_MASK));
1227         WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1228 }
1229
1230 static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1231                                            enum pipe pipe)
1232 {
1233         int reg;
1234         u32 val;
1235         bool enabled;
1236
1237         reg = PCH_TRANSCONF(pipe);
1238         val = I915_READ(reg);
1239         enabled = !!(val & TRANS_ENABLE);
1240         WARN(enabled,
1241              "transcoder assertion failed, should be off on pipe %c but is still active\n",
1242              pipe_name(pipe));
1243 }
1244
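/*
 * The next few helpers decode whether a PCH DP/HDMI/LVDS/VGA port register
 * value means that port is currently driving the given pipe: on CPT the DP
 * transcoder select lives in TRANS_DP_CTL, the other ports carry their
 * select in the port register itself; on IBX it is a pipe select in the
 * port register.
 */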
1245 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1246                             enum pipe pipe, u32 port_sel, u32 val)
1247 {
1248         if ((val & DP_PORT_EN) == 0)
1249                 return false;
1250
1251         if (HAS_PCH_CPT(dev_priv->dev)) {
1252                 u32     trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
1253                 u32     trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1254                 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1255                         return false;
1256         } else {
1257                 if ((val & DP_PIPE_MASK) != (pipe << 30))
1258                         return false;
1259         }
1260         return true;
1261 }
1262
1263 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1264                               enum pipe pipe, u32 val)
1265 {
1266         if ((val & SDVO_ENABLE) == 0)
1267                 return false;
1268
1269         if (HAS_PCH_CPT(dev_priv->dev)) {
1270                 if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
1271                         return false;
1272         } else {
1273                 if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
1274                         return false;
1275         }
1276         return true;
1277 }
1278
1279 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1280                               enum pipe pipe, u32 val)
1281 {
1282         if ((val & LVDS_PORT_EN) == 0)
1283                 return false;
1284
1285         if (HAS_PCH_CPT(dev_priv->dev)) {
1286                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1287                         return false;
1288         } else {
1289                 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1290                         return false;
1291         }
1292         return true;
1293 }
1294
1295 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1296                               enum pipe pipe, u32 val)
1297 {
1298         if ((val & ADPA_DAC_ENABLE) == 0)
1299                 return false;
1300         if (HAS_PCH_CPT(dev_priv->dev)) {
1301                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1302                         return false;
1303         } else {
1304                 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1305                         return false;
1306         }
1307         return true;
1308 }
1309
1310 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1311                                    enum pipe pipe, int reg, u32 port_sel)
1312 {
1313         u32 val = I915_READ(reg);
1314         WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
1315              "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1316              reg, pipe_name(pipe));
1317
1318         WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
1319              && (val & DP_PIPEB_SELECT),
1320              "IBX PCH dp port still using transcoder B\n");
1321 }
1322
1323 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1324                                      enum pipe pipe, int reg)
1325 {
1326         u32 val = I915_READ(reg);
1327         WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1328              "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1329              reg, pipe_name(pipe));
1330
1331         WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
1332              && (val & SDVO_PIPE_B_SELECT),
1333              "IBX PCH hdmi port still using transcoder B\n");
1334 }
1335
1336 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1337                                       enum pipe pipe)
1338 {
1339         int reg;
1340         u32 val;
1341
1342         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1343         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1344         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1345
1346         reg = PCH_ADPA;
1347         val = I915_READ(reg);
1348         WARN(adpa_pipe_enabled(dev_priv, pipe, val),
1349              "PCH VGA enabled on transcoder %c, should be disabled\n",
1350              pipe_name(pipe));
1351
1352         reg = PCH_LVDS;
1353         val = I915_READ(reg);
1354         WARN(lvds_pipe_enabled(dev_priv, pipe, val),
1355              "PCH LVDS enabled on transcoder %c, should be disabled\n",
1356              pipe_name(pipe));
1357
1358         assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
1359         assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
1360         assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
1361 }
1362
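/*
 * On VLV, point the display PHY at its IOSF sideband port so the DPIO
 * registers can be reached; a no-op everywhere else.
 */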
1363 static void intel_init_dpio(struct drm_device *dev)
1364 {
1365         struct drm_i915_private *dev_priv = dev->dev_private;
1366
1367         if (!IS_VALLEYVIEW(dev))
1368                 return;
1369
1370         DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
1371 }
1372
1373 static void intel_reset_dpio(struct drm_device *dev)
1374 {
1375         struct drm_i915_private *dev_priv = dev->dev_private;
1376
1377         if (!IS_VALLEYVIEW(dev))
1378                 return;
1379
1380         /*
1381          * Enable the CRI clock source so we can get at the display and the
1382          * reference clock for VGA hotplug / manual detection.
1383          */
1384         I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
1385                    DPLL_REFA_CLK_ENABLE_VLV |
1386                    DPLL_INTEGRATED_CRI_CLK_VLV);
1387
1388         /*
1389          * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
1390          *  6.  De-assert cmn_reset/side_reset. Same as VLV X0.
1391          *   a. GUnit 0x2110 bit[0] set to 1 (def 0)
1392          *   b. The other bits such as sfr settings / modesel may all be set
1393          *      to 0.
1394          *
1395          * This should only be done on init and resume from S3 with both
1396          * PLLs disabled, or we risk losing DPIO and PLL synchronization.
1397          */
1398         I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
1399 }
1400
1401 static void vlv_enable_pll(struct intel_crtc *crtc)
1402 {
1403         struct drm_device *dev = crtc->base.dev;
1404         struct drm_i915_private *dev_priv = dev->dev_private;
1405         int reg = DPLL(crtc->pipe);
1406         u32 dpll = crtc->config.dpll_hw_state.dpll;
1407
1408         assert_pipe_disabled(dev_priv, crtc->pipe);
1409
1410         /* No really, not for ILK+ */
1411         BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
1412
1413         /* PLL is protected by panel, make sure we can write it */
1414         if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
1415                 assert_panel_unlocked(dev_priv, crtc->pipe);
1416
1417         I915_WRITE(reg, dpll);
1418         POSTING_READ(reg);
1419         udelay(150);
1420
1421         if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1422                 DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
1423
1424         I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md);
1425         POSTING_READ(DPLL_MD(crtc->pipe));
1426
1427         /* We do this three times for luck */
1428         I915_WRITE(reg, dpll);
1429         POSTING_READ(reg);
1430         udelay(150); /* wait for warmup */
1431         I915_WRITE(reg, dpll);
1432         POSTING_READ(reg);
1433         udelay(150); /* wait for warmup */
1434         I915_WRITE(reg, dpll);
1435         POSTING_READ(reg);
1436         udelay(150); /* wait for warmup */
1437 }
1438
1439 static void i9xx_enable_pll(struct intel_crtc *crtc)
1440 {
1441         struct drm_device *dev = crtc->base.dev;
1442         struct drm_i915_private *dev_priv = dev->dev_private;
1443         int reg = DPLL(crtc->pipe);
1444         u32 dpll = crtc->config.dpll_hw_state.dpll;
1445
1446         assert_pipe_disabled(dev_priv, crtc->pipe);
1447
1448         /* No really, not for ILK+ */
1449         BUG_ON(dev_priv->info->gen >= 5);
1450
1451         /* PLL is protected by panel, make sure we can write it */
1452         if (IS_MOBILE(dev) && !IS_I830(dev))
1453                 assert_panel_unlocked(dev_priv, crtc->pipe);
1454
1455         I915_WRITE(reg, dpll);
1456
1457         /* Wait for the clocks to stabilize. */
1458         POSTING_READ(reg);
1459         udelay(150);
1460
1461         if (INTEL_INFO(dev)->gen >= 4) {
1462                 I915_WRITE(DPLL_MD(crtc->pipe),
1463                            crtc->config.dpll_hw_state.dpll_md);
1464         } else {
1465                 /* The pixel multiplier can only be updated once the
1466                  * DPLL is enabled and the clocks are stable.
1467                  *
1468                  * So write it again.
1469                  */
1470                 I915_WRITE(reg, dpll);
1471         }
1472
1473         /* We do this three times for luck */
1474         I915_WRITE(reg, dpll);
1475         POSTING_READ(reg);
1476         udelay(150); /* wait for warmup */
1477         I915_WRITE(reg, dpll);
1478         POSTING_READ(reg);
1479         udelay(150); /* wait for warmup */
1480         I915_WRITE(reg, dpll);
1481         POSTING_READ(reg);
1482         udelay(150); /* wait for warmup */
1483 }
1484
1485 /**
1486  * i9xx_disable_pll - disable a PLL
1487  * @dev_priv: i915 private structure
1488  * @pipe: pipe PLL to disable
1489  *
1490  * Disable the PLL for @pipe, making sure the pipe is off first.
1491  *
1492  * Note!  This is for pre-ILK only.
1493  */
1494 static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1495 {
1496         /* Don't disable pipe A or the pipe A PLL if the pipe A force quirk needs them */
1497         if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1498                 return;
1499
1500         /* Make sure the pipe isn't still relying on us */
1501         assert_pipe_disabled(dev_priv, pipe);
1502
1503         I915_WRITE(DPLL(pipe), 0);
1504         POSTING_READ(DPLL(pipe));
1505 }
1506
1507 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1508 {
1509         u32 val = 0;
1510
1511         /* Make sure the pipe isn't still relying on us */
1512         assert_pipe_disabled(dev_priv, pipe);
1513
1514         /*
1515          * Leave integrated clock source and reference clock enabled for pipe B.
1516          * The latter is needed for VGA hotplug / manual detection.
1517          */
1518         if (pipe == PIPE_B)
1519                 val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
1520         I915_WRITE(DPLL(pipe), val);
1521         POSTING_READ(DPLL(pipe));
1522 }
1523
1524 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1525                 struct intel_digital_port *dport)
1526 {
1527         u32 port_mask;
1528
1529         switch (dport->port) {
1530         case PORT_B:
1531                 port_mask = DPLL_PORTB_READY_MASK;
1532                 break;
1533         case PORT_C:
1534                 port_mask = DPLL_PORTC_READY_MASK;
1535                 break;
1536         default:
1537                 BUG();
1538         }
1539
1540         if (wait_for((I915_READ(DPLL(0)) & port_mask) == 0, 1000))
1541                 WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
1542                      port_name(dport->port), I915_READ(DPLL(0)));
1543 }
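
/*
 * Note, restating the helper above rather than adding behaviour: the
 * per-port ready bits polled here live in the pipe A DPLL register on
 * VLV, which is why DPLL(0) is read regardless of whether port B or
 * port C is being brought up.
 */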
1544
1545 /**
1546  * ironlake_enable_shared_dpll - enable PCH PLL
1547  * @crtc: CRTC whose shared DPLL should be enabled
1549  *
1550  * The PCH PLL needs to be enabled before the PCH transcoder, since it
1551  * drives the transcoder clock.
1552  */
1553 static void ironlake_enable_shared_dpll(struct intel_crtc *crtc)
1554 {
1555         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1556         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1557
1558         /* PCH PLLs only available on ILK, SNB and IVB */
1559         BUG_ON(dev_priv->info->gen < 5);
1560         if (WARN_ON(pll == NULL))
1561                 return;
1562
1563         if (WARN_ON(pll->refcount == 0))
1564                 return;
1565
1566         DRM_DEBUG_KMS("enable %s (active %d, on? %d)for crtc %d\n",
1567                       pll->name, pll->active, pll->on,
1568                       crtc->base.base.id);
1569
1570         if (pll->active++) {
1571                 WARN_ON(!pll->on);
1572                 assert_shared_dpll_enabled(dev_priv, pll);
1573                 return;
1574         }
1575         WARN_ON(pll->on);
1576
1577         DRM_DEBUG_KMS("enabling %s\n", pll->name);
1578         pll->enable(dev_priv, pll);
1579         pll->on = true;
1580 }
1581
1582 static void intel_disable_shared_dpll(struct intel_crtc *crtc)
1583 {
1584         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1585         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1586
1587         /* PCH only available on ILK+ */
1588         BUG_ON(dev_priv->info->gen < 5);
1589         if (WARN_ON(pll == NULL))
1590                 return;
1591
1592         if (WARN_ON(pll->refcount == 0))
1593                 return;
1594
1595         DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
1596                       pll->name, pll->active, pll->on,
1597                       crtc->base.base.id);
1598
1599         if (WARN_ON(pll->active == 0)) {
1600                 assert_shared_dpll_disabled(dev_priv, pll);
1601                 return;
1602         }
1603
1604         assert_shared_dpll_enabled(dev_priv, pll);
1605         WARN_ON(!pll->on);
1606         if (--pll->active)
1607                 return;
1608
1609         DRM_DEBUG_KMS("disabling %s\n", pll->name);
1610         pll->disable(dev_priv, pll);
1611         pll->on = false;
1612 }
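
/*
 * Summary of the two helpers above (no new behaviour): pll->active acts
 * as an enable count across CRTCs sharing the same DPLL, so the hardware
 * enable hook only runs on the 0 -> 1 transition and the disable hook
 * only on the 1 -> 0 transition, while pll->refcount tracks how many
 * CRTCs hold a reference at all and is merely sanity-checked here.
 */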
1613
1614 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1615                                            enum pipe pipe)
1616 {
1617         struct drm_device *dev = dev_priv->dev;
1618         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1619         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1620         uint32_t reg, val, pipeconf_val;
1621
1622         /* PCH only available on ILK+ */
1623         BUG_ON(dev_priv->info->gen < 5);
1624
1625         /* Make sure PCH DPLL is enabled */
1626         assert_shared_dpll_enabled(dev_priv,
1627                                    intel_crtc_to_shared_dpll(intel_crtc));
1628
1629         /* FDI must be feeding us bits for PCH ports */
1630         assert_fdi_tx_enabled(dev_priv, pipe);
1631         assert_fdi_rx_enabled(dev_priv, pipe);
1632
1633         if (HAS_PCH_CPT(dev)) {
1634                 /* Workaround: Set the timing override bit before enabling the
1635                  * pch transcoder. */
1636                 reg = TRANS_CHICKEN2(pipe);
1637                 val = I915_READ(reg);
1638                 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1639                 I915_WRITE(reg, val);
1640         }
1641
1642         reg = PCH_TRANSCONF(pipe);
1643         val = I915_READ(reg);
1644         pipeconf_val = I915_READ(PIPECONF(pipe));
1645
1646         if (HAS_PCH_IBX(dev_priv->dev)) {
1647                 /*
1648                  * make the BPC in transcoder be consistent with
1649                  * that in pipeconf reg.
1650                  */
1651                 val &= ~PIPECONF_BPC_MASK;
1652                 val |= pipeconf_val & PIPECONF_BPC_MASK;
1653         }
1654
1655         val &= ~TRANS_INTERLACE_MASK;
1656         if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
1657                 if (HAS_PCH_IBX(dev_priv->dev) &&
1658                     intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
1659                         val |= TRANS_LEGACY_INTERLACED_ILK;
1660                 else
1661                         val |= TRANS_INTERLACED;
1662         } else
1663                 val |= TRANS_PROGRESSIVE;
1664
1665         I915_WRITE(reg, val | TRANS_ENABLE);
1666         if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1667                 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1668 }
1669
1670 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1671                                       enum transcoder cpu_transcoder)
1672 {
1673         u32 val, pipeconf_val;
1674
1675         /* PCH only available on ILK+ */
1676         BUG_ON(dev_priv->info->gen < 5);
1677
1678         /* FDI must be feeding us bits for PCH ports */
1679         assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
1680         assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
1681
1682         /* Workaround: set timing override bit. */
1683         val = I915_READ(_TRANSA_CHICKEN2);
1684         val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1685         I915_WRITE(_TRANSA_CHICKEN2, val);
1686
1687         val = TRANS_ENABLE;
1688         pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1689
1690         if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1691             PIPECONF_INTERLACED_ILK)
1692                 val |= TRANS_INTERLACED;
1693         else
1694                 val |= TRANS_PROGRESSIVE;
1695
1696         I915_WRITE(LPT_TRANSCONF, val);
1697         if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
1698                 DRM_ERROR("Failed to enable PCH transcoder\n");
1699 }
1700
1701 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1702                                             enum pipe pipe)
1703 {
1704         struct drm_device *dev = dev_priv->dev;
1705         uint32_t reg, val;
1706
1707         /* FDI relies on the transcoder */
1708         assert_fdi_tx_disabled(dev_priv, pipe);
1709         assert_fdi_rx_disabled(dev_priv, pipe);
1710
1711         /* Ports must be off as well */
1712         assert_pch_ports_disabled(dev_priv, pipe);
1713
1714         reg = PCH_TRANSCONF(pipe);
1715         val = I915_READ(reg);
1716         val &= ~TRANS_ENABLE;
1717         I915_WRITE(reg, val);
1718         /* wait for PCH transcoder off, transcoder state */
1719         if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
1720                 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
1721
1722         if (!HAS_PCH_IBX(dev)) {
1723                 /* Workaround: Clear the timing override chicken bit again. */
1724                 reg = TRANS_CHICKEN2(pipe);
1725                 val = I915_READ(reg);
1726                 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1727                 I915_WRITE(reg, val);
1728         }
1729 }
1730
1731 static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1732 {
1733         u32 val;
1734
1735         val = I915_READ(LPT_TRANSCONF);
1736         val &= ~TRANS_ENABLE;
1737         I915_WRITE(LPT_TRANSCONF, val);
1738         /* wait for PCH transcoder off, transcoder state */
1739         if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
1740                 DRM_ERROR("Failed to disable PCH transcoder\n");
1741
1742         /* Workaround: clear timing override bit. */
1743         val = I915_READ(_TRANSA_CHICKEN2);
1744         val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1745         I915_WRITE(_TRANSA_CHICKEN2, val);
1746 }
1747
1748 /**
1749  * intel_enable_pipe - enable a pipe, asserting requirements
1750  * @dev_priv: i915 private structure
1751  * @pipe: pipe to enable
1752  * @pch_port: on ILK+, is this pipe driving a PCH port or not
 * @dsi: is this pipe driven by the DSI PLL rather than a DPLL
1753  *
1754  * Enable @pipe, making sure that various hardware specific requirements
1755  * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1756  *
1757  * @pipe should be %PIPE_A or %PIPE_B.
1758  *
1759  * Will wait until the pipe is actually running (i.e. first vblank) before
1760  * returning.
1761  */
1762 static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1763                               bool pch_port, bool dsi)
1764 {
1765         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1766                                                                       pipe);
1767         enum pipe pch_transcoder;
1768         int reg;
1769         u32 val;
1770
1771         assert_planes_disabled(dev_priv, pipe);
1772         assert_cursor_disabled(dev_priv, pipe);
1773         assert_sprites_disabled(dev_priv, pipe);
1774
1775         if (HAS_PCH_LPT(dev_priv->dev))
1776                 pch_transcoder = TRANSCODER_A;
1777         else
1778                 pch_transcoder = pipe;
1779
1780         /*
1781          * A pipe without a PLL won't actually be able to drive bits from
1782          * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
1783          * need the check.
1784          */
1785         if (!HAS_PCH_SPLIT(dev_priv->dev))
1786                 if (dsi)
1787                         assert_dsi_pll_enabled(dev_priv);
1788                 else
1789                         assert_pll_enabled(dev_priv, pipe);
1790         else {
1791                 if (pch_port) {
1792                         /* if driving the PCH, we need FDI enabled */
1793                         assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
1794                         assert_fdi_tx_pll_enabled(dev_priv,
1795                                                   (enum pipe) cpu_transcoder);
1796                 }
1797                 /* FIXME: assert CPU port conditions for SNB+ */
1798         }
1799
1800         reg = PIPECONF(cpu_transcoder);
1801         val = I915_READ(reg);
1802         if (val & PIPECONF_ENABLE)
1803                 return;
1804
1805         I915_WRITE(reg, val | PIPECONF_ENABLE);
1806         intel_wait_for_vblank(dev_priv->dev, pipe);
1807 }
1808
1809 /**
1810  * intel_disable_pipe - disable a pipe, asserting requirements
1811  * @dev_priv: i915 private structure
1812  * @pipe: pipe to disable
1813  *
1814  * Disable @pipe, making sure that various hardware specific requirements
1815  * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
1816  *
1817  * @pipe should be %PIPE_A or %PIPE_B.
1818  *
1819  * Will wait until the pipe has shut down before returning.
1820  */
1821 static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1822                                enum pipe pipe)
1823 {
1824         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1825                                                                       pipe);
1826         int reg;
1827         u32 val;
1828
1829         /*
1830          * Make sure planes won't keep trying to pump pixels to us,
1831          * or we might hang the display.
1832          */
1833         assert_planes_disabled(dev_priv, pipe);
1834         assert_cursor_disabled(dev_priv, pipe);
1835         assert_sprites_disabled(dev_priv, pipe);
1836
1837         /* Don't disable pipe A or the pipe A PLL if the pipe A force quirk needs them */
1838         if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1839                 return;
1840
1841         reg = PIPECONF(cpu_transcoder);
1842         val = I915_READ(reg);
1843         if ((val & PIPECONF_ENABLE) == 0)
1844                 return;
1845
1846         I915_WRITE(reg, val & ~PIPECONF_ENABLE);
1847         intel_wait_for_pipe_off(dev_priv->dev, pipe);
1848 }
1849
1850 /*
1851  * Plane regs are double buffered, going from enabled->disabled needs a
1852  * trigger in order to latch.  The display address reg provides this.
1853  */
1854 void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
1855                                enum plane plane)
1856 {
1857         u32 reg = dev_priv->info->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
1858
1859         I915_WRITE(reg, I915_READ(reg));
1860         POSTING_READ(reg);
1861 }
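
/*
 * Usage note, paraphrasing the enable/disable helpers below: after
 * toggling DISPLAY_PLANE_ENABLE in DSPCNTR, callers re-write the surface
 * address register through this helper so the double-buffered plane
 * registers actually latch the new state.
 */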
1862
1863 /**
1864  * intel_enable_primary_plane - enable the primary plane on a given pipe
1865  * @dev_priv: i915 private structure
1866  * @plane: plane to enable
1867  * @pipe: pipe being fed
1868  *
1869  * Enable @plane on @pipe, making sure that @pipe is running first.
1870  */
1871 static void intel_enable_primary_plane(struct drm_i915_private *dev_priv,
1872                                        enum plane plane, enum pipe pipe)
1873 {
1874         struct intel_crtc *intel_crtc =
1875                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
1876         int reg;
1877         u32 val;
1878
1879         /* If the pipe isn't enabled, we can't pump pixels and may hang */
1880         assert_pipe_enabled(dev_priv, pipe);
1881
1882         WARN(intel_crtc->primary_enabled, "Primary plane already enabled\n");
1883
1884         intel_crtc->primary_enabled = true;
1885
1886         reg = DSPCNTR(plane);
1887         val = I915_READ(reg);
1888         if (val & DISPLAY_PLANE_ENABLE)
1889                 return;
1890
1891         I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
1892         intel_flush_primary_plane(dev_priv, plane);
1893         intel_wait_for_vblank(dev_priv->dev, pipe);
1894 }
1895
1896 /**
1897  * intel_disable_primary_plane - disable the primary plane
1898  * @dev_priv: i915 private structure
1899  * @plane: plane to disable
1900  * @pipe: pipe consuming the data
1901  *
1902  * Disable @plane; should be an independent operation.
1903  */
1904 static void intel_disable_primary_plane(struct drm_i915_private *dev_priv,
1905                                         enum plane plane, enum pipe pipe)
1906 {
1907         struct intel_crtc *intel_crtc =
1908                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
1909         int reg;
1910         u32 val;
1911
1912         WARN(!intel_crtc->primary_enabled, "Primary plane already disabled\n");
1913
1914         intel_crtc->primary_enabled = false;
1915
1916         reg = DSPCNTR(plane);
1917         val = I915_READ(reg);
1918         if ((val & DISPLAY_PLANE_ENABLE) == 0)
1919                 return;
1920
1921         I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
1922         intel_flush_primary_plane(dev_priv, plane);
1923         intel_wait_for_vblank(dev_priv->dev, pipe);
1924 }
1925
1926 static bool need_vtd_wa(struct drm_device *dev)
1927 {
1928 #ifdef CONFIG_INTEL_IOMMU
1929         if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
1930                 return true;
1931 #endif
1932         return false;
1933 }
1934
1935 static int intel_align_height(struct drm_device *dev, int height, bool tiled)
1936 {
1937         int tile_height;
1938
1939         tile_height = tiled ? (IS_GEN2(dev) ? 16 : 8) : 1;
1940         return ALIGN(height, tile_height);
1941 }
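
/*
 * Worked example with assumed numbers (not from any real call site): a
 * tiled framebuffer 1090 lines tall is padded to ALIGN(1090, 8) = 1096
 * lines on gen3+ and to ALIGN(1090, 16) = 1104 lines on gen2, while a
 * linear buffer keeps its height unchanged (tile_height = 1).
 */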
1942
1943 int
1944 intel_pin_and_fence_fb_obj(struct drm_device *dev,
1945                            struct drm_i915_gem_object *obj,
1946                            struct intel_ring_buffer *pipelined)
1947 {
1948         struct drm_i915_private *dev_priv = dev->dev_private;
1949         u32 alignment;
1950         int ret;
1951
1952         switch (obj->tiling_mode) {
1953         case I915_TILING_NONE:
1954                 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
1955                         alignment = 128 * 1024;
1956                 else if (INTEL_INFO(dev)->gen >= 4)
1957                         alignment = 4 * 1024;
1958                 else
1959                         alignment = 64 * 1024;
1960                 break;
1961         case I915_TILING_X:
1962                 /* pin() will align the object as required by fence */
1963                 alignment = 0;
1964                 break;
1965         case I915_TILING_Y:
1966                 WARN(1, "Y tiled bo slipped through, driver bug!\n");
1967                 return -EINVAL;
1968         default:
1969                 BUG();
1970         }
1971
1972         /* Note that the w/a also requires 64 PTE of padding following the
1973          * bo. We currently fill all unused PTE with the shadow page and so
1974          * we should always have valid PTE following the scanout preventing
1975          * the VT-d warning.
1976          */
1977         if (need_vtd_wa(dev) && alignment < 256 * 1024)
1978                 alignment = 256 * 1024;
1979
1980         dev_priv->mm.interruptible = false;
1981         ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
1982         if (ret)
1983                 goto err_interruptible;
1984
1985         /* Install a fence for tiled scan-out. Pre-i965 always needs a
1986          * fence, whereas 965+ only requires a fence if using
1987          * framebuffer compression.  For simplicity, we always install
1988          * a fence as the cost is not that onerous.
1989          */
1990         ret = i915_gem_object_get_fence(obj);
1991         if (ret)
1992                 goto err_unpin;
1993
1994         i915_gem_object_pin_fence(obj);
1995
1996         dev_priv->mm.interruptible = true;
1997         return 0;
1998
1999 err_unpin:
2000         i915_gem_object_unpin_from_display_plane(obj);
2001 err_interruptible:
2002         dev_priv->mm.interruptible = true;
2003         return ret;
2004 }
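
/*
 * Alignment summary, restating the switch and VT-d handling above with
 * no new policy: linear scanout needs 64 KiB on gen2/3, 128 KiB on
 * Broadwater/Crestline and 4 KiB on other gen4+ parts; X-tiled objects
 * let pin() pick the fence-mandated alignment, Y-tiled scanout is
 * rejected outright, and when the VT-d workaround applies the alignment
 * is raised to at least 256 KiB so display prefetch past the buffer
 * stays within valid (shadow-page backed) PTEs.
 */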
2005
2006 void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
2007 {
2008         i915_gem_object_unpin_fence(obj);
2009         i915_gem_object_unpin_from_display_plane(obj);
2010 }
2011
2012 /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
2013  * is assumed to be a power-of-two. */
2014 unsigned long intel_gen4_compute_page_offset(int *x, int *y,
2015                                              unsigned int tiling_mode,
2016                                              unsigned int cpp,
2017                                              unsigned int pitch)
2018 {
2019         if (tiling_mode != I915_TILING_NONE) {
2020                 unsigned int tile_rows, tiles;
2021
2022                 tile_rows = *y / 8;
2023                 *y %= 8;
2024
2025                 tiles = *x / (512/cpp);
2026                 *x %= 512/cpp;
2027
2028                 return tile_rows * pitch * 8 + tiles * 4096;
2029         } else {
2030                 unsigned int offset;
2031
2032                 offset = *y * pitch + *x * cpp;
2033                 *y = 0;
2034                 *x = (offset & 4095) / cpp;
2035                 return offset & -4096;
2036         }
2037 }
2038
2039 static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2040                              int x, int y)
2041 {
2042         struct drm_device *dev = crtc->dev;
2043         struct drm_i915_private *dev_priv = dev->dev_private;
2044         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2045         struct intel_framebuffer *intel_fb;
2046         struct drm_i915_gem_object *obj;
2047         int plane = intel_crtc->plane;
2048         unsigned long linear_offset;
2049         u32 dspcntr;
2050         u32 reg;
2051
2052         switch (plane) {
2053         case 0:
2054         case 1:
2055                 break;
2056         default:
2057                 DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
2058                 return -EINVAL;
2059         }
2060
2061         intel_fb = to_intel_framebuffer(fb);
2062         obj = intel_fb->obj;
2063
2064         reg = DSPCNTR(plane);
2065         dspcntr = I915_READ(reg);
2066         /* Mask out pixel format bits in case we change it */
2067         dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2068         switch (fb->pixel_format) {
2069         case DRM_FORMAT_C8:
2070                 dspcntr |= DISPPLANE_8BPP;
2071                 break;
2072         case DRM_FORMAT_XRGB1555:
2073         case DRM_FORMAT_ARGB1555:
2074                 dspcntr |= DISPPLANE_BGRX555;
2075                 break;
2076         case DRM_FORMAT_RGB565:
2077                 dspcntr |= DISPPLANE_BGRX565;
2078                 break;
2079         case DRM_FORMAT_XRGB8888:
2080         case DRM_FORMAT_ARGB8888:
2081                 dspcntr |= DISPPLANE_BGRX888;
2082                 break;
2083         case DRM_FORMAT_XBGR8888:
2084         case DRM_FORMAT_ABGR8888:
2085                 dspcntr |= DISPPLANE_RGBX888;
2086                 break;
2087         case DRM_FORMAT_XRGB2101010:
2088         case DRM_FORMAT_ARGB2101010:
2089                 dspcntr |= DISPPLANE_BGRX101010;
2090                 break;
2091         case DRM_FORMAT_XBGR2101010:
2092         case DRM_FORMAT_ABGR2101010:
2093                 dspcntr |= DISPPLANE_RGBX101010;
2094                 break;
2095         default:
2096                 BUG();
2097         }
2098
2099         if (INTEL_INFO(dev)->gen >= 4) {
2100                 if (obj->tiling_mode != I915_TILING_NONE)
2101                         dspcntr |= DISPPLANE_TILED;
2102                 else
2103                         dspcntr &= ~DISPPLANE_TILED;
2104         }
2105
2106         if (IS_G4X(dev))
2107                 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2108
2109         I915_WRITE(reg, dspcntr);
2110
2111         linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2112
2113         if (INTEL_INFO(dev)->gen >= 4) {
2114                 intel_crtc->dspaddr_offset =
2115                         intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
2116                                                        fb->bits_per_pixel / 8,
2117                                                        fb->pitches[0]);
2118                 linear_offset -= intel_crtc->dspaddr_offset;
2119         } else {
2120                 intel_crtc->dspaddr_offset = linear_offset;
2121         }
2122
2123         DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2124                       i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2125                       fb->pitches[0]);
2126         I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2127         if (INTEL_INFO(dev)->gen >= 4) {
2128                 I915_WRITE(DSPSURF(plane),
2129                            i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2130                 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2131                 I915_WRITE(DSPLINOFF(plane), linear_offset);
2132         } else
2133                 I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
2134         POSTING_READ(reg);
2135
2136         return 0;
2137 }
2138
2139 static int ironlake_update_plane(struct drm_crtc *crtc,
2140                                  struct drm_framebuffer *fb, int x, int y)
2141 {
2142         struct drm_device *dev = crtc->dev;
2143         struct drm_i915_private *dev_priv = dev->dev_private;
2144         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2145         struct intel_framebuffer *intel_fb;
2146         struct drm_i915_gem_object *obj;
2147         int plane = intel_crtc->plane;
2148         unsigned long linear_offset;
2149         u32 dspcntr;
2150         u32 reg;
2151
2152         switch (plane) {
2153         case 0:
2154         case 1:
2155         case 2:
2156                 break;
2157         default:
2158                 DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
2159                 return -EINVAL;
2160         }
2161
2162         intel_fb = to_intel_framebuffer(fb);
2163         obj = intel_fb->obj;
2164
2165         reg = DSPCNTR(plane);
2166         dspcntr = I915_READ(reg);
2167         /* Mask out pixel format bits in case we change it */
2168         dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2169         switch (fb->pixel_format) {
2170         case DRM_FORMAT_C8:
2171                 dspcntr |= DISPPLANE_8BPP;
2172                 break;
2173         case DRM_FORMAT_RGB565:
2174                 dspcntr |= DISPPLANE_BGRX565;
2175                 break;
2176         case DRM_FORMAT_XRGB8888:
2177         case DRM_FORMAT_ARGB8888:
2178                 dspcntr |= DISPPLANE_BGRX888;
2179                 break;
2180         case DRM_FORMAT_XBGR8888:
2181         case DRM_FORMAT_ABGR8888:
2182                 dspcntr |= DISPPLANE_RGBX888;
2183                 break;
2184         case DRM_FORMAT_XRGB2101010:
2185         case DRM_FORMAT_ARGB2101010:
2186                 dspcntr |= DISPPLANE_BGRX101010;
2187                 break;
2188         case DRM_FORMAT_XBGR2101010:
2189         case DRM_FORMAT_ABGR2101010:
2190                 dspcntr |= DISPPLANE_RGBX101010;
2191                 break;
2192         default:
2193                 BUG();
2194         }
2195
2196         if (obj->tiling_mode != I915_TILING_NONE)
2197                 dspcntr |= DISPPLANE_TILED;
2198         else
2199                 dspcntr &= ~DISPPLANE_TILED;
2200
2201         if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2202                 dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
2203         else
2204                 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2205
2206         I915_WRITE(reg, dspcntr);
2207
2208         linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2209         intel_crtc->dspaddr_offset =
2210                 intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
2211                                                fb->bits_per_pixel / 8,
2212                                                fb->pitches[0]);
2213         linear_offset -= intel_crtc->dspaddr_offset;
2214
2215         DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2216                       i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2217                       fb->pitches[0]);
2218         I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2219         I915_WRITE(DSPSURF(plane),
2220                    i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2221         if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2222                 I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2223         } else {
2224                 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2225                 I915_WRITE(DSPLINOFF(plane), linear_offset);
2226         }
2227         POSTING_READ(reg);
2228
2229         return 0;
2230 }
2231
2232 /* Assume fb object is pinned & idle & fenced and just update base pointers */
2233 static int
2234 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2235                            int x, int y, enum mode_set_atomic state)
2236 {
2237         struct drm_device *dev = crtc->dev;
2238         struct drm_i915_private *dev_priv = dev->dev_private;
2239
2240         if (dev_priv->display.disable_fbc)
2241                 dev_priv->display.disable_fbc(dev);
2242         intel_increase_pllclock(crtc);
2243
2244         return dev_priv->display.update_plane(crtc, fb, x, y);
2245 }
2246
2247 void intel_display_handle_reset(struct drm_device *dev)
2248 {
2249         struct drm_i915_private *dev_priv = dev->dev_private;
2250         struct drm_crtc *crtc;
2251
2252         /*
2253          * Flips in the rings have been nuked by the reset,
2254          * so complete all pending flips so that user space
2255          * will get its events and not get stuck.
2256          *
2257          * Also update the base address of all primary
2258          * planes to the last fb to make sure we're
2259          * showing the correct fb after a reset.
2260          *
2261          * Need to make two loops over the crtcs so that we
2262          * don't try to grab a crtc mutex before the
2263          * pending_flip_queue really got woken up.
2264          */
2265
2266         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2267                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2268                 enum plane plane = intel_crtc->plane;
2269
2270                 intel_prepare_page_flip(dev, plane);
2271                 intel_finish_page_flip_plane(dev, plane);
2272         }
2273
2274         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2275                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2276
2277                 mutex_lock(&crtc->mutex);
2278                 /*
2279                  * FIXME: Once we have proper support for primary planes (and
2280                  * disabling them without disabling the entire crtc) allow again
2281                  * a NULL crtc->fb.
2282                  */
2283                 if (intel_crtc->active && crtc->fb)
2284                         dev_priv->display.update_plane(crtc, crtc->fb,
2285                                                        crtc->x, crtc->y);
2286                 mutex_unlock(&crtc->mutex);
2287         }
2288 }
2289
2290 static int
2291 intel_finish_fb(struct drm_framebuffer *old_fb)
2292 {
2293         struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
2294         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2295         bool was_interruptible = dev_priv->mm.interruptible;
2296         int ret;
2297
2298         /* Big Hammer, we also need to ensure that any pending
2299          * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2300          * current scanout is retired before unpinning the old
2301          * framebuffer.
2302          *
2303          * This should only fail upon a hung GPU, in which case we
2304          * can safely continue.
2305          */
2306         dev_priv->mm.interruptible = false;
2307         ret = i915_gem_object_finish_gpu(obj);
2308         dev_priv->mm.interruptible = was_interruptible;
2309
2310         return ret;
2311 }
2312
2313 static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y)
2314 {
2315         struct drm_device *dev = crtc->dev;
2316         struct drm_i915_master_private *master_priv;
2317         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2318
2319         if (!dev->primary->master)
2320                 return;
2321
2322         master_priv = dev->primary->master->driver_priv;
2323         if (!master_priv->sarea_priv)
2324                 return;
2325
2326         switch (intel_crtc->pipe) {
2327         case 0:
2328                 master_priv->sarea_priv->pipeA_x = x;
2329                 master_priv->sarea_priv->pipeA_y = y;
2330                 break;
2331         case 1:
2332                 master_priv->sarea_priv->pipeB_x = x;
2333                 master_priv->sarea_priv->pipeB_y = y;
2334                 break;
2335         default:
2336                 break;
2337         }
2338 }
2339
2340 static int
2341 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2342                     struct drm_framebuffer *fb)
2343 {
2344         struct drm_device *dev = crtc->dev;
2345         struct drm_i915_private *dev_priv = dev->dev_private;
2346         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2347         struct drm_framebuffer *old_fb;
2348         int ret;
2349
2350         /* no fb bound */
2351         if (!fb) {
2352                 DRM_ERROR("No FB bound\n");
2353                 return 0;
2354         }
2355
2356         if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
2357                 DRM_ERROR("no plane for crtc: plane %c, num_pipes %d\n",
2358                           plane_name(intel_crtc->plane),
2359                           INTEL_INFO(dev)->num_pipes);
2360                 return -EINVAL;
2361         }
2362
2363         mutex_lock(&dev->struct_mutex);
2364         ret = intel_pin_and_fence_fb_obj(dev,
2365                                          to_intel_framebuffer(fb)->obj,
2366                                          NULL);
2367         if (ret != 0) {
2368                 mutex_unlock(&dev->struct_mutex);
2369                 DRM_ERROR("pin & fence failed\n");
2370                 return ret;
2371         }
2372
2373         /*
2374          * Update pipe size and adjust fitter if needed: the reason for this is
2375          * that in compute_mode_changes we check the native mode (not the pfit
2376          * mode) to see if we can flip rather than do a full mode set. In the
2377          * fastboot case, we'll flip, but if we don't update the pipesrc and
2378          * pfit state, we'll end up with a big fb scanned out into the wrong
2379          * sized surface.
2380          *
2381          * To fix this properly, we need to hoist the checks up into
2382          * compute_mode_changes (or above), check the actual pfit state and
2383          * whether the platform allows pfit disable with pipe active, and only
2384          * then update the pipesrc and pfit state, even on the flip path.
2385          */
2386         if (i915_fastboot) {
2387                 const struct drm_display_mode *adjusted_mode =
2388                         &intel_crtc->config.adjusted_mode;
2389
2390                 I915_WRITE(PIPESRC(intel_crtc->pipe),
2391                            ((adjusted_mode->crtc_hdisplay - 1) << 16) |
2392                            (adjusted_mode->crtc_vdisplay - 1));
2393                 if (!intel_crtc->config.pch_pfit.enabled &&
2394                     (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
2395                      intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2396                         I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
2397                         I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
2398                         I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
2399                 }
2400                 intel_crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
2401                 intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
2402         }
2403
2404         ret = dev_priv->display.update_plane(crtc, fb, x, y);
2405         if (ret) {
2406                 intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
2407                 mutex_unlock(&dev->struct_mutex);
2408                 DRM_ERROR("failed to update base address\n");
2409                 return ret;
2410         }
2411
2412         old_fb = crtc->fb;
2413         crtc->fb = fb;
2414         crtc->x = x;
2415         crtc->y = y;
2416
2417         if (old_fb) {
2418                 if (intel_crtc->active && old_fb != fb)
2419                         intel_wait_for_vblank(dev, intel_crtc->pipe);
2420                 intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
2421         }
2422
2423         intel_update_fbc(dev);
2424         intel_edp_psr_update(dev);
2425         mutex_unlock(&dev->struct_mutex);
2426
2427         intel_crtc_update_sarea_pos(crtc, x, y);
2428
2429         return 0;
2430 }
2431
2432 static void intel_fdi_normal_train(struct drm_crtc *crtc)
2433 {
2434         struct drm_device *dev = crtc->dev;
2435         struct drm_i915_private *dev_priv = dev->dev_private;
2436         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2437         int pipe = intel_crtc->pipe;
2438         u32 reg, temp;
2439
2440         /* enable normal train */
2441         reg = FDI_TX_CTL(pipe);
2442         temp = I915_READ(reg);
2443         if (IS_IVYBRIDGE(dev)) {
2444                 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2445                 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
2446         } else {
2447                 temp &= ~FDI_LINK_TRAIN_NONE;
2448                 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
2449         }
2450         I915_WRITE(reg, temp);
2451
2452         reg = FDI_RX_CTL(pipe);
2453         temp = I915_READ(reg);
2454         if (HAS_PCH_CPT(dev)) {
2455                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2456                 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
2457         } else {
2458                 temp &= ~FDI_LINK_TRAIN_NONE;
2459                 temp |= FDI_LINK_TRAIN_NONE;
2460         }
2461         I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
2462
2463         /* wait one idle pattern time */
2464         POSTING_READ(reg);
2465         udelay(1000);
2466
2467         /* IVB wants error correction enabled */
2468         if (IS_IVYBRIDGE(dev))
2469                 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
2470                            FDI_FE_ERRC_ENABLE);
2471 }
2472
2473 static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
2474 {
2475         return crtc->base.enabled && crtc->active &&
2476                 crtc->config.has_pch_encoder;
2477 }
2478
2479 static void ivb_modeset_global_resources(struct drm_device *dev)
2480 {
2481         struct drm_i915_private *dev_priv = dev->dev_private;
2482         struct intel_crtc *pipe_B_crtc =
2483                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
2484         struct intel_crtc *pipe_C_crtc =
2485                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
2486         uint32_t temp;
2487
2488         /*
2489          * When everything is off disable fdi C so that we could enable fdi B
2490          * with all lanes. Note that we don't care about enabled pipes without
2491          * an enabled pch encoder.
2492          */
2493         if (!pipe_has_enabled_pch(pipe_B_crtc) &&
2494             !pipe_has_enabled_pch(pipe_C_crtc)) {
2495                 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
2496                 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
2497
2498                 temp = I915_READ(SOUTH_CHICKEN1);
2499                 temp &= ~FDI_BC_BIFURCATION_SELECT;
2500                 DRM_DEBUG_KMS("disabling fdi C rx\n");
2501                 I915_WRITE(SOUTH_CHICKEN1, temp);
2502         }
2503 }
2504
2505 /* The FDI link training functions for ILK/Ibexpeak. */
2506 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2507 {
2508         struct drm_device *dev = crtc->dev;
2509         struct drm_i915_private *dev_priv = dev->dev_private;
2510         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2511         int pipe = intel_crtc->pipe;
2512         int plane = intel_crtc->plane;
2513         u32 reg, temp, tries;
2514
2515         /* FDI needs bits from pipe & plane first */
2516         assert_pipe_enabled(dev_priv, pipe);
2517         assert_plane_enabled(dev_priv, plane);
2518
2519         /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
2520            for train result */
2521         reg = FDI_RX_IMR(pipe);
2522         temp = I915_READ(reg);
2523         temp &= ~FDI_RX_SYMBOL_LOCK;
2524         temp &= ~FDI_RX_BIT_LOCK;
2525         I915_WRITE(reg, temp);
2526         I915_READ(reg);
2527         udelay(150);
2528
2529         /* enable CPU FDI TX and PCH FDI RX */
2530         reg = FDI_TX_CTL(pipe);
2531         temp = I915_READ(reg);
2532         temp &= ~FDI_DP_PORT_WIDTH_MASK;
2533         temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2534         temp &= ~FDI_LINK_TRAIN_NONE;
2535         temp |= FDI_LINK_TRAIN_PATTERN_1;
2536         I915_WRITE(reg, temp | FDI_TX_ENABLE);
2537
2538         reg = FDI_RX_CTL(pipe);
2539         temp = I915_READ(reg);
2540         temp &= ~FDI_LINK_TRAIN_NONE;
2541         temp |= FDI_LINK_TRAIN_PATTERN_1;
2542         I915_WRITE(reg, temp | FDI_RX_ENABLE);
2543
2544         POSTING_READ(reg);
2545         udelay(150);
2546
2547         /* Ironlake workaround, enable clock pointer after FDI enable */
2548         I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2549         I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
2550                    FDI_RX_PHASE_SYNC_POINTER_EN);
2551
2552         reg = FDI_RX_IIR(pipe);
2553         for (tries = 0; tries < 5; tries++) {
2554                 temp = I915_READ(reg);
2555                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2556
2557                 if ((temp & FDI_RX_BIT_LOCK)) {
2558                         DRM_DEBUG_KMS("FDI train 1 done.\n");
2559                         I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2560                         break;
2561                 }
2562         }
2563         if (tries == 5)
2564                 DRM_ERROR("FDI train 1 fail!\n");
2565
2566         /* Train 2 */
2567         reg = FDI_TX_CTL(pipe);
2568         temp = I915_READ(reg);
2569         temp &= ~FDI_LINK_TRAIN_NONE;
2570         temp |= FDI_LINK_TRAIN_PATTERN_2;
2571         I915_WRITE(reg, temp);
2572
2573         reg = FDI_RX_CTL(pipe);
2574         temp = I915_READ(reg);
2575         temp &= ~FDI_LINK_TRAIN_NONE;
2576         temp |= FDI_LINK_TRAIN_PATTERN_2;
2577         I915_WRITE(reg, temp);
2578
2579         POSTING_READ(reg);
2580         udelay(150);
2581
2582         reg = FDI_RX_IIR(pipe);
2583         for (tries = 0; tries < 5; tries++) {
2584                 temp = I915_READ(reg);
2585                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2586
2587                 if (temp & FDI_RX_SYMBOL_LOCK) {
2588                         I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2589                         DRM_DEBUG_KMS("FDI train 2 done.\n");
2590                         break;
2591                 }
2592         }
2593         if (tries == 5)
2594                 DRM_ERROR("FDI train 2 fail!\n");
2595
2596         DRM_DEBUG_KMS("FDI train done\n");
2597
2598 }
2599
2600 static const int snb_b_fdi_train_param[] = {
2601         FDI_LINK_TRAIN_400MV_0DB_SNB_B,
2602         FDI_LINK_TRAIN_400MV_6DB_SNB_B,
2603         FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
2604         FDI_LINK_TRAIN_800MV_0DB_SNB_B,
2605 };
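
/*
 * Usage note, summarising the training loops below: gen6_fdi_link_train()
 * walks this voltage-swing / pre-emphasis table in order, retrying each
 * entry up to five times, while the IVB manual trainer tries each entry
 * twice (indexing with j / 2) before giving up on the link training
 * attempt.
 */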
2606
2607 /* The FDI link training functions for SNB/Cougarpoint. */
2608 static void gen6_fdi_link_train(struct drm_crtc *crtc)
2609 {
2610         struct drm_device *dev = crtc->dev;
2611         struct drm_i915_private *dev_priv = dev->dev_private;
2612         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2613         int pipe = intel_crtc->pipe;
2614         u32 reg, temp, i, retry;
2615
2616         /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
2617            for train result */
2618         reg = FDI_RX_IMR(pipe);
2619         temp = I915_READ(reg);
2620         temp &= ~FDI_RX_SYMBOL_LOCK;
2621         temp &= ~FDI_RX_BIT_LOCK;
2622         I915_WRITE(reg, temp);
2623
2624         POSTING_READ(reg);
2625         udelay(150);
2626
2627         /* enable CPU FDI TX and PCH FDI RX */
2628         reg = FDI_TX_CTL(pipe);
2629         temp = I915_READ(reg);
2630         temp &= ~FDI_DP_PORT_WIDTH_MASK;
2631         temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2632         temp &= ~FDI_LINK_TRAIN_NONE;
2633         temp |= FDI_LINK_TRAIN_PATTERN_1;
2634         temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2635         /* SNB-B */
2636         temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2637         I915_WRITE(reg, temp | FDI_TX_ENABLE);
2638
2639         I915_WRITE(FDI_RX_MISC(pipe),
2640                    FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
2641
2642         reg = FDI_RX_CTL(pipe);
2643         temp = I915_READ(reg);
2644         if (HAS_PCH_CPT(dev)) {
2645                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2646                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2647         } else {
2648                 temp &= ~FDI_LINK_TRAIN_NONE;
2649                 temp |= FDI_LINK_TRAIN_PATTERN_1;
2650         }
2651         I915_WRITE(reg, temp | FDI_RX_ENABLE);
2652
2653         POSTING_READ(reg);
2654         udelay(150);
2655
2656         for (i = 0; i < 4; i++) {
2657                 reg = FDI_TX_CTL(pipe);
2658                 temp = I915_READ(reg);
2659                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2660                 temp |= snb_b_fdi_train_param[i];
2661                 I915_WRITE(reg, temp);
2662
2663                 POSTING_READ(reg);
2664                 udelay(500);
2665
2666                 for (retry = 0; retry < 5; retry++) {
2667                         reg = FDI_RX_IIR(pipe);
2668                         temp = I915_READ(reg);
2669                         DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2670                         if (temp & FDI_RX_BIT_LOCK) {
2671                                 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2672                                 DRM_DEBUG_KMS("FDI train 1 done.\n");
2673                                 break;
2674                         }
2675                         udelay(50);
2676                 }
2677                 if (retry < 5)
2678                         break;
2679         }
2680         if (i == 4)
2681                 DRM_ERROR("FDI train 1 fail!\n");
2682
2683         /* Train 2 */
2684         reg = FDI_TX_CTL(pipe);
2685         temp = I915_READ(reg);
2686         temp &= ~FDI_LINK_TRAIN_NONE;
2687         temp |= FDI_LINK_TRAIN_PATTERN_2;
2688         if (IS_GEN6(dev)) {
2689                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2690                 /* SNB-B */
2691                 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2692         }
2693         I915_WRITE(reg, temp);
2694
2695         reg = FDI_RX_CTL(pipe);
2696         temp = I915_READ(reg);
2697         if (HAS_PCH_CPT(dev)) {
2698                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2699                 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2700         } else {
2701                 temp &= ~FDI_LINK_TRAIN_NONE;
2702                 temp |= FDI_LINK_TRAIN_PATTERN_2;
2703         }
2704         I915_WRITE(reg, temp);
2705
2706         POSTING_READ(reg);
2707         udelay(150);
2708
2709         for (i = 0; i < 4; i++) {
2710                 reg = FDI_TX_CTL(pipe);
2711                 temp = I915_READ(reg);
2712                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2713                 temp |= snb_b_fdi_train_param[i];
2714                 I915_WRITE(reg, temp);
2715
2716                 POSTING_READ(reg);
2717                 udelay(500);
2718
2719                 for (retry = 0; retry < 5; retry++) {
2720                         reg = FDI_RX_IIR(pipe);
2721                         temp = I915_READ(reg);
2722                         DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2723                         if (temp & FDI_RX_SYMBOL_LOCK) {
2724                                 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2725                                 DRM_DEBUG_KMS("FDI train 2 done.\n");
2726                                 break;
2727                         }
2728                         udelay(50);
2729                 }
2730                 if (retry < 5)
2731                         break;
2732         }
2733         if (i == 4)
2734                 DRM_ERROR("FDI train 2 fail!\n");
2735
2736         DRM_DEBUG_KMS("FDI train done.\n");
2737 }
2738
2739 /* Manual link training for Ivy Bridge A0 parts */
2740 static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2741 {
2742         struct drm_device *dev = crtc->dev;
2743         struct drm_i915_private *dev_priv = dev->dev_private;
2744         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2745         int pipe = intel_crtc->pipe;
2746         u32 reg, temp, i, j;
2747
2748         /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
2749            for train result */
2750         reg = FDI_RX_IMR(pipe);
2751         temp = I915_READ(reg);
2752         temp &= ~FDI_RX_SYMBOL_LOCK;
2753         temp &= ~FDI_RX_BIT_LOCK;
2754         I915_WRITE(reg, temp);
2755
2756         POSTING_READ(reg);
2757         udelay(150);
2758
2759         DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
2760                       I915_READ(FDI_RX_IIR(pipe)));
2761
2762         /* Try each vswing and preemphasis setting twice before moving on */
2763         for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
2764                 /* disable first in case we need to retry */
2765                 reg = FDI_TX_CTL(pipe);
2766                 temp = I915_READ(reg);
2767                 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
2768                 temp &= ~FDI_TX_ENABLE;
2769                 I915_WRITE(reg, temp);
2770
2771                 reg = FDI_RX_CTL(pipe);
2772                 temp = I915_READ(reg);
2773                 temp &= ~FDI_LINK_TRAIN_AUTO;
2774                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2775                 temp &= ~FDI_RX_ENABLE;
2776                 I915_WRITE(reg, temp);
2777
2778                 /* enable CPU FDI TX and PCH FDI RX */
2779                 reg = FDI_TX_CTL(pipe);
2780                 temp = I915_READ(reg);
2781                 temp &= ~FDI_DP_PORT_WIDTH_MASK;
2782                 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2783                 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
2784                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2785                 temp |= snb_b_fdi_train_param[j/2];
2786                 temp |= FDI_COMPOSITE_SYNC;
2787                 I915_WRITE(reg, temp | FDI_TX_ENABLE);
2788
2789                 I915_WRITE(FDI_RX_MISC(pipe),
2790                            FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
2791
2792                 reg = FDI_RX_CTL(pipe);
2793                 temp = I915_READ(reg);
2794                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2795                 temp |= FDI_COMPOSITE_SYNC;
2796                 I915_WRITE(reg, temp | FDI_RX_ENABLE);
2797
2798                 POSTING_READ(reg);
2799                 udelay(1); /* should be 0.5us */
2800
2801                 for (i = 0; i < 4; i++) {
2802                         reg = FDI_RX_IIR(pipe);
2803                         temp = I915_READ(reg);
2804                         DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2805
2806                         if (temp & FDI_RX_BIT_LOCK ||
2807                             (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
2808                                 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2809                                 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
2810                                               i);
2811                                 break;
2812                         }
2813                         udelay(1); /* should be 0.5us */
2814                 }
2815                 if (i == 4) {
2816                         DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
2817                         continue;
2818                 }
2819
2820                 /* Train 2 */
2821                 reg = FDI_TX_CTL(pipe);
2822                 temp = I915_READ(reg);
2823                 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2824                 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
2825                 I915_WRITE(reg, temp);
2826
2827                 reg = FDI_RX_CTL(pipe);
2828                 temp = I915_READ(reg);
2829                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2830                 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2831                 I915_WRITE(reg, temp);
2832
2833                 POSTING_READ(reg);
2834                 udelay(2); /* should be 1.5us */
2835
2836                 for (i = 0; i < 4; i++) {
2837                         reg = FDI_RX_IIR(pipe);
2838                         temp = I915_READ(reg);
2839                         DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2840
2841                         if (temp & FDI_RX_SYMBOL_LOCK ||
2842                             (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
2843                                 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2844                                 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
2845                                               i);
2846                                 goto train_done;
2847                         }
2848                         udelay(2); /* should be 1.5us */
2849                 }
2850                 if (i == 4)
2851                         DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
2852         }
2853
2854 train_done:
2855         DRM_DEBUG_KMS("FDI train done.\n");
2856 }
2857
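/*
 * Bring up the FDI link clocks for this pipe: enable the PCH FDI receiver
 * PLL, switch the receiver from Rawclk to PCDclk, and make sure the CPU FDI
 * transmitter PLL is running, with warm-up delays in between.
 */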
2858 static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
2859 {
2860         struct drm_device *dev = intel_crtc->base.dev;
2861         struct drm_i915_private *dev_priv = dev->dev_private;
2862         int pipe = intel_crtc->pipe;
2863         u32 reg, temp;
2864
2865
2866         /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
2867         reg = FDI_RX_CTL(pipe);
2868         temp = I915_READ(reg);
2869         temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
2870         temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2871         temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
2872         I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
2873
2874         POSTING_READ(reg);
2875         udelay(200);
2876
2877         /* Switch from Rawclk to PCDclk */
2878         temp = I915_READ(reg);
2879         I915_WRITE(reg, temp | FDI_PCDCLK);
2880
2881         POSTING_READ(reg);
2882         udelay(200);
2883
2884         /* Enable CPU FDI TX PLL, always on for Ironlake */
2885         reg = FDI_TX_CTL(pipe);
2886         temp = I915_READ(reg);
2887         if ((temp & FDI_TX_PLL_ENABLE) == 0) {
2888                 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
2889
2890                 POSTING_READ(reg);
2891                 udelay(100);
2892         }
2893 }
2894
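/*
 * Counterpart to ironlake_fdi_pll_enable(): switch the PCH FDI receiver back
 * from PCDclk to Rawclk, then turn off the CPU FDI transmitter PLL and the
 * PCH FDI receiver PLL, waiting for the clocks to turn off afterwards.
 */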
2895 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
2896 {
2897         struct drm_device *dev = intel_crtc->base.dev;
2898         struct drm_i915_private *dev_priv = dev->dev_private;
2899         int pipe = intel_crtc->pipe;
2900         u32 reg, temp;
2901
2902         /* Switch from PCDclk to Rawclk */
2903         reg = FDI_RX_CTL(pipe);
2904         temp = I915_READ(reg);
2905         I915_WRITE(reg, temp & ~FDI_PCDCLK);
2906
2907         /* Disable CPU FDI TX PLL */
2908         reg = FDI_TX_CTL(pipe);
2909         temp = I915_READ(reg);
2910         I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
2911
2912         POSTING_READ(reg);
2913         udelay(100);
2914
2915         reg = FDI_RX_CTL(pipe);
2916         temp = I915_READ(reg);
2917         I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
2918
2919         /* Wait for the clocks to turn off. */
2920         POSTING_READ(reg);
2921         udelay(100);
2922 }
2923
2924 static void ironlake_fdi_disable(struct drm_crtc *crtc)
2925 {
2926         struct drm_device *dev = crtc->dev;
2927         struct drm_i915_private *dev_priv = dev->dev_private;
2928         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2929         int pipe = intel_crtc->pipe;
2930         u32 reg, temp;
2931
2932         /* disable CPU FDI tx and PCH FDI rx */
2933         reg = FDI_TX_CTL(pipe);
2934         temp = I915_READ(reg);
2935         I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2936         POSTING_READ(reg);
2937
2938         reg = FDI_RX_CTL(pipe);
2939         temp = I915_READ(reg);
2940         temp &= ~(0x7 << 16);
2941         temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
2942         I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2943
2944         POSTING_READ(reg);
2945         udelay(100);
2946
2947         /* Ironlake workaround, disable clock pointer after downing FDI */
2948         if (HAS_PCH_IBX(dev)) {
2949                 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2950         }
2951
2952         /* still set train pattern 1 */
2953         reg = FDI_TX_CTL(pipe);
2954         temp = I915_READ(reg);
2955         temp &= ~FDI_LINK_TRAIN_NONE;
2956         temp |= FDI_LINK_TRAIN_PATTERN_1;
2957         I915_WRITE(reg, temp);
2958
2959         reg = FDI_RX_CTL(pipe);
2960         temp = I915_READ(reg);
2961         if (HAS_PCH_CPT(dev)) {
2962                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2963                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2964         } else {
2965                 temp &= ~FDI_LINK_TRAIN_NONE;
2966                 temp |= FDI_LINK_TRAIN_PATTERN_1;
2967         }
2968         /* BPC in FDI rx is consistent with that in PIPECONF */
2969         temp &= ~(0x07 << 16);
2970         temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
2971         I915_WRITE(reg, temp);
2972
2973         POSTING_READ(reg);
2974         udelay(100);
2975 }
2976
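/*
 * Returns true while a pageflip is still outstanding on this crtc
 * (unpin_work is set). If a GPU reset is in progress, or one has happened
 * since the flip was queued, the flip will never complete normally, so it
 * is reported as no longer pending.
 */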
2977 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
2978 {
2979         struct drm_device *dev = crtc->dev;
2980         struct drm_i915_private *dev_priv = dev->dev_private;
2981         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2982         unsigned long flags;
2983         bool pending;
2984
2985         if (i915_reset_in_progress(&dev_priv->gpu_error) ||
2986             intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
2987                 return false;
2988
2989         spin_lock_irqsave(&dev->event_lock, flags);
2990         pending = to_intel_crtc(crtc)->unpin_work != NULL;
2991         spin_unlock_irqrestore(&dev->event_lock, flags);
2992
2993         return pending;
2994 }
2995
2996 bool intel_has_pending_fb_unpin(struct drm_device *dev)
2997 {
2998         struct intel_crtc *crtc;
2999
3000         /* Note that we don't need to be called with mode_config.lock here
3001          * as our list of CRTC objects is static for the lifetime of the
3002          * device and so cannot disappear as we iterate. Similarly, we can
3003          * happily treat the predicates as racy, atomic checks as userspace
3004          * cannot claim and pin a new fb without at least acquiring the
3005          * struct_mutex and so serialising with us.
3006          */
3007         list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
3008                 if (atomic_read(&crtc->unpin_work_count) == 0)
3009                         continue;
3010
3011                 if (crtc->unpin_work)
3012                         intel_wait_for_vblank(dev, crtc->pipe);
3013
3014                 return true;
3015         }
3016
3017         return false;
3018 }
3019
3020 static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3021 {
3022         struct drm_device *dev = crtc->dev;
3023         struct drm_i915_private *dev_priv = dev->dev_private;
3024
3025         if (crtc->fb == NULL)
3026                 return;
3027
3028         WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
3029
3030         wait_event(dev_priv->pending_flip_queue,
3031                    !intel_crtc_has_pending_flip(crtc));
3032
3033         mutex_lock(&dev->struct_mutex);
3034         intel_finish_fb(crtc->fb);
3035         mutex_unlock(&dev->struct_mutex);
3036 }
3037
3038 /* Program iCLKIP clock to the desired frequency */
3039 static void lpt_program_iclkip(struct drm_crtc *crtc)
3040 {
3041         struct drm_device *dev = crtc->dev;
3042         struct drm_i915_private *dev_priv = dev->dev_private;
3043         int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
3044         u32 divsel, phaseinc, auxdiv, phasedir = 0;
3045         u32 temp;
3046
3047         mutex_lock(&dev_priv->dpio_lock);
3048
3049         /* It is necessary to ungate the pixclk gate prior to programming
3050          * the divisors, and gate it back when it is done.
3051          */
3052         I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
3053
3054         /* Disable SSCCTL */
3055         intel_sbi_write(dev_priv, SBI_SSCCTL6,
3056                         intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
3057                                 SBI_SSCCTL_DISABLE,
3058                         SBI_ICLK);
3059
3060         /* 20MHz is a corner case which is out of range for the 7-bit divisor */
3061         if (clock == 20000) {
3062                 auxdiv = 1;
3063                 divsel = 0x41;
3064                 phaseinc = 0x20;
3065         } else {
3066                 /* The iCLK virtual clock root frequency is in MHz,
3067                  * but the adjusted_mode->crtc_clock is in KHz. To get the
3068                  * divisors, it is necessary to divide one by another, so we
3069                  * convert the virtual clock precision to KHz here for higher
3070                  * precision.
3071                  */
3072                 u32 iclk_virtual_root_freq = 172800 * 1000;
3073                 u32 iclk_pi_range = 64;
3074                 u32 desired_divisor, msb_divisor_value, pi_value;
3075
3076                 desired_divisor = (iclk_virtual_root_freq / clock);
3077                 msb_divisor_value = desired_divisor / iclk_pi_range;
3078                 pi_value = desired_divisor % iclk_pi_range;
3079
3080                 auxdiv = 0;
3081                 divsel = msb_divisor_value - 2;
3082                 phaseinc = pi_value;
3083         }
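        /*
         * Worked example (illustrative numbers only): a 108000 kHz clock
         * gives desired_divisor = 172800000 / 108000 = 1600, hence
         * msb_divisor_value = 1600 / 64 = 25 and pi_value = 1600 % 64 = 0,
         * i.e. divsel = 23, phaseinc = 0, auxdiv = 0.
         */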
3084
3085         /* This should not happen with any sane values */
3086         WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
3087                 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
3088         WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
3089                 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
3090
3091         DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
3092                         clock,
3093                         auxdiv,
3094                         divsel,
3095                         phasedir,
3096                         phaseinc);
3097
3098         /* Program SSCDIVINTPHASE6 */
3099         temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
3100         temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
3101         temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
3102         temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
3103         temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
3104         temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
3105         temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
3106         intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
3107
3108         /* Program SSCAUXDIV */
3109         temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
3110         temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
3111         temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
3112         intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
3113
3114         /* Enable modulator and associated divider */
3115         temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3116         temp &= ~SBI_SSCCTL_DISABLE;
3117         intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3118
3119         /* Wait for initialization time */
3120         udelay(24);
3121
3122         I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
3123
3124         mutex_unlock(&dev_priv->dpio_lock);
3125 }
3126
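/*
 * Copy the CPU transcoder timings (H/V total, blank, sync and vsyncshift)
 * straight over to the corresponding PCH transcoder registers.
 */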
3127 static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
3128                                                 enum pipe pch_transcoder)
3129 {
3130         struct drm_device *dev = crtc->base.dev;
3131         struct drm_i915_private *dev_priv = dev->dev_private;
3132         enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
3133
3134         I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
3135                    I915_READ(HTOTAL(cpu_transcoder)));
3136         I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
3137                    I915_READ(HBLANK(cpu_transcoder)));
3138         I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
3139                    I915_READ(HSYNC(cpu_transcoder)));
3140
3141         I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
3142                    I915_READ(VTOTAL(cpu_transcoder)));
3143         I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
3144                    I915_READ(VBLANK(cpu_transcoder)));
3145         I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
3146                    I915_READ(VSYNC(cpu_transcoder)));
3147         I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
3148                    I915_READ(VSYNCSHIFT(cpu_transcoder)));
3149 }
3150
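/*
 * Note: FDI_BC_BIFURCATION_SELECT appears to split the FDI B lanes between
 * pipes B and C (see ivybridge_update_fdi_bc_bifurcation() below); both FDI
 * B and C receivers must be disabled while the chicken bit is flipped.
 */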
3151 static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
3152 {
3153         struct drm_i915_private *dev_priv = dev->dev_private;
3154         uint32_t temp;
3155
3156         temp = I915_READ(SOUTH_CHICKEN1);
3157         if (temp & FDI_BC_BIFURCATION_SELECT)
3158                 return;
3159
3160         WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
3161         WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
3162
3163         temp |= FDI_BC_BIFURCATION_SELECT;
3164         DRM_DEBUG_KMS("enabling fdi C rx\n");
3165         I915_WRITE(SOUTH_CHICKEN1, temp);
3166         POSTING_READ(SOUTH_CHICKEN1);
3167 }
3168
3169 static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
3170 {
3171         struct drm_device *dev = intel_crtc->base.dev;
3172         struct drm_i915_private *dev_priv = dev->dev_private;
3173
3174         switch (intel_crtc->pipe) {
3175         case PIPE_A:
3176                 break;
3177         case PIPE_B:
3178                 if (intel_crtc->config.fdi_lanes > 2)
3179                         WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
3180                 else
3181                         cpt_enable_fdi_bc_bifurcation(dev);
3182
3183                 break;
3184         case PIPE_C:
3185                 cpt_enable_fdi_bc_bifurcation(dev);
3186
3187                 break;
3188         default:
3189                 BUG();
3190         }
3191 }
3192
3193 /*
3194  * Enable PCH resources required for PCH ports:
3195  *   - PCH PLLs
3196  *   - FDI training & RX/TX
3197  *   - update transcoder timings
3198  *   - DP transcoding bits
3199  *   - transcoder
3200  */
3201 static void ironlake_pch_enable(struct drm_crtc *crtc)
3202 {
3203         struct drm_device *dev = crtc->dev;
3204         struct drm_i915_private *dev_priv = dev->dev_private;
3205         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3206         int pipe = intel_crtc->pipe;
3207         u32 reg, temp;
3208
3209         assert_pch_transcoder_disabled(dev_priv, pipe);
3210
3211         if (IS_IVYBRIDGE(dev))
3212                 ivybridge_update_fdi_bc_bifurcation(intel_crtc);
3213
3214         /* Write the TU size bits before fdi link training, so that error
3215          * detection works. */
3216         I915_WRITE(FDI_RX_TUSIZE1(pipe),
3217                    I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
3218
3219         /* For PCH output, training FDI link */
3220         dev_priv->display.fdi_link_train(crtc);
3221
3222         /* We need to program the right clock selection before writing the pixel
3223          * multiplier into the DPLL. */
3224         if (HAS_PCH_CPT(dev)) {
3225                 u32 sel;
3226
3227                 temp = I915_READ(PCH_DPLL_SEL);
3228                 temp |= TRANS_DPLL_ENABLE(pipe);
3229                 sel = TRANS_DPLLB_SEL(pipe);
3230                 if (intel_crtc->config.shared_dpll == DPLL_ID_PCH_PLL_B)
3231                         temp |= sel;
3232                 else
3233                         temp &= ~sel;
3234                 I915_WRITE(PCH_DPLL_SEL, temp);
3235         }
3236
3237         /* XXX: PCH PLLs can be enabled any time before we enable the PCH
3238          * transcoder, and we actually should do this to not upset any PCH
3239          * transcoder that already uses the clock when we share it.
3240          *
3241          * Note that enable_shared_dpll tries to do the right thing, but
3242          * get_shared_dpll unconditionally resets the pll - we need that to have
3243          * the right LVDS enable sequence. */
3244         ironlake_enable_shared_dpll(intel_crtc);
3245
3246         /* set transcoder timing, panel must allow it */
3247         assert_panel_unlocked(dev_priv, pipe);
3248         ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
3249
3250         intel_fdi_normal_train(crtc);
3251
3252         /* For PCH DP, enable TRANS_DP_CTL */
3253         if (HAS_PCH_CPT(dev) &&
3254             (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
3255              intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
3256                 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
3257                 reg = TRANS_DP_CTL(pipe);
3258                 temp = I915_READ(reg);
3259                 temp &= ~(TRANS_DP_PORT_SEL_MASK |
3260                           TRANS_DP_SYNC_MASK |
3261                           TRANS_DP_BPC_MASK);
3262                 temp |= (TRANS_DP_OUTPUT_ENABLE |
3263                          TRANS_DP_ENH_FRAMING);
3264                 temp |= bpc << 9; /* same format but at 11:9 */
3265
3266                 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
3267                         temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
3268                 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
3269                         temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
3270
3271                 switch (intel_trans_dp_port_sel(crtc)) {
3272                 case PCH_DP_B:
3273                         temp |= TRANS_DP_PORT_SEL_B;
3274                         break;
3275                 case PCH_DP_C:
3276                         temp |= TRANS_DP_PORT_SEL_C;
3277                         break;
3278                 case PCH_DP_D:
3279                         temp |= TRANS_DP_PORT_SEL_D;
3280                         break;
3281                 default:
3282                         BUG();
3283                 }
3284
3285                 I915_WRITE(reg, temp);
3286         }
3287
3288         ironlake_enable_pch_transcoder(dev_priv, pipe);
3289 }
3290
3291 static void lpt_pch_enable(struct drm_crtc *crtc)
3292 {
3293         struct drm_device *dev = crtc->dev;
3294         struct drm_i915_private *dev_priv = dev->dev_private;
3295         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3296         enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
3297
3298         assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
3299
3300         lpt_program_iclkip(crtc);
3301
3302         /* Set transcoder timing. */
3303         ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
3304
3305         lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
3306 }
3307
3308 static void intel_put_shared_dpll(struct intel_crtc *crtc)
3309 {
3310         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
3311
3312         if (pll == NULL)
3313                 return;
3314
3315         if (pll->refcount == 0) {
3316                 WARN(1, "bad %s refcount\n", pll->name);
3317                 return;
3318         }
3319
3320         if (--pll->refcount == 0) {
3321                 WARN_ON(pll->on);
3322                 WARN_ON(pll->active);
3323         }
3324
3325         crtc->config.shared_dpll = DPLL_ID_PRIVATE;
3326 }
3327
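/*
 * Pick a shared DPLL for this crtc: on IBX the mapping is fixed by the pipe;
 * otherwise first try to reuse a PLL that is already in use and whose
 * programmed hw state matches, and only then fall back to an unused one.
 */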
3328 static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
3329 {
3330         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
3331         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
3332         enum intel_dpll_id i;
3333
3334         if (pll) {
3335                 DRM_DEBUG_KMS("CRTC:%d dropping existing %s\n",
3336                               crtc->base.base.id, pll->name);
3337                 intel_put_shared_dpll(crtc);
3338         }
3339
3340         if (HAS_PCH_IBX(dev_priv->dev)) {
3341                 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
3342                 i = (enum intel_dpll_id) crtc->pipe;
3343                 pll = &dev_priv->shared_dplls[i];
3344
3345                 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
3346                               crtc->base.base.id, pll->name);
3347
3348                 goto found;
3349         }
3350
3351         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3352                 pll = &dev_priv->shared_dplls[i];
3353
3354                 /* Only want to check enabled timings first */
3355                 if (pll->refcount == 0)
3356                         continue;
3357
3358                 if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state,
3359                            sizeof(pll->hw_state)) == 0) {
3360                         DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, active %d)\n",
3361                                       crtc->base.base.id,
3362                                       pll->name, pll->refcount, pll->active);
3363
3364                         goto found;
3365                 }
3366         }
3367
3368         /* Ok no matching timings, maybe there's a free one? */
3369         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3370                 pll = &dev_priv->shared_dplls[i];
3371                 if (pll->refcount == 0) {
3372                         DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
3373                                       crtc->base.base.id, pll->name);
3374                         goto found;
3375                 }
3376         }
3377
3378         return NULL;
3379
3380 found:
3381         crtc->config.shared_dpll = i;
3382         DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
3383                          pipe_name(crtc->pipe));
3384
3385         if (pll->active == 0) {
3386                 memcpy(&pll->hw_state, &crtc->config.dpll_hw_state,
3387                        sizeof(pll->hw_state));
3388
3389                 DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
3390                 WARN_ON(pll->on);
3391                 assert_shared_dpll_disabled(dev_priv, pll);
3392
3393                 pll->mode_set(dev_priv, pll);
3394         }
3395         pll->refcount++;
3396
3397         return pll;
3398 }
3399
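/*
 * Post-modeset sanity check for CPT: the pipe scanline counter (PIPEDSL)
 * should be advancing once the pipe is running; complain if it looks stuck.
 */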
3400 static void cpt_verify_modeset(struct drm_device *dev, int pipe)
3401 {
3402         struct drm_i915_private *dev_priv = dev->dev_private;
3403         int dslreg = PIPEDSL(pipe);
3404         u32 temp;
3405
3406         temp = I915_READ(dslreg);
3407         udelay(500);
3408         if (wait_for(I915_READ(dslreg) != temp, 5)) {
3409                 if (wait_for(I915_READ(dslreg) != temp, 5))
3410                         DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
3411         }
3412 }
3413
3414 static void ironlake_pfit_enable(struct intel_crtc *crtc)
3415 {
3416         struct drm_device *dev = crtc->base.dev;
3417         struct drm_i915_private *dev_priv = dev->dev_private;
3418         int pipe = crtc->pipe;
3419
3420         if (crtc->config.pch_pfit.enabled) {
3421                 /* Force use of hard-coded filter coefficients
3422                  * as some pre-programmed values are broken,
3423                  * e.g. x201.
3424                  */
3425                 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
3426                         I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
3427                                                  PF_PIPE_SEL_IVB(pipe));
3428                 else
3429                         I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
3430                 I915_WRITE(PF_WIN_POS(pipe), crtc->config.pch_pfit.pos);
3431                 I915_WRITE(PF_WIN_SZ(pipe), crtc->config.pch_pfit.size);
3432         }
3433 }
3434
3435 static void intel_enable_planes(struct drm_crtc *crtc)
3436 {
3437         struct drm_device *dev = crtc->dev;
3438         enum pipe pipe = to_intel_crtc(crtc)->pipe;
3439         struct intel_plane *intel_plane;
3440
3441         list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head)
3442                 if (intel_plane->pipe == pipe)
3443                         intel_plane_restore(&intel_plane->base);
3444 }
3445
3446 static void intel_disable_planes(struct drm_crtc *crtc)
3447 {
3448         struct drm_device *dev = crtc->dev;
3449         enum pipe pipe = to_intel_crtc(crtc)->pipe;
3450         struct intel_plane *intel_plane;
3451
3452         list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head)
3453                 if (intel_plane->pipe == pipe)
3454                         intel_plane_disable(&intel_plane->base);
3455 }
3456
3457 void hsw_enable_ips(struct intel_crtc *crtc)
3458 {
3459         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
3460
3461         if (!crtc->config.ips_enabled)
3462                 return;
3463
3464         /* We can only enable IPS after we enable a plane and wait for a vblank.
3465          * We guarantee that the plane is enabled by calling intel_enable_ips
3466          * only after intel_enable_plane. And intel_enable_plane already waits
3467          * for a vblank, so all we need to do here is to enable the IPS bit. */
3468         assert_plane_enabled(dev_priv, crtc->plane);
3469         if (IS_BROADWELL(crtc->base.dev)) {
3470                 mutex_lock(&dev_priv->rps.hw_lock);
3471                 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
3472                 mutex_unlock(&dev_priv->rps.hw_lock);
3473                 /* Quoting Art Runyan: "it's not safe to expect any particular
3474                  * value in IPS_CTL bit 31 after enabling IPS through the
3475                  * mailbox." Moreover, the mailbox may return a bogus state,
3476                  * so we need to just enable it and continue on.
3477                  */
3478         } else {
3479                 I915_WRITE(IPS_CTL, IPS_ENABLE);
3480                 /* The bit only becomes 1 in the next vblank, so this wait here
3481                  * is essentially intel_wait_for_vblank. If we don't have this
3482                  * and don't wait for vblanks until the end of crtc_enable, then
3483                  * the HW state readout code will complain that the expected
3484                  * IPS_CTL value is not the one we read. */
3485                 if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
3486                         DRM_ERROR("Timed out waiting for IPS enable\n");
3487         }
3488 }
3489
3490 void hsw_disable_ips(struct intel_crtc *crtc)
3491 {
3492         struct drm_device *dev = crtc->base.dev;
3493         struct drm_i915_private *dev_priv = dev->dev_private;
3494
3495         if (!crtc->config.ips_enabled)
3496                 return;
3497
3498         assert_plane_enabled(dev_priv, crtc->plane);
3499         if (IS_BROADWELL(crtc->base.dev)) {
3500                 mutex_lock(&dev_priv->rps.hw_lock);
3501                 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
3502                 mutex_unlock(&dev_priv->rps.hw_lock);
3503         } else {
3504                 I915_WRITE(IPS_CTL, 0);
3505                 POSTING_READ(IPS_CTL);
3506         }
3507
3508         /* We need to wait for a vblank before we can disable the plane. */
3509         intel_wait_for_vblank(dev, crtc->pipe);
3510 }
3511
3512 /** Loads the palette/gamma unit for the CRTC with the prepared values */
3513 static void intel_crtc_load_lut(struct drm_crtc *crtc)
3514 {
3515         struct drm_device *dev = crtc->dev;
3516         struct drm_i915_private *dev_priv = dev->dev_private;
3517         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3518         enum pipe pipe = intel_crtc->pipe;
3519         int palreg = PALETTE(pipe);
3520         int i;
3521         bool reenable_ips = false;
3522
3523         /* The clocks have to be on to load the palette. */
3524         if (!crtc->enabled || !intel_crtc->active)
3525                 return;
3526
3527         if (!HAS_PCH_SPLIT(dev_priv->dev)) {
3528                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
3529                         assert_dsi_pll_enabled(dev_priv);
3530                 else
3531                         assert_pll_enabled(dev_priv, pipe);
3532         }
3533
3534         /* use legacy palette for Ironlake */
3535         if (HAS_PCH_SPLIT(dev))
3536                 palreg = LGC_PALETTE(pipe);
3537
3538         /* Workaround: Do not read or write the pipe palette/gamma data while
3539          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
3540          */
3541         if (IS_HASWELL(dev) && intel_crtc->config.ips_enabled &&
3542             ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
3543              GAMMA_MODE_MODE_SPLIT)) {
3544                 hsw_disable_ips(intel_crtc);
3545                 reenable_ips = true;
3546         }
3547
3548         for (i = 0; i < 256; i++) {
3549                 I915_WRITE(palreg + 4 * i,
3550                            (intel_crtc->lut_r[i] << 16) |
3551                            (intel_crtc->lut_g[i] << 8) |
3552                            intel_crtc->lut_b[i]);
3553         }
3554
3555         if (reenable_ips)
3556                 hsw_enable_ips(intel_crtc);
3557 }
3558
3559 static void ironlake_crtc_enable(struct drm_crtc *crtc)
3560 {
3561         struct drm_device *dev = crtc->dev;
3562         struct drm_i915_private *dev_priv = dev->dev_private;
3563         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3564         struct intel_encoder *encoder;
3565         int pipe = intel_crtc->pipe;
3566         int plane = intel_crtc->plane;
3567
3568         WARN_ON(!crtc->enabled);
3569
3570         if (intel_crtc->active)
3571                 return;
3572
3573         intel_crtc->active = true;
3574
3575         intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
3576         intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
3577
3578         for_each_encoder_on_crtc(dev, crtc, encoder)
3579                 if (encoder->pre_enable)
3580                         encoder->pre_enable(encoder);
3581
3582         if (intel_crtc->config.has_pch_encoder) {
3583                 /* Note: FDI PLL enabling _must_ be done before we enable the
3584                  * cpu pipes, hence this is separate from all the other fdi/pch
3585                  * enabling. */
3586                 ironlake_fdi_pll_enable(intel_crtc);
3587         } else {
3588                 assert_fdi_tx_disabled(dev_priv, pipe);
3589                 assert_fdi_rx_disabled(dev_priv, pipe);
3590         }
3591
3592         ironlake_pfit_enable(intel_crtc);
3593
3594         /*
3595          * On ILK+ LUT must be loaded before the pipe is running but with
3596          * clocks enabled
3597          */
3598         intel_crtc_load_lut(crtc);
3599
3600         intel_update_watermarks(crtc);
3601         intel_enable_pipe(dev_priv, pipe,
3602                           intel_crtc->config.has_pch_encoder, false);
3603         intel_enable_primary_plane(dev_priv, plane, pipe);
3604         intel_enable_planes(crtc);
3605         intel_crtc_update_cursor(crtc, true);
3606
3607         if (intel_crtc->config.has_pch_encoder)
3608                 ironlake_pch_enable(crtc);
3609
3610         mutex_lock(&dev->struct_mutex);
3611         intel_update_fbc(dev);
3612         mutex_unlock(&dev->struct_mutex);
3613
3614         for_each_encoder_on_crtc(dev, crtc, encoder)
3615                 encoder->enable(encoder);
3616
3617         if (HAS_PCH_CPT(dev))
3618                 cpt_verify_modeset(dev, intel_crtc->pipe);
3619
3620         /*
3621          * There seems to be a race in PCH platform hw (at least on some
3622          * outputs) where an enabled pipe still completes any pageflip right
3623          * away (as if the pipe is off) instead of waiting for vblank. As soon
3624          * as the first vblank happened, everything works as expected. Hence just
3625          * wait for one vblank before returning to avoid strange things
3626          * happening.
3627          */
3628         intel_wait_for_vblank(dev, intel_crtc->pipe);
3629 }
3630
3631 /* IPS only exists on ULT machines and is tied to pipe A. */
3632 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
3633 {
3634         return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
3635 }
3636
3637 static void haswell_crtc_enable_planes(struct drm_crtc *crtc)
3638 {
3639         struct drm_device *dev = crtc->dev;
3640         struct drm_i915_private *dev_priv = dev->dev_private;
3641         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3642         int pipe = intel_crtc->pipe;
3643         int plane = intel_crtc->plane;
3644
3645         intel_enable_primary_plane(dev_priv, plane, pipe);
3646         intel_enable_planes(crtc);
3647         intel_crtc_update_cursor(crtc, true);
3648
3649         hsw_enable_ips(intel_crtc);
3650
3651         mutex_lock(&dev->struct_mutex);
3652         intel_update_fbc(dev);
3653         mutex_unlock(&dev->struct_mutex);
3654 }
3655
3656 static void haswell_crtc_disable_planes(struct drm_crtc *crtc)
3657 {
3658         struct drm_device *dev = crtc->dev;
3659         struct drm_i915_private *dev_priv = dev->dev_private;
3660         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3661         int pipe = intel_crtc->pipe;
3662         int plane = intel_crtc->plane;
3663
3664         intel_crtc_wait_for_pending_flips(crtc);
3665         drm_vblank_off(dev, pipe);
3666
3667         /* FBC must be disabled before disabling the plane on HSW. */
3668         if (dev_priv->fbc.plane == plane)
3669                 intel_disable_fbc(dev);
3670
3671         hsw_disable_ips(intel_crtc);
3672
3673         intel_crtc_update_cursor(crtc, false);
3674         intel_disable_planes(crtc);
3675         intel_disable_primary_plane(dev_priv, plane, pipe);
3676 }
3677
3678 /*
3679  * This implements the workaround described in the "notes" section of the mode
3680  * set sequence documentation. When going from no pipes or single pipe to
3681  * multiple pipes, and planes are enabled after the pipe, we need to wait at
3682  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
3683  */
3684 static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
3685 {
3686         struct drm_device *dev = crtc->base.dev;
3687         struct intel_crtc *crtc_it, *other_active_crtc = NULL;
3688
3689         /* We want to get the other_active_crtc only if there's only 1 other
3690          * active crtc. */
3691         list_for_each_entry(crtc_it, &dev->mode_config.crtc_list, base.head) {
3692                 if (!crtc_it->active || crtc_it == crtc)
3693                         continue;
3694
3695                 if (other_active_crtc)
3696                         return;
3697
3698                 other_active_crtc = crtc_it;
3699         }
3700         if (!other_active_crtc)
3701                 return;
3702
3703         intel_wait_for_vblank(dev, other_active_crtc->pipe);
3704         intel_wait_for_vblank(dev, other_active_crtc->pipe);
3705 }
3706
3707 static void haswell_crtc_enable(struct drm_crtc *crtc)
3708 {
3709         struct drm_device *dev = crtc->dev;
3710         struct drm_i915_private *dev_priv = dev->dev_private;
3711         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3712         struct intel_encoder *encoder;
3713         int pipe = intel_crtc->pipe;
3714
3715         WARN_ON(!crtc->enabled);
3716
3717         if (intel_crtc->active)
3718                 return;
3719
3720         intel_crtc->active = true;
3721
3722         intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
3723         if (intel_crtc->config.has_pch_encoder)
3724                 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
3725
3726         if (intel_crtc->config.has_pch_encoder)
3727                 dev_priv->display.fdi_link_train(crtc);
3728
3729         for_each_encoder_on_crtc(dev, crtc, encoder)
3730                 if (encoder->pre_enable)
3731                         encoder->pre_enable(encoder);
3732
3733         intel_ddi_enable_pipe_clock(intel_crtc);
3734
3735         ironlake_pfit_enable(intel_crtc);
3736
3737         /*
3738          * On ILK+ LUT must be loaded before the pipe is running but with
3739          * clocks enabled
3740          */
3741         intel_crtc_load_lut(crtc);
3742
3743         intel_ddi_set_pipe_settings(crtc);
3744         intel_ddi_enable_transcoder_func(crtc);
3745
3746         intel_update_watermarks(crtc);
3747         intel_enable_pipe(dev_priv, pipe,
3748                           intel_crtc->config.has_pch_encoder, false);
3749
3750         if (intel_crtc->config.has_pch_encoder)
3751                 lpt_pch_enable(crtc);
3752
3753         for_each_encoder_on_crtc(dev, crtc, encoder) {
3754                 encoder->enable(encoder);
3755                 intel_opregion_notify_encoder(encoder, true);
3756         }
3757
3758         /* If we change the relative order between pipe/planes enabling, we need
3759          * to change the workaround. */
3760         haswell_mode_set_planes_workaround(intel_crtc);
3761         haswell_crtc_enable_planes(crtc);
3762
3763         /*
3764          * There seems to be a race in PCH platform hw (at least on some
3765          * outputs) where an enabled pipe still completes any pageflip right
3766          * away (as if the pipe is off) instead of waiting for vblank. As soon
3767          * as the first vblank happened, everything works as expected. Hence just
3768          * wait for one vblank before returning to avoid strange things
3769          * happening.
3770          */
3771         intel_wait_for_vblank(dev, intel_crtc->pipe);
3772 }
3773
3774 static void ironlake_pfit_disable(struct intel_crtc *crtc)
3775 {
3776         struct drm_device *dev = crtc->base.dev;
3777         struct drm_i915_private *dev_priv = dev->dev_private;
3778         int pipe = crtc->pipe;
3779
3780         /* To avoid upsetting the power well on haswell, only disable the pfit if
3781          * it's in use. The hw state code will make sure we get this right. */
3782         if (crtc->config.pch_pfit.enabled) {
3783                 I915_WRITE(PF_CTL(pipe), 0);
3784                 I915_WRITE(PF_WIN_POS(pipe), 0);
3785                 I915_WRITE(PF_WIN_SZ(pipe), 0);
3786         }
3787 }
3788
3789 static void ironlake_crtc_disable(struct drm_crtc *crtc)
3790 {
3791         struct drm_device *dev = crtc->dev;
3792         struct drm_i915_private *dev_priv = dev->dev_private;
3793         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3794         struct intel_encoder *encoder;
3795         int pipe = intel_crtc->pipe;
3796         int plane = intel_crtc->plane;
3797         u32 reg, temp;
3798
3799
3800         if (!intel_crtc->active)
3801                 return;
3802
3803         for_each_encoder_on_crtc(dev, crtc, encoder)
3804                 encoder->disable(encoder);
3805
3806         intel_crtc_wait_for_pending_flips(crtc);
3807         drm_vblank_off(dev, pipe);
3808
3809         if (dev_priv->fbc.plane == plane)
3810                 intel_disable_fbc(dev);
3811
3812         intel_crtc_update_cursor(crtc, false);
3813         intel_disable_planes(crtc);
3814         intel_disable_primary_plane(dev_priv, plane, pipe);
3815
3816         if (intel_crtc->config.has_pch_encoder)
3817                 intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
3818
3819         intel_disable_pipe(dev_priv, pipe);
3820
3821         ironlake_pfit_disable(intel_crtc);
3822
3823         for_each_encoder_on_crtc(dev, crtc, encoder)
3824                 if (encoder->post_disable)
3825                         encoder->post_disable(encoder);
3826
3827         if (intel_crtc->config.has_pch_encoder) {
3828                 ironlake_fdi_disable(crtc);
3829
3830                 ironlake_disable_pch_transcoder(dev_priv, pipe);
3831                 intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
3832
3833                 if (HAS_PCH_CPT(dev)) {
3834                         /* disable TRANS_DP_CTL */
3835                         reg = TRANS_DP_CTL(pipe);
3836                         temp = I915_READ(reg);
3837                         temp &= ~(TRANS_DP_OUTPUT_ENABLE |
3838                                   TRANS_DP_PORT_SEL_MASK);
3839                         temp |= TRANS_DP_PORT_SEL_NONE;
3840                         I915_WRITE(reg, temp);
3841
3842                         /* disable DPLL_SEL */
3843                         temp = I915_READ(PCH_DPLL_SEL);
3844                         temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
3845                         I915_WRITE(PCH_DPLL_SEL, temp);
3846                 }
3847
3848                 /* disable PCH DPLL */
3849                 intel_disable_shared_dpll(intel_crtc);
3850
3851                 ironlake_fdi_pll_disable(intel_crtc);
3852         }
3853
3854         intel_crtc->active = false;
3855         intel_update_watermarks(crtc);
3856
3857         mutex_lock(&dev->struct_mutex);
3858         intel_update_fbc(dev);
3859         mutex_unlock(&dev->struct_mutex);
3860 }
3861
3862 static void haswell_crtc_disable(struct drm_crtc *crtc)
3863 {
3864         struct drm_device *dev = crtc->dev;
3865         struct drm_i915_private *dev_priv = dev->dev_private;
3866         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3867         struct intel_encoder *encoder;
3868         int pipe = intel_crtc->pipe;
3869         enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
3870
3871         if (!intel_crtc->active)
3872                 return;
3873
3874         haswell_crtc_disable_planes(crtc);
3875
3876         for_each_encoder_on_crtc(dev, crtc, encoder) {
3877                 intel_opregion_notify_encoder(encoder, false);
3878                 encoder->disable(encoder);
3879         }
3880
3881         if (intel_crtc->config.has_pch_encoder)
3882                 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
3883         intel_disable_pipe(dev_priv, pipe);
3884
3885         intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
3886
3887         ironlake_pfit_disable(intel_crtc);
3888
3889         intel_ddi_disable_pipe_clock(intel_crtc);
3890
3891         for_each_encoder_on_crtc(dev, crtc, encoder)
3892                 if (encoder->post_disable)
3893                         encoder->post_disable(encoder);
3894
3895         if (intel_crtc->config.has_pch_encoder) {
3896                 lpt_disable_pch_transcoder(dev_priv);
3897                 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
3898                 intel_ddi_fdi_disable(crtc);
3899         }
3900
3901         intel_crtc->active = false;
3902         intel_update_watermarks(crtc);
3903
3904         mutex_lock(&dev->struct_mutex);
3905         intel_update_fbc(dev);
3906         mutex_unlock(&dev->struct_mutex);
3907 }
3908
3909 static void ironlake_crtc_off(struct drm_crtc *crtc)
3910 {
3911         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3912         intel_put_shared_dpll(intel_crtc);
3913 }
3914
3915 static void haswell_crtc_off(struct drm_crtc *crtc)
3916 {
3917         intel_ddi_put_crtc_pll(crtc);
3918 }
3919
3920 static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3921 {
3922         if (!enable && intel_crtc->overlay) {
3923                 struct drm_device *dev = intel_crtc->base.dev;
3924                 struct drm_i915_private *dev_priv = dev->dev_private;
3925
3926                 mutex_lock(&dev->struct_mutex);
3927                 dev_priv->mm.interruptible = false;
3928                 (void) intel_overlay_switch_off(intel_crtc->overlay);
3929                 dev_priv->mm.interruptible = true;
3930                 mutex_unlock(&dev->struct_mutex);
3931         }
3932
3933         /* Let userspace switch the overlay on again. In most cases userspace
3934          * has to recompute where to put it anyway.
3935          */
3936 }
3937
3938 /**
3939  * g4x_fixup_plane - ugly workaround for G45 to fire up the hardware
3940  * cursor plane briefly if not already running after enabling the display
3941  * plane.
3942  * This workaround avoids occasional blank screens when self refresh is
3943  * enabled.
3944  */
3945 static void
3946 g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
3947 {
3948         u32 cntl = I915_READ(CURCNTR(pipe));
3949
3950         if ((cntl & CURSOR_MODE) == 0) {
3951                 u32 fw_blc_self = I915_READ(FW_BLC_SELF);
3952
3953                 I915_WRITE(FW_BLC_SELF, fw_blc_self & ~FW_BLC_SELF_EN);
3954                 I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
3955                 intel_wait_for_vblank(dev_priv->dev, pipe);
3956                 I915_WRITE(CURCNTR(pipe), cntl);
3957                 I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
3958                 I915_WRITE(FW_BLC_SELF, fw_blc_self);
3959         }
3960 }
3961
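/*
 * Program the GMCH panel fitter with the pre-computed ratios. As the asserts
 * below document, this may only be done while the pipe is still disabled.
 */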
3962 static void i9xx_pfit_enable(struct intel_crtc *crtc)
3963 {
3964         struct drm_device *dev = crtc->base.dev;
3965         struct drm_i915_private *dev_priv = dev->dev_private;
3966         struct intel_crtc_config *pipe_config = &crtc->config;
3967
3968         if (!crtc->config.gmch_pfit.control)
3969                 return;
3970
3971         /*
3972          * The panel fitter should only be adjusted whilst the pipe is disabled,
3973          * according to register description and PRM.
3974          */
3975         WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
3976         assert_pipe_disabled(dev_priv, crtc->pipe);
3977
3978         I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
3979         I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
3980
3981         /* Border color in case we don't scale up to the full screen. Black by
3982          * default, change to something else for debugging. */
3983         I915_WRITE(BCLRPAT(crtc->pipe), 0);
3984 }
3985
3986 int valleyview_get_vco(struct drm_i915_private *dev_priv)
3987 {
3988         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
3989
3990         /* Obtain SKU information */
3991         mutex_lock(&dev_priv->dpio_lock);
3992         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
3993                 CCK_FUSE_HPLL_FREQ_MASK;
3994         mutex_unlock(&dev_priv->dpio_lock);
3995
3996         return vco_freq[hpll_freq];
3997 }
3998
3999 /* Adjust CDclk dividers to allow high res or save power if possible */
4000 static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
4001 {
4002         struct drm_i915_private *dev_priv = dev->dev_private;
4003         u32 val, cmd;
4004
4005         if (cdclk >= 320) /* jump to highest voltage for 400MHz too */
4006                 cmd = 2;
4007         else if (cdclk == 266)
4008                 cmd = 1;
4009         else
4010                 cmd = 0;
4011
4012         mutex_lock(&dev_priv->rps.hw_lock);
4013         val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
4014         val &= ~DSPFREQGUAR_MASK;
4015         val |= (cmd << DSPFREQGUAR_SHIFT);
4016         vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
4017         if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
4018                       DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
4019                      50)) {
4020                 DRM_ERROR("timed out waiting for CDclk change\n");
4021         }
4022         mutex_unlock(&dev_priv->rps.hw_lock);
4023
4024         if (cdclk == 400) {
4025                 u32 divider, vco;
4026
4027                 vco = valleyview_get_vco(dev_priv);
4028                 divider = ((vco << 1) / cdclk) - 1;
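                /* e.g. (illustrative): vco = 800 gives divider = (800 * 2) / 400 - 1 = 3 */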
4029
4030                 mutex_lock(&dev_priv->dpio_lock);
4031                 /* adjust cdclk divider */
4032                 val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
4033                 val &= ~0xf;
4034                 val |= divider;
4035                 vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
4036                 mutex_unlock(&dev_priv->dpio_lock);
4037         }
4038
4039         mutex_lock(&dev_priv->dpio_lock);
4040         /* adjust self-refresh exit latency value */
4041         val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
4042         val &= ~0x7f;
4043
4044         /*
4045          * For high bandwidth configs, we set a higher latency in the bunit
4046          * so that the core display fetch happens in time to avoid underruns.
4047          */
4048         if (cdclk == 400)
4049                 val |= 4500 / 250; /* 4.5 usec */
4050         else
4051                 val |= 3000 / 250; /* 3.0 usec */
4052         vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
4053         mutex_unlock(&dev_priv->dpio_lock);
4054
4055         /* Since we changed the CDclk, we need to update the GMBUSFREQ too */
4056         intel_i2c_reset(dev);
4057 }
4058
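/*
 * Read back the current CDclk: the divider is the low 4 bits of
 * CCK_DISPLAY_CLOCK_CONTROL and cdclk = 2 * vco / (divider + 1), the inverse
 * of the divider calculation in valleyview_set_cdclk().
 */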
4059 static int valleyview_cur_cdclk(struct drm_i915_private *dev_priv)
4060 {
4061         int cur_cdclk, vco;
4062         int divider;
4063
4064         vco = valleyview_get_vco(dev_priv);
4065
4066         mutex_lock(&dev_priv->dpio_lock);
4067         divider = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
4068         mutex_unlock(&dev_priv->dpio_lock);
4069
4070         divider &= 0xf;
4071
4072         cur_cdclk = (vco << 1) / (divider + 1);
4073
4074         return cur_cdclk;
4075 }
4076
4077 static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
4078                                  int max_pixclk)
4079 {
4080         int cur_cdclk;
4081
4082         cur_cdclk = valleyview_cur_cdclk(dev_priv);
4083
4084         /*
4085          * Really only a few cases to deal with, as only 4 CDclks are supported:
4086          *   200MHz
4087          *   267MHz
4088          *   320MHz
4089          *   400MHz
4090          * So we check to see whether we're above 90% of the lower bin and
4091          * adjust if needed.
4092          */
4093         if (max_pixclk > 288000)
4094                 return 400;
4095         else if (max_pixclk > 240000)
4096                 return 320;
4097
4098         /* Looks like the 200MHz CDclk freq doesn't work on some configs */
4099         return 266;
4100 }
4101
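/*
 * Find the highest pixel clock across all pipes: use the pending pipe_config
 * for pipes included in this modeset and the current adjusted mode for any
 * other pipe that is already enabled.
 */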
4102 static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv,
4103                                  unsigned modeset_pipes,
4104                                  struct intel_crtc_config *pipe_config)
4105 {
4106         struct drm_device *dev = dev_priv->dev;
4107         struct intel_crtc *intel_crtc;
4108         int max_pixclk = 0;
4109
4110         list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
4111                             base.head) {
4112                 if (modeset_pipes & (1 << intel_crtc->pipe))
4113                         max_pixclk = max(max_pixclk,
4114                                          pipe_config->adjusted_mode.crtc_clock);
4115                 else if (intel_crtc->base.enabled)
4116                         max_pixclk = max(max_pixclk,
4117                                          intel_crtc->config.adjusted_mode.crtc_clock);
4118         }
4119
4120         return max_pixclk;
4121 }
4122
4123 static void valleyview_modeset_global_pipes(struct drm_device *dev,
4124                                             unsigned *prepare_pipes,
4125                                             unsigned modeset_pipes,
4126                                             struct intel_crtc_config *pipe_config)
4127 {
4128         struct drm_i915_private *dev_priv = dev->dev_private;
4129         struct intel_crtc *intel_crtc;
4130         int max_pixclk = intel_mode_max_pixclk(dev_priv, modeset_pipes,
4131                                                pipe_config);
4132         int cur_cdclk = valleyview_cur_cdclk(dev_priv);
4133
4134         if (valleyview_calc_cdclk(dev_priv, max_pixclk) == cur_cdclk)
4135                 return;
4136
4137         list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
4138                             base.head)
4139                 if (intel_crtc->base.enabled)
4140                         *prepare_pipes |= (1 << intel_crtc->pipe);
4141 }
4142
4143 static void valleyview_modeset_global_resources(struct drm_device *dev)
4144 {
4145         struct drm_i915_private *dev_priv = dev->dev_private;
4146         int max_pixclk = intel_mode_max_pixclk(dev_priv, 0, NULL);
4147         int cur_cdclk = valleyview_cur_cdclk(dev_priv);
4148         int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
4149
4150         if (req_cdclk != cur_cdclk)
4151                 valleyview_set_cdclk(dev, req_cdclk);
4152 }
4153
4154 static void valleyview_crtc_enable(struct drm_crtc *crtc)
4155 {
4156         struct drm_device *dev = crtc->dev;
4157         struct drm_i915_private *dev_priv = dev->dev_private;
4158         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4159         struct intel_encoder *encoder;
4160         int pipe = intel_crtc->pipe;
4161         int plane = intel_crtc->plane;
4162         bool is_dsi;
4163
4164         WARN_ON(!crtc->enabled);
4165
4166         if (intel_crtc->active)
4167                 return;
4168
4169         intel_crtc->active = true;
4170
4171         for_each_encoder_on_crtc(dev, crtc, encoder)
4172                 if (encoder->pre_pll_enable)
4173                         encoder->pre_pll_enable(encoder);
4174
4175         is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
4176
4177         if (!is_dsi)
4178                 vlv_enable_pll(intel_crtc);
4179
4180         for_each_encoder_on_crtc(dev, crtc, encoder)
4181                 if (encoder->pre_enable)
4182                         encoder->pre_enable(encoder);
4183
4184         i9xx_pfit_enable(intel_crtc);
4185
4186         intel_crtc_load_lut(crtc);
4187
4188         intel_update_watermarks(crtc);
4189         intel_enable_pipe(dev_priv, pipe, false, is_dsi);
4190         intel_enable_primary_plane(dev_priv, plane, pipe);
4191         intel_enable_planes(crtc);
4192         intel_crtc_update_cursor(crtc, true);
4193
4194         intel_update_fbc(dev);
4195
4196         for_each_encoder_on_crtc(dev, crtc, encoder)
4197                 encoder->enable(encoder);
4198 }
4199
4200 static void i9xx_crtc_enable(struct drm_crtc *crtc)
4201 {
4202         struct drm_device *dev = crtc->dev;
4203         struct drm_i915_private *dev_priv = dev->dev_private;
4204         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4205         struct intel_encoder *encoder;
4206         int pipe = intel_crtc->pipe;
4207         int plane = intel_crtc->plane;
4208
4209         WARN_ON(!crtc->enabled);
4210
4211         if (intel_crtc->active)
4212                 return;
4213
4214         intel_crtc->active = true;
4215
4216         for_each_encoder_on_crtc(dev, crtc, encoder)
4217                 if (encoder->pre_enable)
4218                         encoder->pre_enable(encoder);
4219
4220         i9xx_enable_pll(intel_crtc);
4221
4222         i9xx_pfit_enable(intel_crtc);
4223
4224         intel_crtc_load_lut(crtc);
4225
4226         intel_update_watermarks(crtc);
4227         intel_enable_pipe(dev_priv, pipe, false, false);
4228         intel_enable_primary_plane(dev_priv, plane, pipe);
4229         intel_enable_planes(crtc);
4230         /* The fixup needs to happen before cursor is enabled */
4231         if (IS_G4X(dev))
4232                 g4x_fixup_plane(dev_priv, pipe);
4233         intel_crtc_update_cursor(crtc, true);
4234
4235         /* Give the overlay scaler a chance to enable if it's on this pipe */
4236         intel_crtc_dpms_overlay(intel_crtc, true);
4237
4238         intel_update_fbc(dev);
4239
4240         for_each_encoder_on_crtc(dev, crtc, encoder)
4241                 encoder->enable(encoder);
4242 }
4243
4244 static void i9xx_pfit_disable(struct intel_crtc *crtc)
4245 {
4246         struct drm_device *dev = crtc->base.dev;
4247         struct drm_i915_private *dev_priv = dev->dev_private;
4248
4249         if (!crtc->config.gmch_pfit.control)
4250                 return;
4251
4252         assert_pipe_disabled(dev_priv, crtc->pipe);
4253
4254         DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
4255                          I915_READ(PFIT_CONTROL));
4256         I915_WRITE(PFIT_CONTROL, 0);
4257 }
4258
4259 static void i9xx_crtc_disable(struct drm_crtc *crtc)
4260 {
4261         struct drm_device *dev = crtc->dev;
4262         struct drm_i915_private *dev_priv = dev->dev_private;
4263         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4264         struct intel_encoder *encoder;
4265         int pipe = intel_crtc->pipe;
4266         int plane = intel_crtc->plane;
4267
4268         if (!intel_crtc->active)
4269                 return;
4270
4271         for_each_encoder_on_crtc(dev, crtc, encoder)
4272                 encoder->disable(encoder);
4273
4274         /* Give the overlay scaler a chance to disable if it's on this pipe */
4275         intel_crtc_wait_for_pending_flips(crtc);
4276         drm_vblank_off(dev, pipe);
4277
4278         if (dev_priv->fbc.plane == plane)
4279                 intel_disable_fbc(dev);
4280
4281         intel_crtc_dpms_overlay(intel_crtc, false);
4282         intel_crtc_update_cursor(crtc, false);
4283         intel_disable_planes(crtc);
4284         intel_disable_primary_plane(dev_priv, plane, pipe);
4285
4286         intel_disable_pipe(dev_priv, pipe);
4287
4288         i9xx_pfit_disable(intel_crtc);
4289
4290         for_each_encoder_on_crtc(dev, crtc, encoder)
4291                 if (encoder->post_disable)
4292                         encoder->post_disable(encoder);
4293
4294         if (IS_VALLEYVIEW(dev) && !intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
4295                 vlv_disable_pll(dev_priv, pipe);
4296         else if (!IS_VALLEYVIEW(dev))
4297                 i9xx_disable_pll(dev_priv, pipe);
4298
4299         intel_crtc->active = false;
4300         intel_update_watermarks(crtc);
4301
4302         intel_update_fbc(dev);
4303 }
4304
4305 static void i9xx_crtc_off(struct drm_crtc *crtc)
4306 {
4307 }
4308
4309 static void intel_crtc_update_sarea(struct drm_crtc *crtc,
4310                                     bool enabled)
4311 {
4312         struct drm_device *dev = crtc->dev;
4313         struct drm_i915_master_private *master_priv;
4314         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4315         int pipe = intel_crtc->pipe;
4316
4317         if (!dev->primary->master)
4318                 return;
4319
4320         master_priv = dev->primary->master->driver_priv;
4321         if (!master_priv->sarea_priv)
4322                 return;
4323
4324         switch (pipe) {
4325         case 0:
4326                 master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
4327                 master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
4328                 break;
4329         case 1:
4330                 master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
4331                 master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
4332                 break;
4333         default:
4334                 DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
4335                 break;
4336         }
4337 }
4338
4339 /**
4340  * Sets the power management mode of the pipe and plane.
4341  */
4342 void intel_crtc_update_dpms(struct drm_crtc *crtc)
4343 {
4344         struct drm_device *dev = crtc->dev;
4345         struct drm_i915_private *dev_priv = dev->dev_private;
4346         struct intel_encoder *intel_encoder;
4347         bool enable = false;
4348
4349         for_each_encoder_on_crtc(dev, crtc, intel_encoder)
4350                 enable |= intel_encoder->connectors_active;
4351
4352         if (enable)
4353                 dev_priv->display.crtc_enable(crtc);
4354         else
4355                 dev_priv->display.crtc_disable(crtc);
4356
4357         intel_crtc_update_sarea(crtc, enable);
4358 }
4359
4360 static void intel_crtc_disable(struct drm_crtc *crtc)
4361 {
4362         struct drm_device *dev = crtc->dev;
4363         struct drm_connector *connector;
4364         struct drm_i915_private *dev_priv = dev->dev_private;
4365         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4366
4367         /* crtc should still be enabled when we disable it. */
4368         WARN_ON(!crtc->enabled);
4369
4370         dev_priv->display.crtc_disable(crtc);
4371         intel_crtc->eld_vld = false;
4372         intel_crtc_update_sarea(crtc, false);
4373         dev_priv->display.off(crtc);
4374
4375         assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
4376         assert_cursor_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
4377         assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
4378
4379         if (crtc->fb) {
4380                 mutex_lock(&dev->struct_mutex);
4381                 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
4382                 mutex_unlock(&dev->struct_mutex);
4383                 crtc->fb = NULL;
4384         }
4385
4386         /* Update computed state. */
4387         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
4388                 if (!connector->encoder || !connector->encoder->crtc)
4389                         continue;
4390
4391                 if (connector->encoder->crtc != crtc)
4392                         continue;
4393
4394                 connector->dpms = DRM_MODE_DPMS_OFF;
4395                 to_intel_encoder(connector->encoder)->connectors_active = false;
4396         }
4397 }
4398
4399 void intel_encoder_destroy(struct drm_encoder *encoder)
4400 {
4401         struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
4402
4403         drm_encoder_cleanup(encoder);
4404         kfree(intel_encoder);
4405 }
4406
4407 /* Simple dpms helper for encoders with just one connector, no cloning and only
4408  * one kind of off state. It clamps all !ON modes to fully OFF and changes the
4409  * state of the entire output pipe. */
4410 static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
4411 {
4412         if (mode == DRM_MODE_DPMS_ON) {
4413                 encoder->connectors_active = true;
4414
4415                 intel_crtc_update_dpms(encoder->base.crtc);
4416         } else {
4417                 encoder->connectors_active = false;
4418
4419                 intel_crtc_update_dpms(encoder->base.crtc);
4420         }
4421 }
4422
4423 /* Cross check the actual hw state with our own modeset state tracking (and its
4424  * internal consistency). */
4425 static void intel_connector_check_state(struct intel_connector *connector)
4426 {
4427         if (connector->get_hw_state(connector)) {
4428                 struct intel_encoder *encoder = connector->encoder;
4429                 struct drm_crtc *crtc;
4430                 bool encoder_enabled;
4431                 enum pipe pipe;
4432
4433                 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4434                               connector->base.base.id,
4435                               drm_get_connector_name(&connector->base));
4436
4437                 WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
4438                      "wrong connector dpms state\n");
4439                 WARN(connector->base.encoder != &encoder->base,
4440                      "active connector not linked to encoder\n");
4441                 WARN(!encoder->connectors_active,
4442                      "encoder->connectors_active not set\n");
4443
4444                 encoder_enabled = encoder->get_hw_state(encoder, &pipe);
4445                 WARN(!encoder_enabled, "encoder not enabled\n");
4446                 if (WARN_ON(!encoder->base.crtc))
4447                         return;
4448
4449                 crtc = encoder->base.crtc;
4450
4451                 WARN(!crtc->enabled, "crtc not enabled\n");
4452                 WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
4453                 WARN(pipe != to_intel_crtc(crtc)->pipe,
4454                      "encoder active on the wrong pipe\n");
4455         }
4456 }
4457
4458 /* Even simpler default implementation, if there's really no special case to
4459  * consider. */
4460 void intel_connector_dpms(struct drm_connector *connector, int mode)
4461 {
4462         /* All the simple cases only support two dpms states. */
4463         if (mode != DRM_MODE_DPMS_ON)
4464                 mode = DRM_MODE_DPMS_OFF;
4465
4466         if (mode == connector->dpms)
4467                 return;
4468
4469         connector->dpms = mode;
4470
4471         /* Only need to change hw state when actually enabled */
4472         if (connector->encoder)
4473                 intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
4474
4475         intel_modeset_check_state(connector->dev);
4476 }
4477
4478 /* Simple connector->get_hw_state implementation for encoders that support only
4479  * one connector and no cloning and hence the encoder state determines the state
4480  * of the connector. */
4481 bool intel_connector_get_hw_state(struct intel_connector *connector)
4482 {
4483         enum pipe pipe = 0;
4484         struct intel_encoder *encoder = connector->encoder;
4485
4486         return encoder->get_hw_state(encoder, &pipe);
4487 }
4488
4489 static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
4490                                      struct intel_crtc_config *pipe_config)
4491 {
4492         struct drm_i915_private *dev_priv = dev->dev_private;
4493         struct intel_crtc *pipe_B_crtc =
4494                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
4495
4496         DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
4497                       pipe_name(pipe), pipe_config->fdi_lanes);
4498         if (pipe_config->fdi_lanes > 4) {
4499                 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
4500                               pipe_name(pipe), pipe_config->fdi_lanes);
4501                 return false;
4502         }
4503
4504         if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
4505                 if (pipe_config->fdi_lanes > 2) {
4506                         DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
4507                                       pipe_config->fdi_lanes);
4508                         return false;
4509                 } else {
4510                         return true;
4511                 }
4512         }
4513
4514         if (INTEL_INFO(dev)->num_pipes == 2)
4515                 return true;
4516
4517         /* Ivybridge 3 pipe is really complicated */
4518         switch (pipe) {
4519         case PIPE_A:
4520                 return true;
4521         case PIPE_B:
4522                 if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
4523                     pipe_config->fdi_lanes > 2) {
4524                         DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
4525                                       pipe_name(pipe), pipe_config->fdi_lanes);
4526                         return false;
4527                 }
4528                 return true;
4529         case PIPE_C:
4530                 if (!pipe_has_enabled_pch(pipe_B_crtc) ||
4531                     pipe_B_crtc->config.fdi_lanes <= 2) {
4532                         if (pipe_config->fdi_lanes > 2) {
4533                                 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
4534                                               pipe_name(pipe), pipe_config->fdi_lanes);
4535                                 return false;
4536                         }
4537                 } else {
4538                         DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
4539                         return false;
4540                 }
4541                 return true;
4542         default:
4543                 BUG();
4544         }
4545 }
4546
4547 #define RETRY 1
4548 static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
4549                                        struct intel_crtc_config *pipe_config)
4550 {
4551         struct drm_device *dev = intel_crtc->base.dev;
4552         struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
4553         int lane, link_bw, fdi_dotclock;
4554         bool setup_ok, needs_recompute = false;
4555
4556 retry:
4557         /* FDI is a binary signal running at ~2.7GHz, encoding
4558          * each output octet as 10 bits. The actual frequency
4559          * is stored as a divider into a 100MHz clock, and the
4560          * mode pixel clock is stored in units of 1KHz.
4561          * Hence the bw of each lane in terms of the mode signal
4562          * is:
4563          */
4564         link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
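        /* Worked example (illustrative): with the FDI running at 2.7 GHz the
         * divider reads back as 27, so link_bw = 27 * 100000 / 10 = 270000;
         * the /10 is the ten-bits-per-octet encoding mentioned above. */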
4565
4566         fdi_dotclock = adjusted_mode->crtc_clock;
4567
4568         lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
4569                                            pipe_config->pipe_bpp);
4570
4571         pipe_config->fdi_lanes = lane;
4572
4573         intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
4574                                link_bw, &pipe_config->fdi_m_n);
4575
4576         setup_ok = ironlake_check_fdi_lanes(intel_crtc->base.dev,
4577                                             intel_crtc->pipe, pipe_config);
4578         if (!setup_ok && pipe_config->pipe_bpp > 6*3) {
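                /* pipe_bpp counts all three channels, so each retry drops two
                 * bits per channel, e.g. 30 -> 24 -> 18 bpp, never going below
                 * the 6*3 floor checked above. */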
4579                 pipe_config->pipe_bpp -= 2*3;
4580                 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
4581                               pipe_config->pipe_bpp);
4582                 needs_recompute = true;
4583                 pipe_config->bw_constrained = true;
4584
4585                 goto retry;
4586         }
4587
4588         if (needs_recompute)
4589                 return RETRY;
4590
4591         return setup_ok ? 0 : -EINVAL;
4592 }
4593
4594 static void hsw_compute_ips_config(struct intel_crtc *crtc,
4595                                    struct intel_crtc_config *pipe_config)
4596 {
4597         pipe_config->ips_enabled = i915_enable_ips &&
4598                                    hsw_crtc_supports_ips(crtc) &&
4599                                    pipe_config->pipe_bpp <= 24;
4600 }
4601
4602 static int intel_crtc_compute_config(struct intel_crtc *crtc,
4603                                      struct intel_crtc_config *pipe_config)
4604 {
4605         struct drm_device *dev = crtc->base.dev;
4606         struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
4607
4608         /* FIXME should check pixel clock limits on all platforms */
4609         if (INTEL_INFO(dev)->gen < 4) {
4610                 struct drm_i915_private *dev_priv = dev->dev_private;
4611                 int clock_limit =
4612                         dev_priv->display.get_display_clock_speed(dev);
4613
4614                 /*
4615                  * Enable pixel doubling when the dot clock
4616                  * is > 90% of the (display) core speed.
4617                  *
4618                  * GDG double wide on either pipe,
4619                  * otherwise pipe A only.
4620                  */
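                /* Illustration: with e.g. a 333000 kHz core clock the cutoff
                 * here is 299700 kHz; once double wide kicks in, the check
                 * below allows dot clocks up to 599400 kHz (90% of 666000). */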
4621                 if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
4622                     adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
4623                         clock_limit *= 2;
4624                         pipe_config->double_wide = true;
4625                 }
4626
4627                 if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
4628                         return -EINVAL;
4629         }
4630
4631         /*
4632          * Pipe horizontal size must be even in:
4633          * - DVO ganged mode
4634          * - LVDS dual channel mode
4635          * - Double wide pipe
4636          */
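        /* Clearing bit 0 below just rounds an odd width down to the next even
         * value, e.g. a 1367 pixel wide source becomes 1366. */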
4637         if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
4638              intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
4639                 pipe_config->pipe_src_w &= ~1;
4640
4641         /* Cantiga+ cannot handle modes with a hsync front porch of 0.
4642          * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
4643          */
4644         if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
4645                 adjusted_mode->hsync_start == adjusted_mode->hdisplay)
4646                 return -EINVAL;
4647
4648         if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
4649                 pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
4650         } else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) {
4651                 /* only a 8bpc pipe, with 6bpc dither through the panel fitter
4652                  * for lvds. */
4653                 pipe_config->pipe_bpp = 8*3;
4654         }
4655
4656         if (HAS_IPS(dev))
4657                 hsw_compute_ips_config(crtc, pipe_config);
4658
4659         /* XXX: PCH clock sharing is done in ->mode_set, so make sure the old
4660          * clock survives for now. */
4661         if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
4662                 pipe_config->shared_dpll = crtc->config.shared_dpll;
4663
4664         if (pipe_config->has_pch_encoder)
4665                 return ironlake_fdi_compute_config(crtc, pipe_config);
4666
4667         return 0;
4668 }
4669
4670 static int valleyview_get_display_clock_speed(struct drm_device *dev)
4671 {
4672         return 400000; /* FIXME */
4673 }
4674
4675 static int i945_get_display_clock_speed(struct drm_device *dev)
4676 {
4677         return 400000;
4678 }
4679
4680 static int i915_get_display_clock_speed(struct drm_device *dev)
4681 {
4682         return 333000;
4683 }
4684
4685 static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
4686 {
4687         return 200000;
4688 }
4689
4690 static int pnv_get_display_clock_speed(struct drm_device *dev)
4691 {
4692         u16 gcfgc = 0;
4693
4694         pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
4695
4696         switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
4697         case GC_DISPLAY_CLOCK_267_MHZ_PNV:
4698                 return 267000;
4699         case GC_DISPLAY_CLOCK_333_MHZ_PNV:
4700                 return 333000;
4701         case GC_DISPLAY_CLOCK_444_MHZ_PNV:
4702                 return 444000;
4703         case GC_DISPLAY_CLOCK_200_MHZ_PNV:
4704                 return 200000;
4705         default:
4706                 DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
4707         case GC_DISPLAY_CLOCK_133_MHZ_PNV:
4708                 return 133000;
4709         case GC_DISPLAY_CLOCK_167_MHZ_PNV:
4710                 return 167000;
4711         }
4712 }
4713
4714 static int i915gm_get_display_clock_speed(struct drm_device *dev)
4715 {
4716         u16 gcfgc = 0;
4717
4718         pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
4719
4720         if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
4721                 return 133000;
4722         else {
4723                 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
4724                 case GC_DISPLAY_CLOCK_333_MHZ:
4725                         return 333000;
4726                 default:
4727                 case GC_DISPLAY_CLOCK_190_200_MHZ:
4728                         return 190000;
4729                 }
4730         }
4731 }
4732
4733 static int i865_get_display_clock_speed(struct drm_device *dev)
4734 {
4735         return 266000;
4736 }
4737
4738 static int i855_get_display_clock_speed(struct drm_device *dev)
4739 {
4740         u16 hpllcc = 0;
4741         /* Assume that the hardware is in the high speed state.  This
4742          * should be the default.
4743          */
4744         switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
4745         case GC_CLOCK_133_200:
4746         case GC_CLOCK_100_200:
4747                 return 200000;
4748         case GC_CLOCK_166_250:
4749                 return 250000;
4750         case GC_CLOCK_100_133:
4751                 return 133000;
4752         }
4753
4754         /* Shouldn't happen */
4755         return 0;
4756 }
4757
4758 static int i830_get_display_clock_speed(struct drm_device *dev)
4759 {
4760         return 133000;
4761 }
4762
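/* Halve numerator and denominator together until both fit in the hardware
 * M/N register field; the ratio is preserved up to rounding. */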
4763 static void
4764 intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
4765 {
4766         while (*num > DATA_LINK_M_N_MASK ||
4767                *den > DATA_LINK_M_N_MASK) {
4768                 *num >>= 1;
4769                 *den >>= 1;
4770         }
4771 }
4772
4773 static void compute_m_n(unsigned int m, unsigned int n,
4774                         uint32_t *ret_m, uint32_t *ret_n)
4775 {
4776         *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
4777         *ret_m = div_u64((uint64_t) m * *ret_n, n);
4778         intel_reduce_m_n_ratio(ret_m, ret_n);
4779 }
4780
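/* Worked example (illustrative): a 148500 kHz mode at 24 bpp over 4 lanes of
 * a 270000 kHz link gives a data ratio of 24 * 148500 / (270000 * 4 * 8) =
 * 0.4125 and a link ratio of 148500 / 270000 = 0.55; compute_m_n() turns each
 * ratio into the gmch_m/n and link_m/n register pairs below. */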
4781 void
4782 intel_link_compute_m_n(int bits_per_pixel, int nlanes,
4783                        int pixel_clock, int link_clock,
4784                        struct intel_link_m_n *m_n)
4785 {
4786         m_n->tu = 64;
4787
4788         compute_m_n(bits_per_pixel * pixel_clock,
4789                     link_clock * nlanes * 8,
4790                     &m_n->gmch_m, &m_n->gmch_n);
4791
4792         compute_m_n(pixel_clock, link_clock,
4793                     &m_n->link_m, &m_n->link_n);
4794 }
4795
4796 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4797 {
4798         if (i915_panel_use_ssc >= 0)
4799                 return i915_panel_use_ssc != 0;
4800         return dev_priv->vbt.lvds_use_ssc
4801                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
4802 }
4803
4804 static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
4805 {
4806         struct drm_device *dev = crtc->dev;
4807         struct drm_i915_private *dev_priv = dev->dev_private;
4808         int refclk;
4809
4810         if (IS_VALLEYVIEW(dev)) {
4811                 refclk = 100000;
4812         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
4813             intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4814                 refclk = dev_priv->vbt.lvds_ssc_freq;
4815                 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
4816         } else if (!IS_GEN2(dev)) {
4817                 refclk = 96000;
4818         } else {
4819                 refclk = 48000;
4820         }
4821
4822         return refclk;
4823 }
4824
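/* Pack the feedback dividers into an FP register value: Pineview encodes N as
 * a power-of-two bit in the upper field with M2 in the low bits, while i9xx
 * packs N, M1 and M2 directly. */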
4825 static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
4826 {
4827         return (1 << dpll->n) << 16 | dpll->m2;
4828 }
4829
4830 static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
4831 {
4832         return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
4833 }
4834
4835 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
4836                                      intel_clock_t *reduced_clock)
4837 {
4838         struct drm_device *dev = crtc->base.dev;
4839         struct drm_i915_private *dev_priv = dev->dev_private;
4840         int pipe = crtc->pipe;
4841         u32 fp, fp2 = 0;
4842
4843         if (IS_PINEVIEW(dev)) {
4844                 fp = pnv_dpll_compute_fp(&crtc->config.dpll);
4845                 if (reduced_clock)
4846                         fp2 = pnv_dpll_compute_fp(reduced_clock);
4847         } else {
4848                 fp = i9xx_dpll_compute_fp(&crtc->config.dpll);
4849                 if (reduced_clock)
4850                         fp2 = i9xx_dpll_compute_fp(reduced_clock);
4851         }
4852
4853         I915_WRITE(FP0(pipe), fp);
4854         crtc->config.dpll_hw_state.fp0 = fp;
4855
4856         crtc->lowfreq_avail = false;
4857         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
4858             reduced_clock && i915_powersave) {
4859                 I915_WRITE(FP1(pipe), fp2);
4860                 crtc->config.dpll_hw_state.fp1 = fp2;
4861                 crtc->lowfreq_avail = true;
4862         } else {
4863                 I915_WRITE(FP1(pipe), fp);
4864                 crtc->config.dpll_hw_state.fp1 = fp;
4865         }
4866 }
4867
4868 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
4869                 pipe)
4870 {
4871         u32 reg_val;
4872
4873         /*
4874          * PLLB opamp always calibrates to max value of 0x3f, force enable it
4875          * and set it to a reasonable value instead.
4876          */
4877         reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
4878         reg_val &= 0xffffff00;
4879         reg_val |= 0x00000030;
4880         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
4881
4882         reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
4883         reg_val &= 0x8cffffff;
4884         reg_val = 0x8c000000;
4885         vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
4886
4887         reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
4888         reg_val &= 0xffffff00;
4889         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
4890
4891         reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
4892         reg_val &= 0x00ffffff;
4893         reg_val |= 0xb0000000;
4894         vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
4895 }
4896
4897 static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
4898                                          struct intel_link_m_n *m_n)
4899 {
4900         struct drm_device *dev = crtc->base.dev;
4901         struct drm_i915_private *dev_priv = dev->dev_private;
4902         int pipe = crtc->pipe;
4903
4904         I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
4905         I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
4906         I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
4907         I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
4908 }
4909
4910 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
4911                                          struct intel_link_m_n *m_n)
4912 {
4913         struct drm_device *dev = crtc->base.dev;
4914         struct drm_i915_private *dev_priv = dev->dev_private;
4915         int pipe = crtc->pipe;
4916         enum transcoder transcoder = crtc->config.cpu_transcoder;
4917
4918         if (INTEL_INFO(dev)->gen >= 5) {
4919                 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
4920                 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
4921                 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
4922                 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
4923         } else {
4924                 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
4925                 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
4926                 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
4927                 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
4928         }
4929 }
4930
4931 static void intel_dp_set_m_n(struct intel_crtc *crtc)
4932 {
4933         if (crtc->config.has_pch_encoder)
4934                 intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
4935         else
4936                 intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
4937 }
4938
4939 static void vlv_update_pll(struct intel_crtc *crtc)
4940 {
4941         struct drm_device *dev = crtc->base.dev;
4942         struct drm_i915_private *dev_priv = dev->dev_private;
4943         int pipe = crtc->pipe;
4944         u32 dpll, mdiv;
4945         u32 bestn, bestm1, bestm2, bestp1, bestp2;
4946         u32 coreclk, reg_val, dpll_md;
4947
4948         mutex_lock(&dev_priv->dpio_lock);
4949
4950         bestn = crtc->config.dpll.n;
4951         bestm1 = crtc->config.dpll.m1;
4952         bestm2 = crtc->config.dpll.m2;
4953         bestp1 = crtc->config.dpll.p1;
4954         bestp2 = crtc->config.dpll.p2;
4955
4956         /* See eDP HDMI DPIO driver vbios notes doc */
4957
4958         /* PLL B needs special handling */
4959         if (pipe)
4960                 vlv_pllb_recal_opamp(dev_priv, pipe);
4961
4962         /* Set up Tx target for periodic Rcomp update */
4963         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
4964
4965         /* Disable target IRef on PLL */
4966         reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
4967         reg_val &= 0x00ffffff;
4968         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
4969
4970         /* Disable fast lock */
4971         vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
4972
4973         /* Set idtafcrecal before PLL is enabled */
4974         mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
4975         mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
4976         mdiv |= ((bestn << DPIO_N_SHIFT));
4977         mdiv |= (1 << DPIO_K_SHIFT);
4978
4979         /*
4980          * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
4981          * but we don't support that).
4982          * Note: don't use the DAC post divider as it seems unstable.
4983          */
4984         mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
4985         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
4986
4987         mdiv |= DPIO_ENABLE_CALIBRATION;
4988         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
4989
4990         /* Set HBR and RBR LPF coefficients */
4991         if (crtc->config.port_clock == 162000 ||
4992             intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
4993             intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
4994                 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
4995                                  0x009f0003);
4996         else
4997                 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
4998                                  0x00d0000f);
4999
5000         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
5001             intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
5002                 /* Use SSC source */
5003                 if (!pipe)
5004                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
5005                                          0x0df40000);
5006                 else
5007                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
5008                                          0x0df70000);
5009         } else { /* HDMI or VGA */
5010                 /* Use bend source */
5011                 if (!pipe)
5012                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
5013                                          0x0df70000);
5014                 else
5015                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
5016                                          0x0df40000);
5017         }
5018
5019         coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
5020         coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
5021         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
5022             intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
5023                 coreclk |= 0x01000000;
5024         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
5025
5026         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
5027
5028         /*
5029          * Enable DPIO clock input. We should never disable the reference
5030          * clock for pipe B, since VGA hotplug / manual detection depends
5031          * on it.
5032          */
5033         dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
5034                 DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
5035         /* We should never disable this, set it here for state tracking */
5036         if (pipe == PIPE_B)
5037                 dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
5038         dpll |= DPLL_VCO_ENABLE;
5039         crtc->config.dpll_hw_state.dpll = dpll;
5040
5041         dpll_md = (crtc->config.pixel_multiplier - 1)
5042                 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5043         crtc->config.dpll_hw_state.dpll_md = dpll_md;
5044
5045         if (crtc->config.has_dp_encoder)
5046                 intel_dp_set_m_n(crtc);
5047
5048         mutex_unlock(&dev_priv->dpio_lock);
5049 }
5050
5051 static void i9xx_update_pll(struct intel_crtc *crtc,
5052                             intel_clock_t *reduced_clock,
5053                             int num_connectors)
5054 {
5055         struct drm_device *dev = crtc->base.dev;
5056         struct drm_i915_private *dev_priv = dev->dev_private;
5057         u32 dpll;
5058         bool is_sdvo;
5059         struct dpll *clock = &crtc->config.dpll;
5060
5061         i9xx_update_pll_dividers(crtc, reduced_clock);
5062
5063         is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) ||
5064                 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
5065
5066         dpll = DPLL_VGA_MODE_DIS;
5067
5068         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS))
5069                 dpll |= DPLLB_MODE_LVDS;
5070         else
5071                 dpll |= DPLLB_MODE_DAC_SERIAL;
5072
5073         if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
5074                 dpll |= (crtc->config.pixel_multiplier - 1)
5075                         << SDVO_MULTIPLIER_SHIFT_HIRES;
5076         }
5077
5078         if (is_sdvo)
5079                 dpll |= DPLL_SDVO_HIGH_SPEED;
5080
5081         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
5082                 dpll |= DPLL_SDVO_HIGH_SPEED;
5083
5084         /* compute bitmask from p1 value */
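        /* e.g. p1 == 3 ends up as bit 2 of the P1 post divider field */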
5085         if (IS_PINEVIEW(dev))
5086                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
5087         else {
5088                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5089                 if (IS_G4X(dev) && reduced_clock)
5090                         dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
5091         }
5092         switch (clock->p2) {
5093         case 5:
5094                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5095                 break;
5096         case 7:
5097                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5098                 break;
5099         case 10:
5100                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5101                 break;
5102         case 14:
5103                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5104                 break;
5105         }
5106         if (INTEL_INFO(dev)->gen >= 4)
5107                 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
5108
5109         if (crtc->config.sdvo_tv_clock)
5110                 dpll |= PLL_REF_INPUT_TVCLKINBC;
5111         else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
5112                  intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5113                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5114         else
5115                 dpll |= PLL_REF_INPUT_DREFCLK;
5116
5117         dpll |= DPLL_VCO_ENABLE;
5118         crtc->config.dpll_hw_state.dpll = dpll;
5119
5120         if (INTEL_INFO(dev)->gen >= 4) {
5121                 u32 dpll_md = (crtc->config.pixel_multiplier - 1)
5122                         << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5123                 crtc->config.dpll_hw_state.dpll_md = dpll_md;
5124         }
5125
5126         if (crtc->config.has_dp_encoder)
5127                 intel_dp_set_m_n(crtc);
5128 }
5129
5130 static void i8xx_update_pll(struct intel_crtc *crtc,
5131                             intel_clock_t *reduced_clock,
5132                             int num_connectors)
5133 {
5134         struct drm_device *dev = crtc->base.dev;
5135         struct drm_i915_private *dev_priv = dev->dev_private;
5136         u32 dpll;
5137         struct dpll *clock = &crtc->config.dpll;
5138
5139         i9xx_update_pll_dividers(crtc, reduced_clock);
5140
5141         dpll = DPLL_VGA_MODE_DIS;
5142
5143         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) {
5144                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5145         } else {
5146                 if (clock->p1 == 2)
5147                         dpll |= PLL_P1_DIVIDE_BY_TWO;
5148                 else
5149                         dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5150                 if (clock->p2 == 4)
5151                         dpll |= PLL_P2_DIVIDE_BY_4;
5152         }
5153
5154         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
5155                 dpll |= DPLL_DVO_2X_MODE;
5156
5157         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
5158                  intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5159                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5160         else
5161                 dpll |= PLL_REF_INPUT_DREFCLK;
5162
5163         dpll |= DPLL_VCO_ENABLE;
5164         crtc->config.dpll_hw_state.dpll = dpll;
5165 }
5166
5167 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
5168 {
5169         struct drm_device *dev = intel_crtc->base.dev;
5170         struct drm_i915_private *dev_priv = dev->dev_private;
5171         enum pipe pipe = intel_crtc->pipe;
5172         enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
5173         struct drm_display_mode *adjusted_mode =
5174                 &intel_crtc->config.adjusted_mode;
5175         uint32_t vsyncshift, crtc_vtotal, crtc_vblank_end;
5176
5177         /* We need to be careful not to change the adjusted mode, for otherwise
5178          * the hw state checker will get angry at the mismatch. */
5179         crtc_vtotal = adjusted_mode->crtc_vtotal;
5180         crtc_vblank_end = adjusted_mode->crtc_vblank_end;
5181
5182         if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5183                 /* the chip adds 2 halflines automatically */
5184                 crtc_vtotal -= 1;
5185                 crtc_vblank_end -= 1;
5186                 vsyncshift = adjusted_mode->crtc_hsync_start
5187                              - adjusted_mode->crtc_htotal / 2;
5188         } else {
5189                 vsyncshift = 0;
5190         }
5191
5192         if (INTEL_INFO(dev)->gen > 3)
5193                 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
5194
5195         I915_WRITE(HTOTAL(cpu_transcoder),
5196                    (adjusted_mode->crtc_hdisplay - 1) |
5197                    ((adjusted_mode->crtc_htotal - 1) << 16));
5198         I915_WRITE(HBLANK(cpu_transcoder),
5199                    (adjusted_mode->crtc_hblank_start - 1) |
5200                    ((adjusted_mode->crtc_hblank_end - 1) << 16));
5201         I915_WRITE(HSYNC(cpu_transcoder),
5202                    (adjusted_mode->crtc_hsync_start - 1) |
5203                    ((adjusted_mode->crtc_hsync_end - 1) << 16));
5204
5205         I915_WRITE(VTOTAL(cpu_transcoder),
5206                    (adjusted_mode->crtc_vdisplay - 1) |
5207                    ((crtc_vtotal - 1) << 16));
5208         I915_WRITE(VBLANK(cpu_transcoder),
5209                    (adjusted_mode->crtc_vblank_start - 1) |
5210                    ((crtc_vblank_end - 1) << 16));
5211         I915_WRITE(VSYNC(cpu_transcoder),
5212                    (adjusted_mode->crtc_vsync_start - 1) |
5213                    ((adjusted_mode->crtc_vsync_end - 1) << 16));
5214
5215         /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
5216          * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
5217          * documented on the DDI_FUNC_CTL register description, EDP Input Select
5218          * bits. */
5219         if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
5220             (pipe == PIPE_B || pipe == PIPE_C))
5221                 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
5222
5223         /* pipesrc controls the size that is scaled from, which should
5224          * always be the user's requested size.
5225          */
5226         I915_WRITE(PIPESRC(pipe),
5227                    ((intel_crtc->config.pipe_src_w - 1) << 16) |
5228                    (intel_crtc->config.pipe_src_h - 1));
5229 }
5230
5231 static void intel_get_pipe_timings(struct intel_crtc *crtc,
5232                                    struct intel_crtc_config *pipe_config)
5233 {
5234         struct drm_device *dev = crtc->base.dev;
5235         struct drm_i915_private *dev_priv = dev->dev_private;
5236         enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
5237         uint32_t tmp;
5238
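        /* The timing registers hold each value minus one (mirroring the -1
         * applied in intel_set_pipe_timings()), hence the +1 on every field
         * read back below. */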
5239         tmp = I915_READ(HTOTAL(cpu_transcoder));
5240         pipe_config->adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
5241         pipe_config->adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
5242         tmp = I915_READ(HBLANK(cpu_transcoder));
5243         pipe_config->adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
5244         pipe_config->adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
5245         tmp = I915_READ(HSYNC(cpu_transcoder));
5246         pipe_config->adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
5247         pipe_config->adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
5248
5249         tmp = I915_READ(VTOTAL(cpu_transcoder));
5250         pipe_config->adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
5251         pipe_config->adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
5252         tmp = I915_READ(VBLANK(cpu_transcoder));
5253         pipe_config->adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
5254         pipe_config->adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
5255         tmp = I915_READ(VSYNC(cpu_transcoder));
5256         pipe_config->adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
5257         pipe_config->adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
5258
5259         if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
5260                 pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
5261                 pipe_config->adjusted_mode.crtc_vtotal += 1;
5262                 pipe_config->adjusted_mode.crtc_vblank_end += 1;
5263         }
5264
5265         tmp = I915_READ(PIPESRC(crtc->pipe));
5266         pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
5267         pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
5268
5269         pipe_config->requested_mode.vdisplay = pipe_config->pipe_src_h;
5270         pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w;
5271 }
5272
5273 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
5274                                  struct intel_crtc_config *pipe_config)
5275 {
5276         mode->hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
5277         mode->htotal = pipe_config->adjusted_mode.crtc_htotal;
5278         mode->hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
5279         mode->hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
5280
5281         mode->vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
5282         mode->vtotal = pipe_config->adjusted_mode.crtc_vtotal;
5283         mode->vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
5284         mode->vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
5285
5286         mode->flags = pipe_config->adjusted_mode.flags;
5287
5288         mode->clock = pipe_config->adjusted_mode.crtc_clock;
5289         mode->flags |= pipe_config->adjusted_mode.flags;
5290 }
5291
5292 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
5293 {
5294         struct drm_device *dev = intel_crtc->base.dev;
5295         struct drm_i915_private *dev_priv = dev->dev_private;
5296         uint32_t pipeconf;
5297
5298         pipeconf = 0;
5299
5300         if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
5301             I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
5302                 pipeconf |= PIPECONF_ENABLE;
5303
5304         if (intel_crtc->config.double_wide)
5305                 pipeconf |= PIPECONF_DOUBLE_WIDE;
5306
5307         /* only g4x and later have fancy bpc/dither controls */
5308         if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
5309                 /* Bspec claims that we can't use dithering for 30bpp pipes. */
5310                 if (intel_crtc->config.dither && intel_crtc->config.pipe_bpp != 30)
5311                         pipeconf |= PIPECONF_DITHER_EN |
5312                                     PIPECONF_DITHER_TYPE_SP;
5313
5314                 switch (intel_crtc->config.pipe_bpp) {
5315                 case 18:
5316                         pipeconf |= PIPECONF_6BPC;
5317                         break;
5318                 case 24:
5319                         pipeconf |= PIPECONF_8BPC;
5320                         break;
5321                 case 30:
5322                         pipeconf |= PIPECONF_10BPC;
5323                         break;
5324                 default:
5325                         /* Case prevented by intel_choose_pipe_bpp_dither. */
5326                         BUG();
5327                 }
5328         }
5329
5330         if (HAS_PIPE_CXSR(dev)) {
5331                 if (intel_crtc->lowfreq_avail) {
5332                         DRM_DEBUG_KMS("enabling CxSR downclocking\n");
5333                         pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
5334                 } else {
5335                         DRM_DEBUG_KMS("disabling CxSR downclocking\n");
5336                 }
5337         }
5338
5339         if (!IS_GEN2(dev) &&
5340             intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
5341                 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
5342         else
5343                 pipeconf |= PIPECONF_PROGRESSIVE;
5344
5345         if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range)
5346                 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
5347
5348         I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
5349         POSTING_READ(PIPECONF(intel_crtc->pipe));
5350 }
5351
5352 static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
5353                               int x, int y,
5354                               struct drm_framebuffer *fb)
5355 {
5356         struct drm_device *dev = crtc->dev;
5357         struct drm_i915_private *dev_priv = dev->dev_private;
5358         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5359         int pipe = intel_crtc->pipe;
5360         int plane = intel_crtc->plane;
5361         int refclk, num_connectors = 0;
5362         intel_clock_t clock, reduced_clock;
5363         u32 dspcntr;
5364         bool ok, has_reduced_clock = false;
5365         bool is_lvds = false, is_dsi = false;
5366         struct intel_encoder *encoder;
5367         const intel_limit_t *limit;
5368         int ret;
5369
5370         for_each_encoder_on_crtc(dev, crtc, encoder) {
5371                 switch (encoder->type) {
5372                 case INTEL_OUTPUT_LVDS:
5373                         is_lvds = true;
5374                         break;
5375                 case INTEL_OUTPUT_DSI:
5376                         is_dsi = true;
5377                         break;
5378                 }
5379
5380                 num_connectors++;
5381         }
5382
5383         if (is_dsi)
5384                 goto skip_dpll;
5385
5386         if (!intel_crtc->config.clock_set) {
5387                 refclk = i9xx_get_refclk(crtc, num_connectors);
5388
5389                 /*
5390                  * Returns a set of divisors for the desired target clock with
5391                  * the given refclk, or FALSE.  The returned values represent
5392                  * the clock equation: refclk * (5 * (m1 + 2) + (m2 + 2)) / (n +
5393                  * 2) / p1 / p2.
5394                  */
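                /*
                 * For illustration (arbitrary divider values): refclk = 96000
                 * kHz with m1 = 12, m2 = 7, n = 3, p1 = 2, p2 = 10 gives
                 * 96000 * (5 * 14 + 9) / 5 / 2 / 10 = 75840 kHz.
                 */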
5395                 limit = intel_limit(crtc, refclk);
5396                 ok = dev_priv->display.find_dpll(limit, crtc,
5397                                                  intel_crtc->config.port_clock,
5398                                                  refclk, NULL, &clock);
5399                 if (!ok) {
5400                         DRM_ERROR("Couldn't find PLL settings for mode!\n");
5401                         return -EINVAL;
5402                 }
5403
5404                 if (is_lvds && dev_priv->lvds_downclock_avail) {
5405                         /*
5406                          * Ensure we match the reduced clock's P to the target
5407                          * clock.  If the clocks don't match, we can't switch
5408                          * the display clock by using the FP0/FP1. In such case
5409                          * we will disable the LVDS downclock feature.
5410                          */
5411                         has_reduced_clock =
5412                                 dev_priv->display.find_dpll(limit, crtc,
5413                                                             dev_priv->lvds_downclock,
5414                                                             refclk, &clock,
5415                                                             &reduced_clock);
5416                 }
5417                 /* Compat-code for transition, will disappear. */
5418                 intel_crtc->config.dpll.n = clock.n;
5419                 intel_crtc->config.dpll.m1 = clock.m1;
5420                 intel_crtc->config.dpll.m2 = clock.m2;
5421                 intel_crtc->config.dpll.p1 = clock.p1;
5422                 intel_crtc->config.dpll.p2 = clock.p2;
5423         }
5424
5425         if (IS_GEN2(dev)) {
5426                 i8xx_update_pll(intel_crtc,
5427                                 has_reduced_clock ? &reduced_clock : NULL,
5428                                 num_connectors);
5429         } else if (IS_VALLEYVIEW(dev)) {
5430                 vlv_update_pll(intel_crtc);
5431         } else {
5432                 i9xx_update_pll(intel_crtc,
5433                                 has_reduced_clock ? &reduced_clock : NULL,
5434                                 num_connectors);
5435         }
5436
5437 skip_dpll:
5438         /* Set up the display plane register */
5439         dspcntr = DISPPLANE_GAMMA_ENABLE;
5440
5441         if (!IS_VALLEYVIEW(dev)) {
5442                 if (pipe == 0)
5443                         dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
5444                 else
5445                         dspcntr |= DISPPLANE_SEL_PIPE_B;
5446         }
5447
5448         intel_set_pipe_timings(intel_crtc);
5449
5450         /* pipesrc and dspsize control the size that is scaled from,
5451          * which should always be the user's requested size.
5452          */
5453         I915_WRITE(DSPSIZE(plane),
5454                    ((intel_crtc->config.pipe_src_h - 1) << 16) |
5455                    (intel_crtc->config.pipe_src_w - 1));
5456         I915_WRITE(DSPPOS(plane), 0);
5457
5458         i9xx_set_pipeconf(intel_crtc);
5459
5460         I915_WRITE(DSPCNTR(plane), dspcntr);
5461         POSTING_READ(DSPCNTR(plane));
5462
5463         ret = intel_pipe_set_base(crtc, x, y, fb);
5464
5465         return ret;
5466 }
5467
5468 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
5469                                  struct intel_crtc_config *pipe_config)
5470 {
5471         struct drm_device *dev = crtc->base.dev;
5472         struct drm_i915_private *dev_priv = dev->dev_private;
5473         uint32_t tmp;
5474
5475         if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
5476                 return;
5477
5478         tmp = I915_READ(PFIT_CONTROL);
5479         if (!(tmp & PFIT_ENABLE))
5480                 return;
5481
5482         /* Check whether the pfit is attached to our pipe. */
5483         if (INTEL_INFO(dev)->gen < 4) {
5484                 if (crtc->pipe != PIPE_B)
5485                         return;
5486         } else {
5487                 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
5488                         return;
5489         }
5490
5491         pipe_config->gmch_pfit.control = tmp;
5492         pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
5493         if (INTEL_INFO(dev)->gen < 5)
5494                 pipe_config->gmch_pfit.lvds_border_bits =
5495                         I915_READ(LVDS) & LVDS_BORDER_ENABLE;
5496 }
5497
5498 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
5499                                struct intel_crtc_config *pipe_config)
5500 {
5501         struct drm_device *dev = crtc->base.dev;
5502         struct drm_i915_private *dev_priv = dev->dev_private;
5503         int pipe = pipe_config->cpu_transcoder;
5504         intel_clock_t clock;
5505         u32 mdiv;
5506         int refclk = 100000;
5507
5508         mutex_lock(&dev_priv->dpio_lock);
5509         mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
5510         mutex_unlock(&dev_priv->dpio_lock);
5511
5512         clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
5513         clock.m2 = mdiv & DPIO_M2DIV_MASK;
5514         clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
5515         clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
5516         clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
5517
5518         vlv_clock(refclk, &clock);
5519
5520         /* clock.dot is the fast clock */
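        /* e.g. a DP HBR link reads back as a 1350000 kHz fast clock, giving a
         * 270000 kHz port clock here (illustrative). */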
5521         pipe_config->port_clock = clock.dot / 5;
5522 }
5523
5524 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
5525                                  struct intel_crtc_config *pipe_config)
5526 {
5527         struct drm_device *dev = crtc->base.dev;
5528         struct drm_i915_private *dev_priv = dev->dev_private;
5529         uint32_t tmp;
5530
5531         pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
5532         pipe_config->shared_dpll = DPLL_ID_PRIVATE;
5533
5534         tmp = I915_READ(PIPECONF(crtc->pipe));
5535         if (!(tmp & PIPECONF_ENABLE))
5536                 return false;
5537
5538         if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
5539                 switch (tmp & PIPECONF_BPC_MASK) {
5540                 case PIPECONF_6BPC:
5541                         pipe_config->pipe_bpp = 18;
5542                         break;
5543                 case PIPECONF_8BPC:
5544                         pipe_config->pipe_bpp = 24;
5545                         break;
5546                 case PIPECONF_10BPC:
5547                         pipe_config->pipe_bpp = 30;
5548                         break;
5549                 default:
5550                         break;
5551                 }
5552         }
5553
5554         if (INTEL_INFO(dev)->gen < 4)
5555                 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
5556
5557         intel_get_pipe_timings(crtc, pipe_config);
5558
5559         i9xx_get_pfit_config(crtc, pipe_config);
5560
5561         if (INTEL_INFO(dev)->gen >= 4) {
5562                 tmp = I915_READ(DPLL_MD(crtc->pipe));
5563                 pipe_config->pixel_multiplier =
5564                         ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
5565                          >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
5566                 pipe_config->dpll_hw_state.dpll_md = tmp;
5567         } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
5568                 tmp = I915_READ(DPLL(crtc->pipe));
5569                 pipe_config->pixel_multiplier =
5570                         ((tmp & SDVO_MULTIPLIER_MASK)
5571                          >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
5572         } else {
5573                 /* Note that on i915G/GM the pixel multiplier is in the sdvo
5574                  * port and will be fixed up in the encoder->get_config
5575                  * function. */
5576                 pipe_config->pixel_multiplier = 1;
5577         }
5578         pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
5579         if (!IS_VALLEYVIEW(dev)) {
5580                 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
5581                 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
5582         } else {
5583                 /* Mask out read-only status bits. */
5584                 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
5585                                                      DPLL_PORTC_READY_MASK |
5586                                                      DPLL_PORTB_READY_MASK);
5587         }
5588
5589         if (IS_VALLEYVIEW(dev))
5590                 vlv_crtc_clock_get(crtc, pipe_config);
5591         else
5592                 i9xx_crtc_clock_get(crtc, pipe_config);
5593
5594         return true;
5595 }
5596
5597 static void ironlake_init_pch_refclk(struct drm_device *dev)
5598 {
5599         struct drm_i915_private *dev_priv = dev->dev_private;
5600         struct drm_mode_config *mode_config = &dev->mode_config;
5601         struct intel_encoder *encoder;
5602         u32 val, final;
5603         bool has_lvds = false;
5604         bool has_cpu_edp = false;
5605         bool has_panel = false;
5606         bool has_ck505 = false;
5607         bool can_ssc = false;
5608
5609         /* We need to take the global config into account */
5610         list_for_each_entry(encoder, &mode_config->encoder_list,
5611                             base.head) {
5612                 switch (encoder->type) {
5613                 case INTEL_OUTPUT_LVDS:
5614                         has_panel = true;
5615                         has_lvds = true;
5616                         break;
5617                 case INTEL_OUTPUT_EDP:
5618                         has_panel = true;
5619                         if (enc_to_dig_port(&encoder->base)->port == PORT_A)
5620                                 has_cpu_edp = true;
5621                         break;
5622                 }
5623         }
5624
5625         if (HAS_PCH_IBX(dev)) {
5626                 has_ck505 = dev_priv->vbt.display_clock_mode;
5627                 can_ssc = has_ck505;
5628         } else {
5629                 has_ck505 = false;
5630                 can_ssc = true;
5631         }
5632
5633         DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
5634                       has_panel, has_lvds, has_ck505);
5635
5636         /* Ironlake: try to set up the display reference clock before DPLL
5637          * enabling. This is only under the driver's control after the
5638          * PCH B stepping; earlier chipset steppings should ignore this
5639          * setting.
5640          */
5641         val = I915_READ(PCH_DREF_CONTROL);
5642
5643         /* As we must carefully and slowly disable/enable each source in turn,
5644          * compute the final state we want first and check if we need to
5645          * make any changes at all.
5646          */
5647         final = val;
5648         final &= ~DREF_NONSPREAD_SOURCE_MASK;
5649         if (has_ck505)
5650                 final |= DREF_NONSPREAD_CK505_ENABLE;
5651         else
5652                 final |= DREF_NONSPREAD_SOURCE_ENABLE;
5653
5654         final &= ~DREF_SSC_SOURCE_MASK;
5655         final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5656         final &= ~DREF_SSC1_ENABLE;
5657
5658         if (has_panel) {
5659                 final |= DREF_SSC_SOURCE_ENABLE;
5660
5661                 if (intel_panel_use_ssc(dev_priv) && can_ssc)
5662                         final |= DREF_SSC1_ENABLE;
5663
5664                 if (has_cpu_edp) {
5665                         if (intel_panel_use_ssc(dev_priv) && can_ssc)
5666                                 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5667                         else
5668                                 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
5669                 } else
5670                         final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5671         } else {
5672                 final |= DREF_SSC_SOURCE_DISABLE;
5673                 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5674         }
5675
5676         if (final == val)
5677                 return;
5678
5679         /* Always enable nonspread source */
5680         val &= ~DREF_NONSPREAD_SOURCE_MASK;
5681
5682         if (has_ck505)
5683                 val |= DREF_NONSPREAD_CK505_ENABLE;
5684         else
5685                 val |= DREF_NONSPREAD_SOURCE_ENABLE;
5686
5687         if (has_panel) {
5688                 val &= ~DREF_SSC_SOURCE_MASK;
5689                 val |= DREF_SSC_SOURCE_ENABLE;
5690
5691                 /* SSC must be turned on before enabling the CPU output */
5692                 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5693                         DRM_DEBUG_KMS("Using SSC on panel\n");
5694                         val |= DREF_SSC1_ENABLE;
5695                 } else
5696                         val &= ~DREF_SSC1_ENABLE;
5697
5698                 /* Get SSC going before enabling the outputs */
5699                 I915_WRITE(PCH_DREF_CONTROL, val);
5700                 POSTING_READ(PCH_DREF_CONTROL);
5701                 udelay(200);
5702
5703                 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5704
5705                 /* Enable CPU source on CPU attached eDP */
5706                 if (has_cpu_edp) {
5707                         if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5708                                 DRM_DEBUG_KMS("Using SSC on eDP\n");
5709                                 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5710                         }
5711                         else
5712                                 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
5713                 } else
5714                         val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5715
5716                 I915_WRITE(PCH_DREF_CONTROL, val);
5717                 POSTING_READ(PCH_DREF_CONTROL);
5718                 udelay(200);
5719         } else {
5720                 DRM_DEBUG_KMS("Disabling SSC entirely\n");
5721
5722                 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5723
5724                 /* Turn off CPU output */
5725                 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5726
5727                 I915_WRITE(PCH_DREF_CONTROL, val);
5728                 POSTING_READ(PCH_DREF_CONTROL);
5729                 udelay(200);
5730
5731                 /* Turn off the SSC source */
5732                 val &= ~DREF_SSC_SOURCE_MASK;
5733                 val |= DREF_SSC_SOURCE_DISABLE;
5734
5735                 /* Turn off SSC1 */
5736                 val &= ~DREF_SSC1_ENABLE;
5737
5738                 I915_WRITE(PCH_DREF_CONTROL, val);
5739                 POSTING_READ(PCH_DREF_CONTROL);
5740                 udelay(200);
5741         }
5742
5743         BUG_ON(val != final);
5744 }
5745
5746 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
5747 {
5748         uint32_t tmp;
5749
5750         tmp = I915_READ(SOUTH_CHICKEN2);
5751         tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
5752         I915_WRITE(SOUTH_CHICKEN2, tmp);
5753
5754         if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
5755                                FDI_MPHY_IOSFSB_RESET_STATUS, 100))
5756                 DRM_ERROR("FDI mPHY reset assert timeout\n");
5757
5758         tmp = I915_READ(SOUTH_CHICKEN2);
5759         tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
5760         I915_WRITE(SOUTH_CHICKEN2, tmp);
5761
5762         if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
5763                                 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
5764                 DRM_ERROR("FDI mPHY reset de-assert timeout\n");
5765 }
5766
5767 /* WaMPhyProgramming:hsw */
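     /*
      * Note: the SBI register offsets and values below are the opaque mPHY
      * tuning parameters from the WaMPhyProgramming:hsw workaround table;
      * the 0x20xx/0x21xx pairs appear to program the same field for the
      * two FDI mPHY channels.
      */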
5768 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
5769 {
5770         uint32_t tmp;
5771
5772         tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
5773         tmp &= ~(0xFF << 24);
5774         tmp |= (0x12 << 24);
5775         intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
5776
5777         tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
5778         tmp |= (1 << 11);
5779         intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
5780
5781         tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
5782         tmp |= (1 << 11);
5783         intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
5784
5785         tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
5786         tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5787         intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
5788
5789         tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
5790         tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5791         intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
5792
5793         tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
5794         tmp &= ~(7 << 13);
5795         tmp |= (5 << 13);
5796         intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
5797
5798         tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
5799         tmp &= ~(7 << 13);
5800         tmp |= (5 << 13);
5801         intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
5802
5803         tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
5804         tmp &= ~0xFF;
5805         tmp |= 0x1C;
5806         intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
5807
5808         tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
5809         tmp &= ~0xFF;
5810         tmp |= 0x1C;
5811         intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
5812
5813         tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
5814         tmp &= ~(0xFF << 16);
5815         tmp |= (0x1C << 16);
5816         intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
5817
5818         tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
5819         tmp &= ~(0xFF << 16);
5820         tmp |= (0x1C << 16);
5821         intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
5822
5823         tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
5824         tmp |= (1 << 27);
5825         intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
5826
5827         tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
5828         tmp |= (1 << 27);
5829         intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
5830
5831         tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
5832         tmp &= ~(0xF << 28);
5833         tmp |= (4 << 28);
5834         intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
5835
5836         tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
5837         tmp &= ~(0xF << 28);
5838         tmp |= (4 << 28);
5839         intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
5840 }
5841
5842 /* Implements 3 different sequences from BSpec chapter "Display iCLK
5843  * Programming" based on the parameters passed:
5844  * - Sequence to enable CLKOUT_DP
5845  * - Sequence to enable CLKOUT_DP without spread
5846  * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
5847  */
5848 static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
5849                                  bool with_fdi)
5850 {
5851         struct drm_i915_private *dev_priv = dev->dev_private;
5852         uint32_t reg, tmp;
5853
5854         if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
5855                 with_spread = true;
5856         if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE &&
5857                  with_fdi, "LP PCH doesn't have FDI\n"))
5858                 with_fdi = false;
5859
5860         mutex_lock(&dev_priv->dpio_lock);
5861
5862         tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5863         tmp &= ~SBI_SSCCTL_DISABLE;
5864         tmp |= SBI_SSCCTL_PATHALT;
5865         intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5866
5867         udelay(24);
5868
5869         if (with_spread) {
5870                 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5871                 tmp &= ~SBI_SSCCTL_PATHALT;
5872                 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5873
5874                 if (with_fdi) {
5875                         lpt_reset_fdi_mphy(dev_priv);
5876                         lpt_program_fdi_mphy(dev_priv);
5877                 }
5878         }
5879
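             /*
              * The CLKOUT_DP buffer-enable control lives in SBI_GEN0 on
              * LPT-LP PCHs and in SBI_DBUFF0 on other LPT variants, hence
              * the register selection below.
              */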
5880         reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
5881                SBI_GEN0 : SBI_DBUFF0;
5882         tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
5883         tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
5884         intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
5885
5886         mutex_unlock(&dev_priv->dpio_lock);
5887 }
5888
5889 /* Sequence to disable CLKOUT_DP */
5890 static void lpt_disable_clkout_dp(struct drm_device *dev)
5891 {
5892         struct drm_i915_private *dev_priv = dev->dev_private;
5893         uint32_t reg, tmp;
5894
5895         mutex_lock(&dev_priv->dpio_lock);
5896
5897         reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
5898                SBI_GEN0 : SBI_DBUFF0;
5899         tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
5900         tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
5901         intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
5902
5903         tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5904         if (!(tmp & SBI_SSCCTL_DISABLE)) {
5905                 if (!(tmp & SBI_SSCCTL_PATHALT)) {
5906                         tmp |= SBI_SSCCTL_PATHALT;
5907                         intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5908                         udelay(32);
5909                 }
5910                 tmp |= SBI_SSCCTL_DISABLE;
5911                 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5912         }
5913
5914         mutex_unlock(&dev_priv->dpio_lock);
5915 }
5916
5917 static void lpt_init_pch_refclk(struct drm_device *dev)
5918 {
5919         struct drm_mode_config *mode_config = &dev->mode_config;
5920         struct intel_encoder *encoder;
5921         bool has_vga = false;
5922
5923         list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5924                 switch (encoder->type) {
5925                 case INTEL_OUTPUT_ANALOG:
5926                         has_vga = true;
5927                         break;
5928                 }
5929         }
5930
5931         if (has_vga)
5932                 lpt_enable_clkout_dp(dev, true, true);
5933         else
5934                 lpt_disable_clkout_dp(dev);
5935 }
5936
5937 /*
5938  * Initialize reference clocks when the driver loads
5939  */
5940 void intel_init_pch_refclk(struct drm_device *dev)
5941 {
5942         if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
5943                 ironlake_init_pch_refclk(dev);
5944         else if (HAS_PCH_LPT(dev))
5945                 lpt_init_pch_refclk(dev);
5946 }
5947
5948 static int ironlake_get_refclk(struct drm_crtc *crtc)
5949 {
5950         struct drm_device *dev = crtc->dev;
5951         struct drm_i915_private *dev_priv = dev->dev_private;
5952         struct intel_encoder *encoder;
5953         int num_connectors = 0;
5954         bool is_lvds = false;
5955
5956         for_each_encoder_on_crtc(dev, crtc, encoder) {
5957                 switch (encoder->type) {
5958                 case INTEL_OUTPUT_LVDS:
5959                         is_lvds = true;
5960                         break;
5961                 }
5962                 num_connectors++;
5963         }
5964
5965         if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5966                 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
5967                               dev_priv->vbt.lvds_ssc_freq);
5968                 return dev_priv->vbt.lvds_ssc_freq;
5969         }
5970
5971         return 120000;
5972 }
5973
5974 static void ironlake_set_pipeconf(struct drm_crtc *crtc)
5975 {
5976         struct drm_i915_private *dev_priv = crtc->dev->dev_private;
5977         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5978         int pipe = intel_crtc->pipe;
5979         uint32_t val;
5980
5981         val = 0;
5982
5983         switch (intel_crtc->config.pipe_bpp) {
5984         case 18:
5985                 val |= PIPECONF_6BPC;
5986                 break;
5987         case 24:
5988                 val |= PIPECONF_8BPC;
5989                 break;
5990         case 30:
5991                 val |= PIPECONF_10BPC;
5992                 break;
5993         case 36:
5994                 val |= PIPECONF_12BPC;
5995                 break;
5996         default:
5997                 /* Case prevented by intel_choose_pipe_bpp_dither. */
5998                 BUG();
5999         }
6000
6001         if (intel_crtc->config.dither)
6002                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
6003
6004         if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
6005                 val |= PIPECONF_INTERLACED_ILK;
6006         else
6007                 val |= PIPECONF_PROGRESSIVE;
6008
6009         if (intel_crtc->config.limited_color_range)
6010                 val |= PIPECONF_COLOR_RANGE_SELECT;
6011
6012         I915_WRITE(PIPECONF(pipe), val);
6013         POSTING_READ(PIPECONF(pipe));
6014 }
6015
6016 /*
6017  * Set up the pipe CSC unit.
6018  *
6019  * Currently only full range RGB to limited range RGB conversion
6020  * is supported, but eventually this should handle various
6021  * RGB<->YCbCr scenarios as well.
6022  */
6023 static void intel_set_pipe_csc(struct drm_crtc *crtc)
6024 {
6025         struct drm_device *dev = crtc->dev;
6026         struct drm_i915_private *dev_priv = dev->dev_private;
6027         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6028         int pipe = intel_crtc->pipe;
6029         uint16_t coeff = 0x7800; /* 1.0 */
6030
6031         /*
6032          * TODO: Check what kind of values actually come out of the pipe
6033          * with these coeff/postoff values and adjust to get the best
6034          * accuracy. Perhaps we even need to take the bpc value into
6035          * consideration.
6036          */
6037
6038         if (intel_crtc->config.limited_color_range)
6039                 coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
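             /*
              * (235 - 16) / 255 is the scale factor (roughly 0.86) needed to
              * compress full-range [0..255] RGB into the limited [16..235]
              * range; the mask trims the value to the precision of the
              * coefficient register.
              */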
6040
6041         /*
6042          * GY/GU and RY/RU should be the other way around according
6043          * to BSpec, but reality doesn't agree. Just set them up in
6044          * a way that results in the correct picture.
6045          */
6046         I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
6047         I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);
6048
6049         I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
6050         I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);
6051
6052         I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
6053         I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);
6054
6055         I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
6056         I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
6057         I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
6058
6059         if (INTEL_INFO(dev)->gen > 6) {
6060                 uint16_t postoff = 0;
6061
6062                 if (intel_crtc->config.limited_color_range)
6063                         postoff = (16 * (1 << 12) / 255) & 0x1fff;
6064
6065                 I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
6066                 I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
6067                 I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
6068
6069                 I915_WRITE(PIPE_CSC_MODE(pipe), 0);
6070         } else {
6071                 uint32_t mode = CSC_MODE_YUV_TO_RGB;
6072
6073                 if (intel_crtc->config.limited_color_range)
6074                         mode |= CSC_BLACK_SCREEN_OFFSET;
6075
6076                 I915_WRITE(PIPE_CSC_MODE(pipe), mode);
6077         }
6078 }
6079
6080 static void haswell_set_pipeconf(struct drm_crtc *crtc)
6081 {
6082         struct drm_device *dev = crtc->dev;
6083         struct drm_i915_private *dev_priv = dev->dev_private;
6084         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6085         enum pipe pipe = intel_crtc->pipe;
6086         enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
6087         uint32_t val;
6088
6089         val = 0;
6090
6091         if (IS_HASWELL(dev) && intel_crtc->config.dither)
6092                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
6093
6094         if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
6095                 val |= PIPECONF_INTERLACED_ILK;
6096         else
6097                 val |= PIPECONF_PROGRESSIVE;
6098
6099         I915_WRITE(PIPECONF(cpu_transcoder), val);
6100         POSTING_READ(PIPECONF(cpu_transcoder));
6101
6102         I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
6103         POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
6104
6105         if (IS_BROADWELL(dev)) {
6106                 val = 0;
6107
6108                 switch (intel_crtc->config.pipe_bpp) {
6109                 case 18:
6110                         val |= PIPEMISC_DITHER_6_BPC;
6111                         break;
6112                 case 24:
6113                         val |= PIPEMISC_DITHER_8_BPC;
6114                         break;
6115                 case 30:
6116                         val |= PIPEMISC_DITHER_10_BPC;
6117                         break;
6118                 case 36:
6119                         val |= PIPEMISC_DITHER_12_BPC;
6120                         break;
6121                 default:
6122                         /* Case prevented by pipe_config_set_bpp. */
6123                         BUG();
6124                 }
6125
6126                 if (intel_crtc->config.dither)
6127                         val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
6128
6129                 I915_WRITE(PIPEMISC(pipe), val);
6130         }
6131 }
6132
6133 static bool ironlake_compute_clocks(struct drm_crtc *crtc,
6134                                     intel_clock_t *clock,
6135                                     bool *has_reduced_clock,
6136                                     intel_clock_t *reduced_clock)
6137 {
6138         struct drm_device *dev = crtc->dev;
6139         struct drm_i915_private *dev_priv = dev->dev_private;
6140         struct intel_encoder *intel_encoder;
6141         int refclk;
6142         const intel_limit_t *limit;
6143         bool ret, is_lvds = false;
6144
6145         for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
6146                 switch (intel_encoder->type) {
6147                 case INTEL_OUTPUT_LVDS:
6148                         is_lvds = true;
6149                         break;
6150                 }
6151         }
6152
6153         refclk = ironlake_get_refclk(crtc);
6154
6155         /*
6156          * Returns a set of divisors for the desired target clock with the given
6157          * refclk, or FALSE.  The returned values represent the clock equation:
6158          * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
6159          */
6160         limit = intel_limit(crtc, refclk);
6161         ret = dev_priv->display.find_dpll(limit, crtc,
6162                                           to_intel_crtc(crtc)->config.port_clock,
6163                                           refclk, NULL, clock);
6164         if (!ret)
6165                 return false;
6166
6167         if (is_lvds && dev_priv->lvds_downclock_avail) {
6168                 /*
6169                  * Ensure we match the reduced clock's P to the target clock.
6170                  * If the clocks don't match, we can't switch the display clock
6171                  * by using the FP0/FP1. In that case we will disable the LVDS
6172                  * downclock feature.
6173                  */
6174                 *has_reduced_clock =
6175                         dev_priv->display.find_dpll(limit, crtc,
6176                                                     dev_priv->lvds_downclock,
6177                                                     refclk, clock,
6178                                                     reduced_clock);
6179         }
6180
6181         return true;
6182 }
6183
6184 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
6185 {
6186         /*
6187          * Account for spread spectrum to avoid
6188          * oversubscribing the link. Max center spread
6189          * is 2.5%; use 5% for safety's sake.
6190          */
6191         u32 bps = target_clock * bpp * 21 / 20;
6192         return bps / (link_bw * 8) + 1;
6193 }
6194
6195 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
6196 {
6197         return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
6198 }
6199
6200 static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
6201                                       u32 *fp,
6202                                       intel_clock_t *reduced_clock, u32 *fp2)
6203 {
6204         struct drm_crtc *crtc = &intel_crtc->base;
6205         struct drm_device *dev = crtc->dev;
6206         struct drm_i915_private *dev_priv = dev->dev_private;
6207         struct intel_encoder *intel_encoder;
6208         uint32_t dpll;
6209         int factor, num_connectors = 0;
6210         bool is_lvds = false, is_sdvo = false;
6211
6212         for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
6213                 switch (intel_encoder->type) {
6214                 case INTEL_OUTPUT_LVDS:
6215                         is_lvds = true;
6216                         break;
6217                 case INTEL_OUTPUT_SDVO:
6218                 case INTEL_OUTPUT_HDMI:
6219                         is_sdvo = true;
6220                         break;
6221                 }
6222
6223                 num_connectors++;
6224         }
6225
6226         /* Enable autotuning of the PLL clock (if permissible) */
6227         factor = 21;
6228         if (is_lvds) {
6229                 if ((intel_panel_use_ssc(dev_priv) &&
6230                      dev_priv->vbt.lvds_ssc_freq == 100000) ||
6231                     (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
6232                         factor = 25;
6233         } else if (intel_crtc->config.sdvo_tv_clock)
6234                 factor = 20;
6235
6236         if (ironlake_needs_fb_cb_tune(&intel_crtc->config.dpll, factor))
6237                 *fp |= FP_CB_TUNE;
6238
6239         if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
6240                 *fp2 |= FP_CB_TUNE;
6241
6242         dpll = 0;
6243
6244         if (is_lvds)
6245                 dpll |= DPLLB_MODE_LVDS;
6246         else
6247                 dpll |= DPLLB_MODE_DAC_SERIAL;
6248
6249         dpll |= (intel_crtc->config.pixel_multiplier - 1)
6250                 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
6251
6252         if (is_sdvo)
6253                 dpll |= DPLL_SDVO_HIGH_SPEED;
6254         if (intel_crtc->config.has_dp_encoder)
6255                 dpll |= DPLL_SDVO_HIGH_SPEED;
6256
6257         /* compute bitmask from p1 value */
6258         dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
6259         /* also FPA1 */
6260         dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
6261
6262         switch (intel_crtc->config.dpll.p2) {
6263         case 5:
6264                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
6265                 break;
6266         case 7:
6267                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
6268                 break;
6269         case 10:
6270                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
6271                 break;
6272         case 14:
6273                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
6274                 break;
6275         }
6276
6277         if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
6278                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
6279         else
6280                 dpll |= PLL_REF_INPUT_DREFCLK;
6281
6282         return dpll | DPLL_VCO_ENABLE;
6283 }
6284
6285 static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
6286                                   int x, int y,
6287                                   struct drm_framebuffer *fb)
6288 {
6289         struct drm_device *dev = crtc->dev;
6290         struct drm_i915_private *dev_priv = dev->dev_private;
6291         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6292         int pipe = intel_crtc->pipe;
6293         int plane = intel_crtc->plane;
6294         int num_connectors = 0;
6295         intel_clock_t clock, reduced_clock;
6296         u32 dpll = 0, fp = 0, fp2 = 0;
6297         bool ok, has_reduced_clock = false;
6298         bool is_lvds = false;
6299         struct intel_encoder *encoder;
6300         struct intel_shared_dpll *pll;
6301         int ret;
6302
6303         for_each_encoder_on_crtc(dev, crtc, encoder) {
6304                 switch (encoder->type) {
6305                 case INTEL_OUTPUT_LVDS:
6306                         is_lvds = true;
6307                         break;
6308                 }
6309
6310                 num_connectors++;
6311         }
6312
6313         WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
6314              "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
6315
6316         ok = ironlake_compute_clocks(crtc, &clock,
6317                                      &has_reduced_clock, &reduced_clock);
6318         if (!ok && !intel_crtc->config.clock_set) {
6319                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
6320                 return -EINVAL;
6321         }
6322         /* Compat-code for transition, will disappear. */
6323         if (!intel_crtc->config.clock_set) {
6324                 intel_crtc->config.dpll.n = clock.n;
6325                 intel_crtc->config.dpll.m1 = clock.m1;
6326                 intel_crtc->config.dpll.m2 = clock.m2;
6327                 intel_crtc->config.dpll.p1 = clock.p1;
6328                 intel_crtc->config.dpll.p2 = clock.p2;
6329         }
6330
6331         /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
6332         if (intel_crtc->config.has_pch_encoder) {
6333                 fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll);
6334                 if (has_reduced_clock)
6335                         fp2 = i9xx_dpll_compute_fp(&reduced_clock);
6336
6337                 dpll = ironlake_compute_dpll(intel_crtc,
6338                                              &fp, &reduced_clock,
6339                                              has_reduced_clock ? &fp2 : NULL);
6340
6341                 intel_crtc->config.dpll_hw_state.dpll = dpll;
6342                 intel_crtc->config.dpll_hw_state.fp0 = fp;
6343                 if (has_reduced_clock)
6344                         intel_crtc->config.dpll_hw_state.fp1 = fp2;
6345                 else
6346                         intel_crtc->config.dpll_hw_state.fp1 = fp;
6347
6348                 pll = intel_get_shared_dpll(intel_crtc);
6349                 if (pll == NULL) {
6350                         DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
6351                                          pipe_name(pipe));
6352                         return -EINVAL;
6353                 }
6354         } else
6355                 intel_put_shared_dpll(intel_crtc);
6356
6357         if (intel_crtc->config.has_dp_encoder)
6358                 intel_dp_set_m_n(intel_crtc);
6359
6360         if (is_lvds && has_reduced_clock && i915_powersave)
6361                 intel_crtc->lowfreq_avail = true;
6362         else
6363                 intel_crtc->lowfreq_avail = false;
6364
6365         intel_set_pipe_timings(intel_crtc);
6366
6367         if (intel_crtc->config.has_pch_encoder) {
6368                 intel_cpu_transcoder_set_m_n(intel_crtc,
6369                                              &intel_crtc->config.fdi_m_n);
6370         }
6371
6372         ironlake_set_pipeconf(crtc);
6373
6374         /* Set up the display plane register */
6375         I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
6376         POSTING_READ(DSPCNTR(plane));
6377
6378         ret = intel_pipe_set_base(crtc, x, y, fb);
6379
6380         return ret;
6381 }
6382
6383 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
6384                                          struct intel_link_m_n *m_n)
6385 {
6386         struct drm_device *dev = crtc->base.dev;
6387         struct drm_i915_private *dev_priv = dev->dev_private;
6388         enum pipe pipe = crtc->pipe;
6389
6390         m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
6391         m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
6392         m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
6393                 & ~TU_SIZE_MASK;
6394         m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
6395         m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
6396                     & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
6397 }
6398
6399 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
6400                                          enum transcoder transcoder,
6401                                          struct intel_link_m_n *m_n)
6402 {
6403         struct drm_device *dev = crtc->base.dev;
6404         struct drm_i915_private *dev_priv = dev->dev_private;
6405         enum pipe pipe = crtc->pipe;
6406
6407         if (INTEL_INFO(dev)->gen >= 5) {
6408                 m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
6409                 m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
6410                 m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
6411                         & ~TU_SIZE_MASK;
6412                 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
6413                 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
6414                             & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
6415         } else {
6416                 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
6417                 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
6418                 m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
6419                         & ~TU_SIZE_MASK;
6420                 m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
6421                 m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
6422                             & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
6423         }
6424 }
6425
6426 void intel_dp_get_m_n(struct intel_crtc *crtc,
6427                       struct intel_crtc_config *pipe_config)
6428 {
6429         if (crtc->config.has_pch_encoder)
6430                 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
6431         else
6432                 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
6433                                              &pipe_config->dp_m_n);
6434 }
6435
6436 static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
6437                                         struct intel_crtc_config *pipe_config)
6438 {
6439         intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
6440                                      &pipe_config->fdi_m_n);
6441 }
6442
6443 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
6444                                      struct intel_crtc_config *pipe_config)
6445 {
6446         struct drm_device *dev = crtc->base.dev;
6447         struct drm_i915_private *dev_priv = dev->dev_private;
6448         uint32_t tmp;
6449
6450         tmp = I915_READ(PF_CTL(crtc->pipe));
6451
6452         if (tmp & PF_ENABLE) {
6453                 pipe_config->pch_pfit.enabled = true;
6454                 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
6455                 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
6456
6457                 /* We currently do not free assignments of panel fitters on
6458                  * ivb/hsw (since we don't use the higher upscaling modes which
6459                  * differentiate them), so just WARN about this case for now. */
6460                 if (IS_GEN7(dev)) {
6461                         WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
6462                                 PF_PIPE_SEL_IVB(crtc->pipe));
6463                 }
6464         }
6465 }
6466
6467 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
6468                                      struct intel_crtc_config *pipe_config)
6469 {
6470         struct drm_device *dev = crtc->base.dev;
6471         struct drm_i915_private *dev_priv = dev->dev_private;
6472         uint32_t tmp;
6473
6474         pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
6475         pipe_config->shared_dpll = DPLL_ID_PRIVATE;
6476
6477         tmp = I915_READ(PIPECONF(crtc->pipe));
6478         if (!(tmp & PIPECONF_ENABLE))
6479                 return false;
6480
6481         switch (tmp & PIPECONF_BPC_MASK) {
6482         case PIPECONF_6BPC:
6483                 pipe_config->pipe_bpp = 18;
6484                 break;
6485         case PIPECONF_8BPC:
6486                 pipe_config->pipe_bpp = 24;
6487                 break;
6488         case PIPECONF_10BPC:
6489                 pipe_config->pipe_bpp = 30;
6490                 break;
6491         case PIPECONF_12BPC:
6492                 pipe_config->pipe_bpp = 36;
6493                 break;
6494         default:
6495                 break;
6496         }
6497
6498         if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
6499                 struct intel_shared_dpll *pll;
6500
6501                 pipe_config->has_pch_encoder = true;
6502
6503                 tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
6504                 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
6505                                           FDI_DP_PORT_WIDTH_SHIFT) + 1;
6506
6507                 ironlake_get_fdi_m_n_config(crtc, pipe_config);
6508
6509                 if (HAS_PCH_IBX(dev_priv->dev)) {
6510                         pipe_config->shared_dpll =
6511                                 (enum intel_dpll_id) crtc->pipe;
6512                 } else {
6513                         tmp = I915_READ(PCH_DPLL_SEL);
6514                         if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
6515                                 pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
6516                         else
6517                                 pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
6518                 }
6519
6520                 pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
6521
6522                 WARN_ON(!pll->get_hw_state(dev_priv, pll,
6523                                            &pipe_config->dpll_hw_state));
6524
6525                 tmp = pipe_config->dpll_hw_state.dpll;
6526                 pipe_config->pixel_multiplier =
6527                         ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
6528                          >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
6529
6530                 ironlake_pch_clock_get(crtc, pipe_config);
6531         } else {
6532                 pipe_config->pixel_multiplier = 1;
6533         }
6534
6535         intel_get_pipe_timings(crtc, pipe_config);
6536
6537         ironlake_get_pfit_config(crtc, pipe_config);
6538
6539         return true;
6540 }
6541
6542 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
6543 {
6544         struct drm_device *dev = dev_priv->dev;
6545         struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
6546         struct intel_crtc *crtc;
6547         unsigned long irqflags;
6548         uint32_t val;
6549
6550         list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
6551                 WARN(crtc->active, "CRTC for pipe %c enabled\n",
6552                      pipe_name(crtc->pipe));
6553
6554         WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
6555         WARN(plls->spll_refcount, "SPLL enabled\n");
6556         WARN(plls->wrpll1_refcount, "WRPLL1 enabled\n");
6557         WARN(plls->wrpll2_refcount, "WRPLL2 enabled\n");
6558         WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
6559         WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
6560              "CPU PWM1 enabled\n");
6561         WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
6562              "CPU PWM2 enabled\n");
6563         WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
6564              "PCH PWM1 enabled\n");
6565         WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
6566              "Utility pin enabled\n");
6567         WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
6568
6569         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
6570         val = I915_READ(DEIMR);
6571         WARN((val | DE_PCH_EVENT_IVB) != 0xffffffff,
6572              "Unexpected DEIMR bits enabled: 0x%x\n", val);
6573         val = I915_READ(SDEIMR);
6574         WARN((val | SDE_HOTPLUG_MASK_CPT) != 0xffffffff,
6575              "Unexpected SDEIMR bits enabled: 0x%x\n", val);
6576         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
6577 }
6578
6579 /*
6580  * This function implements pieces of two sequences from BSpec:
6581  * - Sequence for display software to disable LCPLL
6582  * - Sequence for display software to allow package C8+
6583  * The steps implemented here are just the steps that actually touch the LCPLL
6584  * register. Callers should take care of disabling all the display engine
6585  * functions, doing the mode unset, fixing interrupts, etc.
6586  */
6587 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
6588                               bool switch_to_fclk, bool allow_power_down)
6589 {
6590         uint32_t val;
6591
6592         assert_can_disable_lcpll(dev_priv);
6593
6594         val = I915_READ(LCPLL_CTL);
6595
6596         if (switch_to_fclk) {
6597                 val |= LCPLL_CD_SOURCE_FCLK;
6598                 I915_WRITE(LCPLL_CTL, val);
6599
6600                 if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
6601                                        LCPLL_CD_SOURCE_FCLK_DONE, 1))
6602                         DRM_ERROR("Switching to FCLK failed\n");
6603
6604                 val = I915_READ(LCPLL_CTL);
6605         }
6606
6607         val |= LCPLL_PLL_DISABLE;
6608         I915_WRITE(LCPLL_CTL, val);
6609         POSTING_READ(LCPLL_CTL);
6610
6611         if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
6612                 DRM_ERROR("LCPLL still locked\n");
6613
6614         val = I915_READ(D_COMP);
6615         val |= D_COMP_COMP_DISABLE;
6616         mutex_lock(&dev_priv->rps.hw_lock);
6617         if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
6618                 DRM_ERROR("Failed to disable D_COMP\n");
6619         mutex_unlock(&dev_priv->rps.hw_lock);
6620         POSTING_READ(D_COMP);
6621         ndelay(100);
6622
6623         if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
6624                 DRM_ERROR("D_COMP RCOMP still in progress\n");
6625
6626         if (allow_power_down) {
6627                 val = I915_READ(LCPLL_CTL);
6628                 val |= LCPLL_POWER_DOWN_ALLOW;
6629                 I915_WRITE(LCPLL_CTL, val);
6630                 POSTING_READ(LCPLL_CTL);
6631         }
6632 }
6633
6634 /*
6635  * Fully restores LCPLL, disallowing power down and switching back to LCPLL
6636  * source.
6637  */
6638 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
6639 {
6640         uint32_t val;
6641
6642         val = I915_READ(LCPLL_CTL);
6643
6644         if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
6645                     LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
6646                 return;
6647
6648         /* Make sure we're not in the PC8 state before disabling PC8,
6649          * otherwise we'll hang the machine! */
6650         gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
6651
6652         if (val & LCPLL_POWER_DOWN_ALLOW) {
6653                 val &= ~LCPLL_POWER_DOWN_ALLOW;
6654                 I915_WRITE(LCPLL_CTL, val);
6655                 POSTING_READ(LCPLL_CTL);
6656         }
6657
6658         val = I915_READ(D_COMP);
6659         val |= D_COMP_COMP_FORCE;
6660         val &= ~D_COMP_COMP_DISABLE;
6661         mutex_lock(&dev_priv->rps.hw_lock);
6662         if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
6663                 DRM_ERROR("Failed to enable D_COMP\n");
6664         mutex_unlock(&dev_priv->rps.hw_lock);
6665         POSTING_READ(D_COMP);
6666
6667         val = I915_READ(LCPLL_CTL);
6668         val &= ~LCPLL_PLL_DISABLE;
6669         I915_WRITE(LCPLL_CTL, val);
6670
6671         if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
6672                 DRM_ERROR("LCPLL not locked yet\n");
6673
6674         if (val & LCPLL_CD_SOURCE_FCLK) {
6675                 val = I915_READ(LCPLL_CTL);
6676                 val &= ~LCPLL_CD_SOURCE_FCLK;
6677                 I915_WRITE(LCPLL_CTL, val);
6678
6679                 if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
6680                                         LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
6681                         DRM_ERROR("Switching back to LCPLL failed\n");
6682         }
6683
6684         gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
6685 }
6686
6687 void hsw_enable_pc8_work(struct work_struct *__work)
6688 {
6689         struct drm_i915_private *dev_priv =
6690                 container_of(to_delayed_work(__work), struct drm_i915_private,
6691                              pc8.enable_work);
6692         struct drm_device *dev = dev_priv->dev;
6693         uint32_t val;
6694
6695         WARN_ON(!HAS_PC8(dev));
6696
6697         if (dev_priv->pc8.enabled)
6698                 return;
6699
6700         DRM_DEBUG_KMS("Enabling package C8+\n");
6701
6702         dev_priv->pc8.enabled = true;
6703
6704         if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
6705                 val = I915_READ(SOUTH_DSPCLK_GATE_D);
6706                 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
6707                 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
6708         }
6709
6710         lpt_disable_clkout_dp(dev);
6711         hsw_pc8_disable_interrupts(dev);
6712         hsw_disable_lcpll(dev_priv, true, true);
6713
6714         intel_runtime_pm_put(dev_priv);
6715 }
6716
6717 static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv)
6718 {
6719         WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
6720         WARN(dev_priv->pc8.disable_count < 1,
6721              "pc8.disable_count: %d\n", dev_priv->pc8.disable_count);
6722
6723         dev_priv->pc8.disable_count--;
6724         if (dev_priv->pc8.disable_count != 0)
6725                 return;
6726
6727         schedule_delayed_work(&dev_priv->pc8.enable_work,
6728                               msecs_to_jiffies(i915_pc8_timeout));
6729 }
6730
6731 static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
6732 {
6733         struct drm_device *dev = dev_priv->dev;
6734         uint32_t val;
6735
6736         WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
6737         WARN(dev_priv->pc8.disable_count < 0,
6738              "pc8.disable_count: %d\n", dev_priv->pc8.disable_count);
6739
6740         dev_priv->pc8.disable_count++;
6741         if (dev_priv->pc8.disable_count != 1)
6742                 return;
6743
6744         WARN_ON(!HAS_PC8(dev));
6745
6746         cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
6747         if (!dev_priv->pc8.enabled)
6748                 return;
6749
6750         DRM_DEBUG_KMS("Disabling package C8+\n");
6751
6752         intel_runtime_pm_get(dev_priv);
6753
6754         hsw_restore_lcpll(dev_priv);
6755         hsw_pc8_restore_interrupts(dev);
6756         lpt_init_pch_refclk(dev);
6757
6758         if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
6759                 val = I915_READ(SOUTH_DSPCLK_GATE_D);
6760                 val |= PCH_LP_PARTITION_LEVEL_DISABLE;
6761                 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
6762         }
6763
6764         intel_prepare_ddi(dev);
6765         i915_gem_init_swizzling(dev);
6766         mutex_lock(&dev_priv->rps.hw_lock);
6767         gen6_update_ring_freq(dev);
6768         mutex_unlock(&dev_priv->rps.hw_lock);
6769         dev_priv->pc8.enabled = false;
6770 }
6771
6772 void hsw_enable_package_c8(struct drm_i915_private *dev_priv)
6773 {
6774         if (!HAS_PC8(dev_priv->dev))
6775                 return;
6776
6777         mutex_lock(&dev_priv->pc8.lock);
6778         __hsw_enable_package_c8(dev_priv);
6779         mutex_unlock(&dev_priv->pc8.lock);
6780 }
6781
6782 void hsw_disable_package_c8(struct drm_i915_private *dev_priv)
6783 {
6784         if (!HAS_PC8(dev_priv->dev))
6785                 return;
6786
6787         mutex_lock(&dev_priv->pc8.lock);
6788         __hsw_disable_package_c8(dev_priv);
6789         mutex_unlock(&dev_priv->pc8.lock);
6790 }
6791
6792 static bool hsw_can_enable_package_c8(struct drm_i915_private *dev_priv)
6793 {
6794         struct drm_device *dev = dev_priv->dev;
6795         struct intel_crtc *crtc;
6796         uint32_t val;
6797
6798         list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
6799                 if (crtc->base.enabled)
6800                         return false;
6801
6802         /* This case is still possible since we have the i915.disable_power_well
6803          * parameter, and also KVMr or something else might be requesting the
6804          * power well. */
6805         val = I915_READ(HSW_PWR_WELL_DRIVER);
6806         if (val != 0) {
6807                 DRM_DEBUG_KMS("Not enabling PC8: power well on\n");
6808                 return false;
6809         }
6810
6811         return true;
6812 }
6813
6814 /* Since we're called from modeset_global_resources there's no way to
6815  * symmetrically increase and decrease the refcount, so we use
6816  * dev_priv->pc8.requirements_met to track whether we already have the refcount
6817  * or not.
6818  */
6819 static void hsw_update_package_c8(struct drm_device *dev)
6820 {
6821         struct drm_i915_private *dev_priv = dev->dev_private;
6822         bool allow;
6823
6824         if (!HAS_PC8(dev_priv->dev))
6825                 return;
6826
6827         if (!i915_enable_pc8)
6828                 return;
6829
6830         mutex_lock(&dev_priv->pc8.lock);
6831
6832         allow = hsw_can_enable_package_c8(dev_priv);
6833
6834         if (allow == dev_priv->pc8.requirements_met)
6835                 goto done;
6836
6837         dev_priv->pc8.requirements_met = allow;
6838
6839         if (allow)
6840                 __hsw_enable_package_c8(dev_priv);
6841         else
6842                 __hsw_disable_package_c8(dev_priv);
6843
6844 done:
6845         mutex_unlock(&dev_priv->pc8.lock);
6846 }
6847
6848 static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv)
6849 {
6850         if (!HAS_PC8(dev_priv->dev))
6851                 return;
6852
6853         mutex_lock(&dev_priv->pc8.lock);
6854         if (!dev_priv->pc8.gpu_idle) {
6855                 dev_priv->pc8.gpu_idle = true;
6856                 __hsw_enable_package_c8(dev_priv);
6857         }
6858         mutex_unlock(&dev_priv->pc8.lock);
6859 }
6860
6861 static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
6862 {
6863         if (!HAS_PC8(dev_priv->dev))
6864                 return;
6865
6866         mutex_lock(&dev_priv->pc8.lock);
6867         if (dev_priv->pc8.gpu_idle) {
6868                 dev_priv->pc8.gpu_idle = false;
6869                 __hsw_disable_package_c8(dev_priv);
6870         }
6871         mutex_unlock(&dev_priv->pc8.lock);
6872 }
6873
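     /* Iterate over every power domain whose bit is set in @mask. */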
6874 #define for_each_power_domain(domain, mask)                             \
6875         for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)     \
6876                 if ((1 << (domain)) & (mask))
6877
6878 static unsigned long get_pipe_power_domains(struct drm_device *dev,
6879                                             enum pipe pipe, bool pfit_enabled)
6880 {
6881         unsigned long mask;
6882         enum transcoder transcoder;
6883
6884         transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
6885
6886         mask = BIT(POWER_DOMAIN_PIPE(pipe));
6887         mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
6888         if (pfit_enabled)
6889                 mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
6890
6891         return mask;
6892 }
6893
6894 void intel_display_set_init_power(struct drm_device *dev, bool enable)
6895 {
6896         struct drm_i915_private *dev_priv = dev->dev_private;
6897
6898         if (dev_priv->power_domains.init_power_on == enable)
6899                 return;
6900
6901         if (enable)
6902                 intel_display_power_get(dev, POWER_DOMAIN_INIT);
6903         else
6904                 intel_display_power_put(dev, POWER_DOMAIN_INIT);
6905
6906         dev_priv->power_domains.init_power_on = enable;
6907 }
6908
6909 static void modeset_update_power_wells(struct drm_device *dev)
6910 {
6911         unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
6912         struct intel_crtc *crtc;
6913
6914         /*
6915          * First get all needed power domains, then put all unneeded, to avoid
6916          * any unnecessary toggling of the power wells.
6917          */
6918         list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
6919                 enum intel_display_power_domain domain;
6920
6921                 if (!crtc->base.enabled)
6922                         continue;
6923
6924                 pipe_domains[crtc->pipe] = get_pipe_power_domains(dev,
6925                                                 crtc->pipe,
6926                                                 crtc->config.pch_pfit.enabled);
6927
6928                 for_each_power_domain(domain, pipe_domains[crtc->pipe])
6929                         intel_display_power_get(dev, domain);
6930         }
6931
6932         list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
6933                 enum intel_display_power_domain domain;
6934
6935                 for_each_power_domain(domain, crtc->enabled_power_domains)
6936                         intel_display_power_put(dev, domain);
6937
6938                 crtc->enabled_power_domains = pipe_domains[crtc->pipe];
6939         }
6940
6941         intel_display_set_init_power(dev, false);
6942 }
6943
6944 static void haswell_modeset_global_resources(struct drm_device *dev)
6945 {
6946         modeset_update_power_wells(dev);
6947         hsw_update_package_c8(dev);
6948 }
6949
6950 static int haswell_crtc_mode_set(struct drm_crtc *crtc,
6951                                  int x, int y,
6952                                  struct drm_framebuffer *fb)
6953 {
6954         struct drm_device *dev = crtc->dev;
6955         struct drm_i915_private *dev_priv = dev->dev_private;
6956         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6957         int plane = intel_crtc->plane;
6958         int ret;
6959
6960         if (!intel_ddi_pll_select(intel_crtc))
6961                 return -EINVAL;
6962         intel_ddi_pll_enable(intel_crtc);
6963
6964         if (intel_crtc->config.has_dp_encoder)
6965                 intel_dp_set_m_n(intel_crtc);
6966
6967         intel_crtc->lowfreq_avail = false;
6968
6969         intel_set_pipe_timings(intel_crtc);
6970
6971         if (intel_crtc->config.has_pch_encoder) {
6972                 intel_cpu_transcoder_set_m_n(intel_crtc,
6973                                              &intel_crtc->config.fdi_m_n);
6974         }
6975
6976         haswell_set_pipeconf(crtc);
6977
6978         intel_set_pipe_csc(crtc);
6979
6980         /* Set up the display plane register */
6981         I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
6982         POSTING_READ(DSPCNTR(plane));
6983
6984         ret = intel_pipe_set_base(crtc, x, y, fb);
6985
6986         return ret;
6987 }
6988
6989 static bool haswell_get_pipe_config(struct intel_crtc *crtc,
6990                                     struct intel_crtc_config *pipe_config)
6991 {
6992         struct drm_device *dev = crtc->base.dev;
6993         struct drm_i915_private *dev_priv = dev->dev_private;
6994         enum intel_display_power_domain pfit_domain;
6995         uint32_t tmp;
6996
6997         pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
6998         pipe_config->shared_dpll = DPLL_ID_PRIVATE;
6999
7000         tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
7001         if (tmp & TRANS_DDI_FUNC_ENABLE) {
7002                 enum pipe trans_edp_pipe;
7003                 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
7004                 default:
7005                         WARN(1, "unknown pipe linked to edp transcoder\n");
7006                 case TRANS_DDI_EDP_INPUT_A_ONOFF:
7007                 case TRANS_DDI_EDP_INPUT_A_ON:
7008                         trans_edp_pipe = PIPE_A;
7009                         break;
7010                 case TRANS_DDI_EDP_INPUT_B_ONOFF:
7011                         trans_edp_pipe = PIPE_B;
7012                         break;
7013                 case TRANS_DDI_EDP_INPUT_C_ONOFF:
7014                         trans_edp_pipe = PIPE_C;
7015                         break;
7016                 }
7017
7018                 if (trans_edp_pipe == crtc->pipe)
7019                         pipe_config->cpu_transcoder = TRANSCODER_EDP;
7020         }
7021
7022         if (!intel_display_power_enabled(dev,
7023                         POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
7024                 return false;
7025
7026         tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
7027         if (!(tmp & PIPECONF_ENABLE))
7028                 return false;
7029
7030         /*
7031          * Haswell has only FDI/PCH transcoder A, which is connected to DDI E.
7032          * So just check whether this pipe is wired to DDI E and whether
7033          * the PCH transcoder is on.
7034          */
7035         tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
7036         if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) &&
7037             I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
7038                 pipe_config->has_pch_encoder = true;
7039
7040                 tmp = I915_READ(FDI_RX_CTL(PIPE_A));
7041                 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
7042                                           FDI_DP_PORT_WIDTH_SHIFT) + 1;
7043
7044                 ironlake_get_fdi_m_n_config(crtc, pipe_config);
7045         }
7046
7047         intel_get_pipe_timings(crtc, pipe_config);
7048
7049         pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
7050         if (intel_display_power_enabled(dev, pfit_domain))
7051                 ironlake_get_pfit_config(crtc, pipe_config);
7052
7053         if (IS_HASWELL(dev))
7054                 pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
7055                         (I915_READ(IPS_CTL) & IPS_ENABLE);
7056
7057         pipe_config->pixel_multiplier = 1;
7058
7059         return true;
7060 }
7061
7062 static int intel_crtc_mode_set(struct drm_crtc *crtc,
7063                                int x, int y,
7064                                struct drm_framebuffer *fb)
7065 {
7066         struct drm_device *dev = crtc->dev;
7067         struct drm_i915_private *dev_priv = dev->dev_private;
7068         struct intel_encoder *encoder;
7069         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7070         struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
7071         int pipe = intel_crtc->pipe;
7072         int ret;
7073
7074         drm_vblank_pre_modeset(dev, pipe);
7075
7076         ret = dev_priv->display.crtc_mode_set(crtc, x, y, fb);
7077
7078         drm_vblank_post_modeset(dev, pipe);
7079
7080         if (ret != 0)
7081                 return ret;
7082
7083         for_each_encoder_on_crtc(dev, crtc, encoder) {
7084                 DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
7085                         encoder->base.base.id,
7086                         drm_get_encoder_name(&encoder->base),
7087                         mode->base.id, mode->name);
7088                 encoder->mode_set(encoder);
7089         }
7090
7091         return 0;
7092 }
7093
7094 static struct {
7095         int clock;
7096         u32 config;
7097 } hdmi_audio_clock[] = {
7098         { DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
7099         { 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
7100         { 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
7101         { 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
7102         { 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
7103         { 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
7104         { DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
7105         { 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
7106         { DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
7107         { 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
7108 };
7109
7110 /* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
7111 static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
7112 {
7113         int i;
7114
7115         for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
7116                 if (mode->clock == hdmi_audio_clock[i].clock)
7117                         break;
7118         }
7119
7120         if (i == ARRAY_SIZE(hdmi_audio_clock)) {
7121                 DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock);
7122                 i = 1;
7123         }
7124
7125         DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
7126                       hdmi_audio_clock[i].clock,
7127                       hdmi_audio_clock[i].config);
7128
7129         return hdmi_audio_clock[i].config;
7130 }
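
/*
 * Worked example (illustrative only): a 74.176 MHz mode, i.e.
 * mode->clock == DIV_ROUND_UP(74250 * 1000, 1001) == 74176, matches the
 * table entry above and yields AUD_CONFIG_PIXEL_CLOCK_HDMI_74176. A clock
 * that is not in the table (say 108000) falls through the loop, so i ends
 * up at ARRAY_SIZE() and is reset to 1, returning the 25.2 MHz entry that
 * the table marks as the bspec default.
 */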
7131
7132 static bool intel_eld_uptodate(struct drm_connector *connector,
7133                                int reg_eldv, uint32_t bits_eldv,
7134                                int reg_elda, uint32_t bits_elda,
7135                                int reg_edid)
7136 {
7137         struct drm_i915_private *dev_priv = connector->dev->dev_private;
7138         uint8_t *eld = connector->eld;
7139         uint32_t i;
7140
7141         i = I915_READ(reg_eldv);
7142         i &= bits_eldv;
7143
7144         if (!eld[0])
7145                 return !i;
7146
7147         if (!i)
7148                 return false;
7149
7150         i = I915_READ(reg_elda);
7151         i &= ~bits_elda;
7152         I915_WRITE(reg_elda, i);
7153
7154         for (i = 0; i < eld[2]; i++)
7155                 if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
7156                         return false;
7157
7158         return true;
7159 }
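
/*
 * Note on the comparison loop above: eld[2] holds the baseline ELD length
 * in 32 bit dwords (the write paths below cap it at 21 dwords, i.e. the
 * "84 bytes of hw ELD buffer"), so the loop walks the cached ELD one dword
 * at a time against what the hardware currently holds.
 */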
7160
7161 static void g4x_write_eld(struct drm_connector *connector,
7162                           struct drm_crtc *crtc,
7163                           struct drm_display_mode *mode)
7164 {
7165         struct drm_i915_private *dev_priv = connector->dev->dev_private;
7166         uint8_t *eld = connector->eld;
7167         uint32_t eldv;
7168         uint32_t len;
7169         uint32_t i;
7170
7171         i = I915_READ(G4X_AUD_VID_DID);
7172
7173         if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
7174                 eldv = G4X_ELDV_DEVCL_DEVBLC;
7175         else
7176                 eldv = G4X_ELDV_DEVCTG;
7177
7178         if (intel_eld_uptodate(connector,
7179                                G4X_AUD_CNTL_ST, eldv,
7180                                G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
7181                                G4X_HDMIW_HDMIEDID))
7182                 return;
7183
7184         i = I915_READ(G4X_AUD_CNTL_ST);
7185         i &= ~(eldv | G4X_ELD_ADDR);
7186         len = (i >> 9) & 0x1f;          /* ELD buffer size */
7187         I915_WRITE(G4X_AUD_CNTL_ST, i);
7188
7189         if (!eld[0])
7190                 return;
7191
7192         len = min_t(uint8_t, eld[2], len);
7193         DRM_DEBUG_DRIVER("ELD size %d\n", len);
7194         for (i = 0; i < len; i++)
7195                 I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
7196
7197         i = I915_READ(G4X_AUD_CNTL_ST);
7198         i |= eldv;
7199         I915_WRITE(G4X_AUD_CNTL_ST, i);
7200 }
7201
7202 static void haswell_write_eld(struct drm_connector *connector,
7203                               struct drm_crtc *crtc,
7204                               struct drm_display_mode *mode)
7205 {
7206         struct drm_i915_private *dev_priv = connector->dev->dev_private;
7207         uint8_t *eld = connector->eld;
7208         struct drm_device *dev = crtc->dev;
7209         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7210         uint32_t eldv;
7211         uint32_t i;
7212         int len;
7213         int pipe = to_intel_crtc(crtc)->pipe;
7214         int tmp;
7215
7216         int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
7217         int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
7218         int aud_config = HSW_AUD_CFG(pipe);
7219         int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
7220
7221
7222         DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n");
7223
7224         /* Audio output enable */
7225         DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
7226         tmp = I915_READ(aud_cntrl_st2);
7227         tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
7228         I915_WRITE(aud_cntrl_st2, tmp);
7229
7230         /* Wait for 1 vertical blank */
7231         intel_wait_for_vblank(dev, pipe);
7232
7233         /* Set ELD valid state */
7234         tmp = I915_READ(aud_cntrl_st2);
7235         DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%08x\n", tmp);
7236         tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
7237         I915_WRITE(aud_cntrl_st2, tmp);
7238         tmp = I915_READ(aud_cntrl_st2);
7239         DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%08x\n", tmp);
7240
7241         /* Enable HDMI mode */
7242         tmp = I915_READ(aud_config);
7243         DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%08x\n", tmp);
7244         /* clear N_programming_enable and N_value_index */
7245         tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
7246         I915_WRITE(aud_config, tmp);
7247
7248         DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
7249
7250         eldv = AUDIO_ELD_VALID_A << (pipe * 4);
7251         intel_crtc->eld_vld = true;
7252
7253         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
7254                 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
7255                 eld[5] |= (1 << 2);     /* Conn_Type, 0x1 = DisplayPort */
7256                 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
7257         } else {
7258                 I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
7259         }
7260
7261         if (intel_eld_uptodate(connector,
7262                                aud_cntrl_st2, eldv,
7263                                aud_cntl_st, IBX_ELD_ADDRESS,
7264                                hdmiw_hdmiedid))
7265                 return;
7266
7267         i = I915_READ(aud_cntrl_st2);
7268         i &= ~eldv;
7269         I915_WRITE(aud_cntrl_st2, i);
7270
7271         if (!eld[0])
7272                 return;
7273
7274         i = I915_READ(aud_cntl_st);
7275         i &= ~IBX_ELD_ADDRESS;
7276         I915_WRITE(aud_cntl_st, i);
7277         i = (i >> 29) & DIP_PORT_SEL_MASK;              /* DIP_Port_Select, 0x1 = PortB */
7278         DRM_DEBUG_DRIVER("port num:%d\n", i);
7279
7280         len = min_t(uint8_t, eld[2], 21);       /* 84 bytes of hw ELD buffer */
7281         DRM_DEBUG_DRIVER("ELD size %d\n", len);
7282         for (i = 0; i < len; i++)
7283                 I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
7284
7285         i = I915_READ(aud_cntrl_st2);
7286         i |= eldv;
7287         I915_WRITE(aud_cntrl_st2, i);
7288
7289 }
7290
7291 static void ironlake_write_eld(struct drm_connector *connector,
7292                                struct drm_crtc *crtc,
7293                                struct drm_display_mode *mode)
7294 {
7295         struct drm_i915_private *dev_priv = connector->dev->dev_private;
7296         uint8_t *eld = connector->eld;
7297         uint32_t eldv;
7298         uint32_t i;
7299         int len;
7300         int hdmiw_hdmiedid;
7301         int aud_config;
7302         int aud_cntl_st;
7303         int aud_cntrl_st2;
7304         int pipe = to_intel_crtc(crtc)->pipe;
7305
7306         if (HAS_PCH_IBX(connector->dev)) {
7307                 hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
7308                 aud_config = IBX_AUD_CFG(pipe);
7309                 aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
7310                 aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
7311         } else if (IS_VALLEYVIEW(connector->dev)) {
7312                 hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
7313                 aud_config = VLV_AUD_CFG(pipe);
7314                 aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
7315                 aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
7316         } else {
7317                 hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
7318                 aud_config = CPT_AUD_CFG(pipe);
7319                 aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
7320                 aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
7321         }
7322
7323         DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
7324
7325         if (IS_VALLEYVIEW(connector->dev))  {
7326                 struct intel_encoder *intel_encoder;
7327                 struct intel_digital_port *intel_dig_port;
7328
7329                 intel_encoder = intel_attached_encoder(connector);
7330                 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
7331                 i = intel_dig_port->port;
7332         } else {
7333                 i = I915_READ(aud_cntl_st);
7334                 i = (i >> 29) & DIP_PORT_SEL_MASK;
7335                 /* DIP_Port_Select, 0x1 = PortB */
7336         }
7337
7338         if (!i) {
7339                 DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
7340                 /* operate blindly on all ports */
7341                 eldv = IBX_ELD_VALIDB;
7342                 eldv |= IBX_ELD_VALIDB << 4;
7343                 eldv |= IBX_ELD_VALIDB << 8;
7344         } else {
7345                 DRM_DEBUG_DRIVER("ELD on port %c\n", port_name(i));
7346                 eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
7347         }
7348
7349         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
7350                 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
7351                 eld[5] |= (1 << 2);     /* Conn_Type, 0x1 = DisplayPort */
7352                 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
7353         } else {
7354                 I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
7355         }
7356
7357         if (intel_eld_uptodate(connector,
7358                                aud_cntrl_st2, eldv,
7359                                aud_cntl_st, IBX_ELD_ADDRESS,
7360                                hdmiw_hdmiedid))
7361                 return;
7362
7363         i = I915_READ(aud_cntrl_st2);
7364         i &= ~eldv;
7365         I915_WRITE(aud_cntrl_st2, i);
7366
7367         if (!eld[0])
7368                 return;
7369
7370         i = I915_READ(aud_cntl_st);
7371         i &= ~IBX_ELD_ADDRESS;
7372         I915_WRITE(aud_cntl_st, i);
7373
7374         len = min_t(uint8_t, eld[2], 21);       /* 84 bytes of hw ELD buffer */
7375         DRM_DEBUG_DRIVER("ELD size %d\n", len);
7376         for (i = 0; i < len; i++)
7377                 I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
7378
7379         i = I915_READ(aud_cntrl_st2);
7380         i |= eldv;
7381         I915_WRITE(aud_cntrl_st2, i);
7382 }
7383
7384 void intel_write_eld(struct drm_encoder *encoder,
7385                      struct drm_display_mode *mode)
7386 {
7387         struct drm_crtc *crtc = encoder->crtc;
7388         struct drm_connector *connector;
7389         struct drm_device *dev = encoder->dev;
7390         struct drm_i915_private *dev_priv = dev->dev_private;
7391
7392         connector = drm_select_eld(encoder, mode);
7393         if (!connector)
7394                 return;
7395
7396         DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
7397                          connector->base.id,
7398                          drm_get_connector_name(connector),
7399                          connector->encoder->base.id,
7400                          drm_get_encoder_name(connector->encoder));
7401
7402         connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
7403
7404         if (dev_priv->display.write_eld)
7405                 dev_priv->display.write_eld(connector, crtc, mode);
7406 }
7407
7408 static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
7409 {
7410         struct drm_device *dev = crtc->dev;
7411         struct drm_i915_private *dev_priv = dev->dev_private;
7412         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7413         bool visible = base != 0;
7414         u32 cntl;
7415
7416         if (intel_crtc->cursor_visible == visible)
7417                 return;
7418
7419         cntl = I915_READ(_CURACNTR);
7420         if (visible) {
7421                 /* On these chipsets we can only modify the base whilst
7422                  * the cursor is disabled.
7423                  */
7424                 I915_WRITE(_CURABASE, base);
7425
7426                 cntl &= ~(CURSOR_FORMAT_MASK);
7427                 /* XXX width must be 64, stride 256 => 0x00 << 28 */
7428                 cntl |= CURSOR_ENABLE |
7429                         CURSOR_GAMMA_ENABLE |
7430                         CURSOR_FORMAT_ARGB;
7431         } else
7432                 cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
7433         I915_WRITE(_CURACNTR, cntl);
7434
7435         intel_crtc->cursor_visible = visible;
7436 }
7437
7438 static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
7439 {
7440         struct drm_device *dev = crtc->dev;
7441         struct drm_i915_private *dev_priv = dev->dev_private;
7442         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7443         int pipe = intel_crtc->pipe;
7444         bool visible = base != 0;
7445
7446         if (intel_crtc->cursor_visible != visible) {
7447                 uint32_t cntl = I915_READ(CURCNTR(pipe));
7448                 if (base) {
7449                         cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
7450                         cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
7451                         cntl |= pipe << 28; /* Connect to correct pipe */
7452                 } else {
7453                         cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
7454                         cntl |= CURSOR_MODE_DISABLE;
7455                 }
7456                 I915_WRITE(CURCNTR(pipe), cntl);
7457
7458                 intel_crtc->cursor_visible = visible;
7459         }
7460         /* and commit changes on next vblank */
7461         POSTING_READ(CURCNTR(pipe));
7462         I915_WRITE(CURBASE(pipe), base);
7463         POSTING_READ(CURBASE(pipe));
7464 }
7465
7466 static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
7467 {
7468         struct drm_device *dev = crtc->dev;
7469         struct drm_i915_private *dev_priv = dev->dev_private;
7470         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7471         int pipe = intel_crtc->pipe;
7472         bool visible = base != 0;
7473
7474         if (intel_crtc->cursor_visible != visible) {
7475                 uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
7476                 if (base) {
7477                         cntl &= ~CURSOR_MODE;
7478                         cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
7479                 } else {
7480                         cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
7481                         cntl |= CURSOR_MODE_DISABLE;
7482                 }
7483                 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
7484                         cntl |= CURSOR_PIPE_CSC_ENABLE;
7485                         cntl &= ~CURSOR_TRICKLE_FEED_DISABLE;
7486                 }
7487                 I915_WRITE(CURCNTR_IVB(pipe), cntl);
7488
7489                 intel_crtc->cursor_visible = visible;
7490         }
7491         /* and commit changes on next vblank */
7492         POSTING_READ(CURCNTR_IVB(pipe));
7493         I915_WRITE(CURBASE_IVB(pipe), base);
7494         POSTING_READ(CURBASE_IVB(pipe));
7495 }
7496
7497 /* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
7498 static void intel_crtc_update_cursor(struct drm_crtc *crtc,
7499                                      bool on)
7500 {
7501         struct drm_device *dev = crtc->dev;
7502         struct drm_i915_private *dev_priv = dev->dev_private;
7503         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7504         int pipe = intel_crtc->pipe;
7505         int x = intel_crtc->cursor_x;
7506         int y = intel_crtc->cursor_y;
7507         u32 base = 0, pos = 0;
7508         bool visible;
7509
7510         if (on)
7511                 base = intel_crtc->cursor_addr;
7512
7513         if (x >= intel_crtc->config.pipe_src_w)
7514                 base = 0;
7515
7516         if (y >= intel_crtc->config.pipe_src_h)
7517                 base = 0;
7518
7519         if (x < 0) {
7520                 if (x + intel_crtc->cursor_width <= 0)
7521                         base = 0;
7522
7523                 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
7524                 x = -x;
7525         }
7526         pos |= x << CURSOR_X_SHIFT;
7527
7528         if (y < 0) {
7529                 if (y + intel_crtc->cursor_height <= 0)
7530                         base = 0;
7531
7532                 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
7533                 y = -y;
7534         }
7535         pos |= y << CURSOR_Y_SHIFT;
7536
7537         visible = base != 0;
7538         if (!visible && !intel_crtc->cursor_visible)
7539                 return;
7540
7541         if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev)) {
7542                 I915_WRITE(CURPOS_IVB(pipe), pos);
7543                 ivb_update_cursor(crtc, base);
7544         } else {
7545                 I915_WRITE(CURPOS(pipe), pos);
7546                 if (IS_845G(dev) || IS_I865G(dev))
7547                         i845_update_cursor(crtc, base);
7548                 else
7549                         i9xx_update_cursor(crtc, base);
7550         }
7551 }
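
/*
 * Worked example (illustrative only): with cursor_x == -16 and a 64 pixel
 * wide cursor, part of the cursor is still on screen, so base is kept and
 * the position register gets the sign-magnitude encoding
 * (CURSOR_POS_SIGN << CURSOR_X_SHIFT) | (16 << CURSOR_X_SHIFT). Only once
 * x + cursor_width drops to or below 0 (fully off screen) is base forced
 * to 0 so the hardware cursor is disabled entirely.
 */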
7552
7553 static int intel_crtc_cursor_set(struct drm_crtc *crtc,
7554                                  struct drm_file *file,
7555                                  uint32_t handle,
7556                                  uint32_t width, uint32_t height)
7557 {
7558         struct drm_device *dev = crtc->dev;
7559         struct drm_i915_private *dev_priv = dev->dev_private;
7560         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7561         struct drm_i915_gem_object *obj;
7562         uint32_t addr;
7563         int ret;
7564
7565         /* if we want to turn off the cursor ignore width and height */
7566         if (!handle) {
7567                 DRM_DEBUG_KMS("cursor off\n");
7568                 addr = 0;
7569                 obj = NULL;
7570                 mutex_lock(&dev->struct_mutex);
7571                 goto finish;
7572         }
7573
7574         /* Currently we only support 64x64 cursors */
7575         if (width != 64 || height != 64) {
7576                 DRM_ERROR("we currently only support 64x64 cursors\n");
7577                 return -EINVAL;
7578         }
7579
7580         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
7581         if (&obj->base == NULL)
7582                 return -ENOENT;
7583
7584         if (obj->base.size < width * height * 4) {
7585                 DRM_ERROR("buffer is too small\n");
7586                 ret = -ENOMEM;
7587                 goto fail;
7588         }
7589
7590         /* we only need to pin inside GTT if cursor is non-phy */
7591         mutex_lock(&dev->struct_mutex);
7592         if (!dev_priv->info->cursor_needs_physical) {
7593                 unsigned alignment;
7594
7595                 if (obj->tiling_mode) {
7596                         DRM_ERROR("cursor cannot be tiled\n");
7597                         ret = -EINVAL;
7598                         goto fail_locked;
7599                 }
7600
7601                 /* Note that the w/a also requires 2 PTE of padding following
7602                  * the bo. We currently fill all unused PTE with the shadow
7603                  * page and so we should always have valid PTE following the
7604                  * cursor preventing the VT-d warning.
7605          * cursor, preventing the VT-d warning.
7606                 alignment = 0;
7607                 if (need_vtd_wa(dev))
7608                         alignment = 64*1024;
7609
7610                 ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
7611                 if (ret) {
7612                         DRM_ERROR("failed to move cursor bo into the GTT\n");
7613                         goto fail_locked;
7614                 }
7615
7616                 ret = i915_gem_object_put_fence(obj);
7617                 if (ret) {
7618                         DRM_ERROR("failed to release fence for cursor\n");
7619                         goto fail_unpin;
7620                 }
7621
7622                 addr = i915_gem_obj_ggtt_offset(obj);
7623         } else {
7624                 int align = IS_I830(dev) ? 16 * 1024 : 256;
7625                 ret = i915_gem_attach_phys_object(dev, obj,
7626                                                   (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
7627                                                   align);
7628                 if (ret) {
7629                         DRM_ERROR("failed to attach phys object\n");
7630                         goto fail_locked;
7631                 }
7632                 addr = obj->phys_obj->handle->busaddr;
7633         }
7634
7635         if (IS_GEN2(dev))
7636                 I915_WRITE(CURSIZE, (height << 12) | width);
7637
7638  finish:
7639         if (intel_crtc->cursor_bo) {
7640                 if (dev_priv->info->cursor_needs_physical) {
7641                         if (intel_crtc->cursor_bo != obj)
7642                                 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
7643                 } else
7644                         i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
7645                 drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
7646         }
7647
7648         mutex_unlock(&dev->struct_mutex);
7649
7650         intel_crtc->cursor_addr = addr;
7651         intel_crtc->cursor_bo = obj;
7652         intel_crtc->cursor_width = width;
7653         intel_crtc->cursor_height = height;
7654
7655         if (intel_crtc->active)
7656                 intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
7657
7658         return 0;
7659 fail_unpin:
7660         i915_gem_object_unpin_from_display_plane(obj);
7661 fail_locked:
7662         mutex_unlock(&dev->struct_mutex);
7663 fail:
7664         drm_gem_object_unreference_unlocked(&obj->base);
7665         return ret;
7666 }
7667
7668 static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
7669 {
7670         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7671
7672         intel_crtc->cursor_x = clamp_t(int, x, SHRT_MIN, SHRT_MAX);
7673         intel_crtc->cursor_y = clamp_t(int, y, SHRT_MIN, SHRT_MAX);
7674
7675         if (intel_crtc->active)
7676                 intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
7677
7678         return 0;
7679 }
7680
7681 static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
7682                                  u16 *blue, uint32_t start, uint32_t size)
7683 {
7684         int end = (start + size > 256) ? 256 : start + size, i;
7685         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7686
7687         for (i = start; i < end; i++) {
7688                 intel_crtc->lut_r[i] = red[i] >> 8;
7689                 intel_crtc->lut_g[i] = green[i] >> 8;
7690                 intel_crtc->lut_b[i] = blue[i] >> 8;
7691         }
7692
7693         intel_crtc_load_lut(crtc);
7694 }
7695
7696 /* VESA 640x480x72Hz mode to set on the pipe */
7697 static struct drm_display_mode load_detect_mode = {
7698         DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
7699                  704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
7700 };
7701
7702 static struct drm_framebuffer *
7703 intel_framebuffer_create(struct drm_device *dev,
7704                          struct drm_mode_fb_cmd2 *mode_cmd,
7705                          struct drm_i915_gem_object *obj)
7706 {
7707         struct intel_framebuffer *intel_fb;
7708         int ret;
7709
7710         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
7711         if (!intel_fb) {
7712                 drm_gem_object_unreference_unlocked(&obj->base);
7713                 return ERR_PTR(-ENOMEM);
7714         }
7715
7716         ret = i915_mutex_lock_interruptible(dev);
7717         if (ret)
7718                 goto err;
7719
7720         ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
7721         mutex_unlock(&dev->struct_mutex);
7722         if (ret)
7723                 goto err;
7724
7725         return &intel_fb->base;
7726 err:
7727         drm_gem_object_unreference_unlocked(&obj->base);
7728         kfree(intel_fb);
7729
7730         return ERR_PTR(ret);
7731 }
7732
7733 static u32
7734 intel_framebuffer_pitch_for_width(int width, int bpp)
7735 {
7736         u32 pitch = DIV_ROUND_UP(width * bpp, 8);
7737         return ALIGN(pitch, 64);
7738 }
7739
7740 static u32
7741 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
7742 {
7743         u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
7744         return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
7745 }
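
/*
 * Worked example (illustrative only): a 1366 pixel wide mode at 32 bpp
 * gives DIV_ROUND_UP(1366 * 32, 8) == 5464 bytes per line, rounded up to
 * the next 64 byte boundary -> 5504. For 768 active lines the object size
 * is 5504 * 768 == 4227072 bytes, which ALIGN() then rounds up to a
 * multiple of PAGE_SIZE (here it already is one).
 */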
7746
7747 static struct drm_framebuffer *
7748 intel_framebuffer_create_for_mode(struct drm_device *dev,
7749                                   struct drm_display_mode *mode,
7750                                   int depth, int bpp)
7751 {
7752         struct drm_i915_gem_object *obj;
7753         struct drm_mode_fb_cmd2 mode_cmd = { 0 };
7754
7755         obj = i915_gem_alloc_object(dev,
7756                                     intel_framebuffer_size_for_mode(mode, bpp));
7757         if (obj == NULL)
7758                 return ERR_PTR(-ENOMEM);
7759
7760         mode_cmd.width = mode->hdisplay;
7761         mode_cmd.height = mode->vdisplay;
7762         mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
7763                                                                 bpp);
7764         mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
7765
7766         return intel_framebuffer_create(dev, &mode_cmd, obj);
7767 }
7768
7769 static struct drm_framebuffer *
7770 mode_fits_in_fbdev(struct drm_device *dev,
7771                    struct drm_display_mode *mode)
7772 {
7773 #ifdef CONFIG_DRM_I915_FBDEV
7774         struct drm_i915_private *dev_priv = dev->dev_private;
7775         struct drm_i915_gem_object *obj;
7776         struct drm_framebuffer *fb;
7777
7778         if (dev_priv->fbdev == NULL)
7779                 return NULL;
7780
7781         obj = dev_priv->fbdev->ifb.obj;
7782         if (obj == NULL)
7783                 return NULL;
7784
7785         fb = &dev_priv->fbdev->ifb.base;
7786         if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
7787                                                                fb->bits_per_pixel))
7788                 return NULL;
7789
7790         if (obj->base.size < mode->vdisplay * fb->pitches[0])
7791                 return NULL;
7792
7793         return fb;
7794 #else
7795         return NULL;
7796 #endif
7797 }
7798
7799 bool intel_get_load_detect_pipe(struct drm_connector *connector,
7800                                 struct drm_display_mode *mode,
7801                                 struct intel_load_detect_pipe *old)
7802 {
7803         struct intel_crtc *intel_crtc;
7804         struct intel_encoder *intel_encoder =
7805                 intel_attached_encoder(connector);
7806         struct drm_crtc *possible_crtc;
7807         struct drm_encoder *encoder = &intel_encoder->base;
7808         struct drm_crtc *crtc = NULL;
7809         struct drm_device *dev = encoder->dev;
7810         struct drm_framebuffer *fb;
7811         int i = -1;
7812
7813         DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
7814                       connector->base.id, drm_get_connector_name(connector),
7815                       encoder->base.id, drm_get_encoder_name(encoder));
7816
7817         /*
7818          * Algorithm gets a little messy:
7819          *
7820          *   - if the connector already has an assigned crtc, use it (but make
7821          *     sure it's on first)
7822          *
7823          *   - try to find the first unused crtc that can drive this connector,
7824          *     and use that if we find one
7825          */
7826
7827         /* See if we already have a CRTC for this connector */
7828         if (encoder->crtc) {
7829                 crtc = encoder->crtc;
7830
7831                 mutex_lock(&crtc->mutex);
7832
7833                 old->dpms_mode = connector->dpms;
7834                 old->load_detect_temp = false;
7835
7836                 /* Make sure the crtc and connector are running */
7837                 if (connector->dpms != DRM_MODE_DPMS_ON)
7838                         connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
7839
7840                 return true;
7841         }
7842
7843         /* Find an unused one (if possible) */
7844         list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
7845                 i++;
7846                 if (!(encoder->possible_crtcs & (1 << i)))
7847                         continue;
7848                 if (!possible_crtc->enabled) {
7849                         crtc = possible_crtc;
7850                         break;
7851                 }
7852         }
7853
7854         /*
7855          * If we didn't find an unused CRTC, don't use any.
7856          */
7857         if (!crtc) {
7858                 DRM_DEBUG_KMS("no pipe available for load-detect\n");
7859                 return false;
7860         }
7861
7862         mutex_lock(&crtc->mutex);
7863         intel_encoder->new_crtc = to_intel_crtc(crtc);
7864         to_intel_connector(connector)->new_encoder = intel_encoder;
7865
7866         intel_crtc = to_intel_crtc(crtc);
7867         old->dpms_mode = connector->dpms;
7868         old->load_detect_temp = true;
7869         old->release_fb = NULL;
7870
7871         if (!mode)
7872                 mode = &load_detect_mode;
7873
7874         /* We need a framebuffer large enough to accommodate all accesses
7875          * that the plane may generate whilst we perform load detection.
7876          * We cannot rely on the fbcon either being present (we get called
7877          * during its initialisation to detect all boot displays, or it may
7878          * not even exist) or that it is large enough to satisfy the
7879          * requested mode.
7880          */
7881         fb = mode_fits_in_fbdev(dev, mode);
7882         if (fb == NULL) {
7883                 DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
7884                 fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
7885                 old->release_fb = fb;
7886         } else
7887                 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
7888         if (IS_ERR(fb)) {
7889                 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
7890                 mutex_unlock(&crtc->mutex);
7891                 return false;
7892         }
7893
7894         if (intel_set_mode(crtc, mode, 0, 0, fb)) {
7895                 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
7896                 if (old->release_fb)
7897                         old->release_fb->funcs->destroy(old->release_fb);
7898                 mutex_unlock(&crtc->mutex);
7899                 return false;
7900         }
7901
7902         /* let the connector get through one full cycle before testing */
7903         intel_wait_for_vblank(dev, intel_crtc->pipe);
7904         return true;
7905 }
7906
7907 void intel_release_load_detect_pipe(struct drm_connector *connector,
7908                                     struct intel_load_detect_pipe *old)
7909 {
7910         struct intel_encoder *intel_encoder =
7911                 intel_attached_encoder(connector);
7912         struct drm_encoder *encoder = &intel_encoder->base;
7913         struct drm_crtc *crtc = encoder->crtc;
7914
7915         DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
7916                       connector->base.id, drm_get_connector_name(connector),
7917                       encoder->base.id, drm_get_encoder_name(encoder));
7918
7919         if (old->load_detect_temp) {
7920                 to_intel_connector(connector)->new_encoder = NULL;
7921                 intel_encoder->new_crtc = NULL;
7922                 intel_set_mode(crtc, NULL, 0, 0, NULL);
7923
7924                 if (old->release_fb) {
7925                         drm_framebuffer_unregister_private(old->release_fb);
7926                         drm_framebuffer_unreference(old->release_fb);
7927                 }
7928
7929                 mutex_unlock(&crtc->mutex);
7930                 return;
7931         }
7932
7933         /* Switch crtc and encoder back off if necessary */
7934         if (old->dpms_mode != DRM_MODE_DPMS_ON)
7935                 connector->funcs->dpms(connector, old->dpms_mode);
7936
7937         mutex_unlock(&crtc->mutex);
7938 }
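
/*
 * Minimal usage sketch (illustrative only, not code from this file): a
 * connector ->detect() implementation typically brackets its probing with
 * the two helpers above, roughly:
 *
 *	struct intel_load_detect_pipe tmp;
 *
 *	if (intel_get_load_detect_pipe(connector, NULL, &tmp)) {
 *		status = probe_the_hardware(connector);	// hypothetical helper
 *		intel_release_load_detect_pipe(connector, &tmp);
 *	} else {
 *		status = connector_status_unknown;
 *	}
 *
 * Passing NULL as the mode selects the VESA 640x480 load_detect_mode, and
 * the release call tears down the temporary mode set (and any temporary
 * framebuffer) once the probe is done.
 */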
7939
7940 static int i9xx_pll_refclk(struct drm_device *dev,
7941                            const struct intel_crtc_config *pipe_config)
7942 {
7943         struct drm_i915_private *dev_priv = dev->dev_private;
7944         u32 dpll = pipe_config->dpll_hw_state.dpll;
7945
7946         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
7947                 return dev_priv->vbt.lvds_ssc_freq;
7948         else if (HAS_PCH_SPLIT(dev))
7949                 return 120000;
7950         else if (!IS_GEN2(dev))
7951                 return 96000;
7952         else
7953                 return 48000;
7954 }
7955
7956 /* Returns the clock of the currently programmed mode of the given pipe. */
7957 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
7958                                 struct intel_crtc_config *pipe_config)
7959 {
7960         struct drm_device *dev = crtc->base.dev;
7961         struct drm_i915_private *dev_priv = dev->dev_private;
7962         int pipe = pipe_config->cpu_transcoder;
7963         u32 dpll = pipe_config->dpll_hw_state.dpll;
7964         u32 fp;
7965         intel_clock_t clock;
7966         int refclk = i9xx_pll_refclk(dev, pipe_config);
7967
7968         if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
7969                 fp = pipe_config->dpll_hw_state.fp0;
7970         else
7971                 fp = pipe_config->dpll_hw_state.fp1;
7972
7973         clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
7974         if (IS_PINEVIEW(dev)) {
7975                 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
7976                 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
7977         } else {
7978                 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
7979                 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
7980         }
7981
7982         if (!IS_GEN2(dev)) {
7983                 if (IS_PINEVIEW(dev))
7984                         clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
7985                                 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
7986                 else
7987                         clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
7988                                DPLL_FPA01_P1_POST_DIV_SHIFT);
7989
7990                 switch (dpll & DPLL_MODE_MASK) {
7991                 case DPLLB_MODE_DAC_SERIAL:
7992                         clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
7993                                 5 : 10;
7994                         break;
7995                 case DPLLB_MODE_LVDS:
7996                         clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
7997                                 7 : 14;
7998                         break;
7999                 default:
8000                         DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
8001                                   "mode\n", (int)(dpll & DPLL_MODE_MASK));
8002                         return;
8003                 }
8004
8005                 if (IS_PINEVIEW(dev))
8006                         pineview_clock(refclk, &clock);
8007                 else
8008                         i9xx_clock(refclk, &clock);
8009         } else {
8010                 u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
8011                 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
8012
8013                 if (is_lvds) {
8014                         clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
8015                                        DPLL_FPA01_P1_POST_DIV_SHIFT);
8016
8017                         if (lvds & LVDS_CLKB_POWER_UP)
8018                                 clock.p2 = 7;
8019                         else
8020                                 clock.p2 = 14;
8021                 } else {
8022                         if (dpll & PLL_P1_DIVIDE_BY_TWO)
8023                                 clock.p1 = 2;
8024                         else {
8025                                 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
8026                                             DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
8027                         }
8028                         if (dpll & PLL_P2_DIVIDE_BY_4)
8029                                 clock.p2 = 4;
8030                         else
8031                                 clock.p2 = 2;
8032                 }
8033
8034                 i9xx_clock(refclk, &clock);
8035         }
8036
8037         /*
8038          * This value includes pixel_multiplier. We will use
8039          * port_clock to compute adjusted_mode.crtc_clock in the
8040          * encoder's get_config() function.
8041          */
8042         pipe_config->port_clock = clock.dot;
8043 }
8044
8045 int intel_dotclock_calculate(int link_freq,
8046                              const struct intel_link_m_n *m_n)
8047 {
8048         /*
8049          * The calculation for the data clock is:
8050          * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
8051          * But we want to avoid losing precision if possible, so:
8052          * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
8053          *
8054          * and for the link m/n pair it is simply:
8055          * pixel_clock = (link_m * link_clock) / link_n
8056          */
8057
8058         if (!m_n->link_n)
8059                 return 0;
8060
8061         return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
8062 }
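
/*
 * Illustrative example (hypothetical numbers): with link_m/link_n
 * programmed as 1/2 and link_freq == 200000 (in whatever unit the caller
 * uses), the returned dotclock is 1 * 200000 / 2 == 100000 in that same
 * unit. A zero link_n short-circuits to 0 above, and the 64 bit multiply
 * keeps the intermediate product from overflowing for real m/n values.
 */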
8063
8064 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
8065                                    struct intel_crtc_config *pipe_config)
8066 {
8067         struct drm_device *dev = crtc->base.dev;
8068
8069         /* read out port_clock from the DPLL */
8070         i9xx_crtc_clock_get(crtc, pipe_config);
8071
8072         /*
8073          * This value does not include pixel_multiplier.
8074          * We will check that port_clock and adjusted_mode.crtc_clock
8075          * agree once we know their relationship in the encoder's
8076          * get_config() function.
8077          */
8078         pipe_config->adjusted_mode.crtc_clock =
8079                 intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
8080                                          &pipe_config->fdi_m_n);
8081 }
8082
8083 /** Returns the currently programmed mode of the given pipe. */
8084 struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
8085                                              struct drm_crtc *crtc)
8086 {
8087         struct drm_i915_private *dev_priv = dev->dev_private;
8088         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8089         enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
8090         struct drm_display_mode *mode;
8091         struct intel_crtc_config pipe_config;
8092         int htot = I915_READ(HTOTAL(cpu_transcoder));
8093         int hsync = I915_READ(HSYNC(cpu_transcoder));
8094         int vtot = I915_READ(VTOTAL(cpu_transcoder));
8095         int vsync = I915_READ(VSYNC(cpu_transcoder));
8096         enum pipe pipe = intel_crtc->pipe;
8097
8098         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
8099         if (!mode)
8100                 return NULL;
8101
8102         /*
8103          * Construct a pipe_config sufficient for getting the clock info
8104          * back out of crtc_clock_get.
8105          *
8106          * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
8107          * to use a real value here instead.
8108          */
8109         pipe_config.cpu_transcoder = (enum transcoder) pipe;
8110         pipe_config.pixel_multiplier = 1;
8111         pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
8112         pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
8113         pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
8114         i9xx_crtc_clock_get(intel_crtc, &pipe_config);
8115
8116         mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
8117         mode->hdisplay = (htot & 0xffff) + 1;
8118         mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
8119         mode->hsync_start = (hsync & 0xffff) + 1;
8120         mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
8121         mode->vdisplay = (vtot & 0xffff) + 1;
8122         mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
8123         mode->vsync_start = (vsync & 0xffff) + 1;
8124         mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
8125
8126         drm_mode_set_name(mode);
8127
8128         return mode;
8129 }
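
/*
 * Worked example (illustrative only): an HTOTAL readback of 0x069704ff
 * decodes as hdisplay = (0x04ff & 0xffff) + 1 = 1280 and
 * htotal = (0x06970000 >> 16) + 1 = 1688; the hardware stores every timing
 * field as "value minus one", which is why each field above gets + 1.
 */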
8130
8131 static void intel_increase_pllclock(struct drm_crtc *crtc)
8132 {
8133         struct drm_device *dev = crtc->dev;
8134         drm_i915_private_t *dev_priv = dev->dev_private;
8135         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8136         int pipe = intel_crtc->pipe;
8137         int dpll_reg = DPLL(pipe);
8138         int dpll;
8139
8140         if (HAS_PCH_SPLIT(dev))
8141                 return;
8142
8143         if (!dev_priv->lvds_downclock_avail)
8144                 return;
8145
8146         dpll = I915_READ(dpll_reg);
8147         if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
8148                 DRM_DEBUG_DRIVER("upclocking LVDS\n");
8149
8150                 assert_panel_unlocked(dev_priv, pipe);
8151
8152                 dpll &= ~DISPLAY_RATE_SELECT_FPA1;
8153                 I915_WRITE(dpll_reg, dpll);
8154                 intel_wait_for_vblank(dev, pipe);
8155
8156                 dpll = I915_READ(dpll_reg);
8157                 if (dpll & DISPLAY_RATE_SELECT_FPA1)
8158                         DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
8159         }
8160 }
8161
8162 static void intel_decrease_pllclock(struct drm_crtc *crtc)
8163 {
8164         struct drm_device *dev = crtc->dev;
8165         drm_i915_private_t *dev_priv = dev->dev_private;
8166         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8167
8168         if (HAS_PCH_SPLIT(dev))
8169                 return;
8170
8171         if (!dev_priv->lvds_downclock_avail)
8172                 return;
8173
8174         /*
8175          * Since this is called by a timer, we should never get here in
8176          * the manual case.
8177          */
8178         if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
8179                 int pipe = intel_crtc->pipe;
8180                 int dpll_reg = DPLL(pipe);
8181                 int dpll;
8182
8183                 DRM_DEBUG_DRIVER("downclocking LVDS\n");
8184
8185                 assert_panel_unlocked(dev_priv, pipe);
8186
8187                 dpll = I915_READ(dpll_reg);
8188                 dpll |= DISPLAY_RATE_SELECT_FPA1;
8189                 I915_WRITE(dpll_reg, dpll);
8190                 intel_wait_for_vblank(dev, pipe);
8191                 dpll = I915_READ(dpll_reg);
8192                 if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
8193                         DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
8194         }
8195
8196 }
8197
8198 void intel_mark_busy(struct drm_device *dev)
8199 {
8200         struct drm_i915_private *dev_priv = dev->dev_private;
8201
8202         hsw_package_c8_gpu_busy(dev_priv);
8203         i915_update_gfx_val(dev_priv);
8204 }
8205
8206 void intel_mark_idle(struct drm_device *dev)
8207 {
8208         struct drm_i915_private *dev_priv = dev->dev_private;
8209         struct drm_crtc *crtc;
8210
8211         hsw_package_c8_gpu_idle(dev_priv);
8212
8213         if (!i915_powersave)
8214                 return;
8215
8216         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8217                 if (!crtc->fb)
8218                         continue;
8219
8220                 intel_decrease_pllclock(crtc);
8221         }
8222
8223         if (dev_priv->info->gen >= 6)
8224                 gen6_rps_idle(dev->dev_private);
8225 }
8226
8227 void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
8228                         struct intel_ring_buffer *ring)
8229 {
8230         struct drm_device *dev = obj->base.dev;
8231         struct drm_crtc *crtc;
8232
8233         if (!i915_powersave)
8234                 return;
8235
8236         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8237                 if (!crtc->fb)
8238                         continue;
8239
8240                 if (to_intel_framebuffer(crtc->fb)->obj != obj)
8241                         continue;
8242
8243                 intel_increase_pllclock(crtc);
8244                 if (ring && intel_fbc_enabled(dev))
8245                         ring->fbc_dirty = true;
8246         }
8247 }
8248
8249 static void intel_crtc_destroy(struct drm_crtc *crtc)
8250 {
8251         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8252         struct drm_device *dev = crtc->dev;
8253         struct intel_unpin_work *work;
8254         unsigned long flags;
8255
8256         spin_lock_irqsave(&dev->event_lock, flags);
8257         work = intel_crtc->unpin_work;
8258         intel_crtc->unpin_work = NULL;
8259         spin_unlock_irqrestore(&dev->event_lock, flags);
8260
8261         if (work) {
8262                 cancel_work_sync(&work->work);
8263                 kfree(work);
8264         }
8265
8266         intel_crtc_cursor_set(crtc, NULL, 0, 0, 0);
8267
8268         drm_crtc_cleanup(crtc);
8269
8270         kfree(intel_crtc);
8271 }
8272
8273 static void intel_unpin_work_fn(struct work_struct *__work)
8274 {
8275         struct intel_unpin_work *work =
8276                 container_of(__work, struct intel_unpin_work, work);
8277         struct drm_device *dev = work->crtc->dev;
8278
8279         mutex_lock(&dev->struct_mutex);
8280         intel_unpin_fb_obj(work->old_fb_obj);
8281         drm_gem_object_unreference(&work->pending_flip_obj->base);
8282         drm_gem_object_unreference(&work->old_fb_obj->base);
8283
8284         intel_update_fbc(dev);
8285         mutex_unlock(&dev->struct_mutex);
8286
8287         BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
8288         atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
8289
8290         kfree(work);
8291 }
8292
8293 static void do_intel_finish_page_flip(struct drm_device *dev,
8294                                       struct drm_crtc *crtc)
8295 {
8296         drm_i915_private_t *dev_priv = dev->dev_private;
8297         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8298         struct intel_unpin_work *work;
8299         unsigned long flags;
8300
8301         /* Ignore early vblank irqs */
8302         if (intel_crtc == NULL)
8303                 return;
8304
8305         spin_lock_irqsave(&dev->event_lock, flags);
8306         work = intel_crtc->unpin_work;
8307
8308         /* Ensure we don't miss a work->pending update ... */
8309         smp_rmb();
8310
8311         if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
8312                 spin_unlock_irqrestore(&dev->event_lock, flags);
8313                 return;
8314         }
8315
8316         /* and that the unpin work is consistent wrt ->pending. */
8317         smp_rmb();
8318
8319         intel_crtc->unpin_work = NULL;
8320
8321         if (work->event)
8322                 drm_send_vblank_event(dev, intel_crtc->pipe, work->event);
8323
8324         drm_vblank_put(dev, intel_crtc->pipe);
8325
8326         spin_unlock_irqrestore(&dev->event_lock, flags);
8327
8328         wake_up_all(&dev_priv->pending_flip_queue);
8329
8330         queue_work(dev_priv->wq, &work->work);
8331
8332         trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
8333 }
8334
8335 void intel_finish_page_flip(struct drm_device *dev, int pipe)
8336 {
8337         drm_i915_private_t *dev_priv = dev->dev_private;
8338         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
8339
8340         do_intel_finish_page_flip(dev, crtc);
8341 }
8342
8343 void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
8344 {
8345         drm_i915_private_t *dev_priv = dev->dev_private;
8346         struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
8347
8348         do_intel_finish_page_flip(dev, crtc);
8349 }
8350
8351 void intel_prepare_page_flip(struct drm_device *dev, int plane)
8352 {
8353         drm_i915_private_t *dev_priv = dev->dev_private;
8354         struct intel_crtc *intel_crtc =
8355                 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
8356         unsigned long flags;
8357
8358         /* NB: An MMIO update of the plane base pointer will also
8359          * generate a page-flip completion irq, i.e. every modeset
8360          * is also accompanied by a spurious intel_prepare_page_flip().
8361          */
8362         spin_lock_irqsave(&dev->event_lock, flags);
8363         if (intel_crtc->unpin_work)
8364                 atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
8365         spin_unlock_irqrestore(&dev->event_lock, flags);
8366 }
8367
8368 static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
8369 {
8370         /* Ensure that the work item is consistent when activating it ... */
8371         smp_wmb();
8372         atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
8373         /* and that it is marked active as soon as the irq could fire. */
8374         smp_wmb();
8375 }
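
/*
 * Roughly speaking, the two smp_wmb() calls above pair with the two
 * smp_rmb() calls in do_intel_finish_page_flip(): the first makes the work
 * item's fields visible before ->pending is set, and the second orders the
 * ->pending update against the ring commit in the callers (which is what
 * can actually trigger the flip-complete interrupt).
 */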
8376
8377 static int intel_gen2_queue_flip(struct drm_device *dev,
8378                                  struct drm_crtc *crtc,
8379                                  struct drm_framebuffer *fb,
8380                                  struct drm_i915_gem_object *obj,
8381                                  uint32_t flags)
8382 {
8383         struct drm_i915_private *dev_priv = dev->dev_private;
8384         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8385         u32 flip_mask;
8386         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
8387         int ret;
8388
8389         ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8390         if (ret)
8391                 goto err;
8392
8393         ret = intel_ring_begin(ring, 6);
8394         if (ret)
8395                 goto err_unpin;
8396
8397         /* Can't queue multiple flips, so wait for the previous
8398          * one to finish before executing the next.
8399          */
8400         if (intel_crtc->plane)
8401                 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
8402         else
8403                 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
8404         intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
8405         intel_ring_emit(ring, MI_NOOP);
8406         intel_ring_emit(ring, MI_DISPLAY_FLIP |
8407                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
8408         intel_ring_emit(ring, fb->pitches[0]);
8409         intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
8410         intel_ring_emit(ring, 0); /* aux display base address, unused */
8411
8412         intel_mark_page_flip_active(intel_crtc);
8413         __intel_ring_advance(ring);
8414         return 0;
8415
8416 err_unpin:
8417         intel_unpin_fb_obj(obj);
8418 err:
8419         return ret;
8420 }
8421
8422 static int intel_gen3_queue_flip(struct drm_device *dev,
8423                                  struct drm_crtc *crtc,
8424                                  struct drm_framebuffer *fb,
8425                                  struct drm_i915_gem_object *obj,
8426                                  uint32_t flags)
8427 {
8428         struct drm_i915_private *dev_priv = dev->dev_private;
8429         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8430         u32 flip_mask;
8431         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
8432         int ret;
8433
8434         ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8435         if (ret)
8436                 goto err;
8437
8438         ret = intel_ring_begin(ring, 6);
8439         if (ret)
8440                 goto err_unpin;
8441
8442         if (intel_crtc->plane)
8443                 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
8444         else
8445                 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
8446         intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
8447         intel_ring_emit(ring, MI_NOOP);
8448         intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
8449                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
8450         intel_ring_emit(ring, fb->pitches[0]);
8451         intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
8452         intel_ring_emit(ring, MI_NOOP);
8453
8454         intel_mark_page_flip_active(intel_crtc);
8455         __intel_ring_advance(ring);
8456         return 0;
8457
8458 err_unpin:
8459         intel_unpin_fb_obj(obj);
8460 err:
8461         return ret;
8462 }
8463
8464 static int intel_gen4_queue_flip(struct drm_device *dev,
8465                                  struct drm_crtc *crtc,
8466                                  struct drm_framebuffer *fb,
8467                                  struct drm_i915_gem_object *obj,
8468                                  uint32_t flags)
8469 {
8470         struct drm_i915_private *dev_priv = dev->dev_private;
8471         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8472         uint32_t pf, pipesrc;
8473         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
8474         int ret;
8475
8476         ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8477         if (ret)
8478                 goto err;
8479
8480         ret = intel_ring_begin(ring, 4);
8481         if (ret)
8482                 goto err_unpin;
8483
8484         /* i965+ uses the linear or tiled offsets from the
8485          * Display Registers (which do not change across a page-flip)
8486          * so we need only reprogram the base address.
8487          */
8488         intel_ring_emit(ring, MI_DISPLAY_FLIP |
8489                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
8490         intel_ring_emit(ring, fb->pitches[0]);
8491         intel_ring_emit(ring,
8492                         (i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset) |
8493                         obj->tiling_mode);
8494
8495         /* XXX Enabling the panel-fitter across page-flip is so far
8496          * untested on non-native modes, so ignore it for now.
8497          * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
8498          */
8499         pf = 0;
8500         pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
8501         intel_ring_emit(ring, pf | pipesrc);
8502
8503         intel_mark_page_flip_active(intel_crtc);
8504         __intel_ring_advance(ring);
8505         return 0;
8506
8507 err_unpin:
8508         intel_unpin_fb_obj(obj);
8509 err:
8510         return ret;
8511 }
8512
8513 static int intel_gen6_queue_flip(struct drm_device *dev,
8514                                  struct drm_crtc *crtc,
8515                                  struct drm_framebuffer *fb,
8516                                  struct drm_i915_gem_object *obj,
8517                                  uint32_t flags)
8518 {
8519         struct drm_i915_private *dev_priv = dev->dev_private;
8520         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8521         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
8522         uint32_t pf, pipesrc;
8523         int ret;
8524
8525         ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8526         if (ret)
8527                 goto err;
8528
8529         ret = intel_ring_begin(ring, 4);
8530         if (ret)
8531                 goto err_unpin;
8532
8533         intel_ring_emit(ring, MI_DISPLAY_FLIP |
8534                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
8535         intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
8536         intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
8537
8538         /* Contrary to the suggestions in the documentation,
8539          * "Enable Panel Fitter" does not seem to be required when page
8540          * flipping with a non-native mode, and, worse, causes a normal
8541          * modeset to fail.
8542          * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
8543          */
8544         pf = 0;
8545         pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
8546         intel_ring_emit(ring, pf | pipesrc);
8547
8548         intel_mark_page_flip_active(intel_crtc);
8549         __intel_ring_advance(ring);
8550         return 0;
8551
8552 err_unpin:
8553         intel_unpin_fb_obj(obj);
8554 err:
8555         return ret;
8556 }
8557
8558 static int intel_gen7_queue_flip(struct drm_device *dev,
8559                                  struct drm_crtc *crtc,
8560                                  struct drm_framebuffer *fb,
8561                                  struct drm_i915_gem_object *obj,
8562                                  uint32_t flags)
8563 {
8564         struct drm_i915_private *dev_priv = dev->dev_private;
8565         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8566         struct intel_ring_buffer *ring;
8567         uint32_t plane_bit = 0;
8568         int len, ret;
8569
8570         ring = obj->ring;
8571         if (IS_VALLEYVIEW(dev) || ring == NULL || ring->id != RCS)
8572                 ring = &dev_priv->ring[BCS];
8573
8574         ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8575         if (ret)
8576                 goto err;
8577
8578         switch (intel_crtc->plane) {
8579         case PLANE_A:
8580                 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
8581                 break;
8582         case PLANE_B:
8583                 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
8584                 break;
8585         case PLANE_C:
8586                 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
8587                 break;
8588         default:
8589                 WARN_ONCE(1, "unknown plane in flip command\n");
8590                 ret = -ENODEV;
8591                 goto err_unpin;
8592         }
8593
8594         len = 4;
8595         if (ring->id == RCS)
8596                 len += 6;
8597
8598         /*
8599          * BSpec MI_DISPLAY_FLIP for IVB:
8600          * "The full packet must be contained within the same cache line."
8601          *
8602          * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
8603          * cacheline. If we ever start emitting more commands before
8604          * the MI_DISPLAY_FLIP we may need to first emit everything else,
8605          * then do the cacheline alignment, and finally emit the
8606          * MI_DISPLAY_FLIP.
8607          */
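        /*
         * Rough arithmetic (editorial, not from the original comment): the
         * RCS path reserves 4 + 6 = 10 dwords above, i.e. 40 bytes, so after
         * the alignment below the whole packet still fits inside a single
         * 64 byte cacheline.
         */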
8608         ret = intel_ring_cacheline_align(ring);
8609         if (ret)
8610                 goto err_unpin;
8611
8612         ret = intel_ring_begin(ring, len);
8613         if (ret)
8614                 goto err_unpin;
8615
8616         /* Unmask the flip-done completion message. Note that the bspec says that
8617          * we should do this for both the BCS and RCS, and that we must not unmask
8618          * more than one flip event at any time (or ensure that one flip message
8619          * can be sent by waiting for flip-done prior to queueing new flips).
8620          * Experimentation says that BCS works despite DERRMR masking all
8621          * flip-done completion events and that unmasking all planes at once
8622          * for the RCS also doesn't appear to drop events. Setting the DERRMR
8623          * to zero does lead to lockups within MI_DISPLAY_FLIP.
8624          */
8625         if (ring->id == RCS) {
8626                 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
8627                 intel_ring_emit(ring, DERRMR);
8628                 intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
8629                                         DERRMR_PIPEB_PRI_FLIP_DONE |
8630                                         DERRMR_PIPEC_PRI_FLIP_DONE));
8631                 intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
8632                                 MI_SRM_LRM_GLOBAL_GTT);
8633                 intel_ring_emit(ring, DERRMR);
8634                 intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
8635         }
8636
8637         intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
8638         intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
8639         intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
8640         intel_ring_emit(ring, (MI_NOOP));
8641
8642         intel_mark_page_flip_active(intel_crtc);
8643         __intel_ring_advance(ring);
8644         return 0;
8645
8646 err_unpin:
8647         intel_unpin_fb_obj(obj);
8648 err:
8649         return ret;
8650 }
8651
8652 static int intel_default_queue_flip(struct drm_device *dev,
8653                                     struct drm_crtc *crtc,
8654                                     struct drm_framebuffer *fb,
8655                                     struct drm_i915_gem_object *obj,
8656                                     uint32_t flags)
8657 {
8658         return -ENODEV;
8659 }
8660
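/*
 * High level flow of the page flip path below (summary only): validate that
 * the new fb is compatible with an MI flip, allocate and publish the
 * unpin_work under dev->event_lock, take vblank and gem references, queue the
 * flip through the gen-specific queue_flip() hook, and let the flip-done
 * completion later unpin and release the old framebuffer.
 */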
8661 static int intel_crtc_page_flip(struct drm_crtc *crtc,
8662                                 struct drm_framebuffer *fb,
8663                                 struct drm_pending_vblank_event *event,
8664                                 uint32_t page_flip_flags)
8665 {
8666         struct drm_device *dev = crtc->dev;
8667         struct drm_i915_private *dev_priv = dev->dev_private;
8668         struct drm_framebuffer *old_fb = crtc->fb;
8669         struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
8670         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8671         struct intel_unpin_work *work;
8672         unsigned long flags;
8673         int ret;
8674
8675         /* Can't change pixel format via MI display flips. */
8676         if (fb->pixel_format != crtc->fb->pixel_format)
8677                 return -EINVAL;
8678
8679         /*
8680          * TILEOFF/LINOFF registers can't be changed via MI display flips.
8681          * Note that pitch changes could also affect these registers.
8682          */
8683         if (INTEL_INFO(dev)->gen > 3 &&
8684             (fb->offsets[0] != crtc->fb->offsets[0] ||
8685              fb->pitches[0] != crtc->fb->pitches[0]))
8686                 return -EINVAL;
8687
8688         work = kzalloc(sizeof(*work), GFP_KERNEL);
8689         if (work == NULL)
8690                 return -ENOMEM;
8691
8692         work->event = event;
8693         work->crtc = crtc;
8694         work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
8695         INIT_WORK(&work->work, intel_unpin_work_fn);
8696
8697         ret = drm_vblank_get(dev, intel_crtc->pipe);
8698         if (ret)
8699                 goto free_work;
8700
8701         /* We borrow the event spin lock for protecting unpin_work */
8702         spin_lock_irqsave(&dev->event_lock, flags);
8703         if (intel_crtc->unpin_work) {
8704                 spin_unlock_irqrestore(&dev->event_lock, flags);
8705                 kfree(work);
8706                 drm_vblank_put(dev, intel_crtc->pipe);
8707
8708                 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
8709                 return -EBUSY;
8710         }
8711         intel_crtc->unpin_work = work;
8712         spin_unlock_irqrestore(&dev->event_lock, flags);
8713
8714         if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
8715                 flush_workqueue(dev_priv->wq);
8716
8717         ret = i915_mutex_lock_interruptible(dev);
8718         if (ret)
8719                 goto cleanup;
8720
8721         /* Reference the objects for the scheduled work. */
8722         drm_gem_object_reference(&work->old_fb_obj->base);
8723         drm_gem_object_reference(&obj->base);
8724
8725         crtc->fb = fb;
8726
8727         work->pending_flip_obj = obj;
8728
8729         work->enable_stall_check = true;
8730
8731         atomic_inc(&intel_crtc->unpin_work_count);
8732         intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
8733
8734         ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, page_flip_flags);
8735         if (ret)
8736                 goto cleanup_pending;
8737
8738         intel_disable_fbc(dev);
8739         intel_mark_fb_busy(obj, NULL);
8740         mutex_unlock(&dev->struct_mutex);
8741
8742         trace_i915_flip_request(intel_crtc->plane, obj);
8743
8744         return 0;
8745
8746 cleanup_pending:
8747         atomic_dec(&intel_crtc->unpin_work_count);
8748         crtc->fb = old_fb;
8749         drm_gem_object_unreference(&work->old_fb_obj->base);
8750         drm_gem_object_unreference(&obj->base);
8751         mutex_unlock(&dev->struct_mutex);
8752
8753 cleanup:
8754         spin_lock_irqsave(&dev->event_lock, flags);
8755         intel_crtc->unpin_work = NULL;
8756         spin_unlock_irqrestore(&dev->event_lock, flags);
8757
8758         drm_vblank_put(dev, intel_crtc->pipe);
8759 free_work:
8760         kfree(work);
8761
8762         return ret;
8763 }
8764
8765 static struct drm_crtc_helper_funcs intel_helper_funcs = {
8766         .mode_set_base_atomic = intel_pipe_set_base_atomic,
8767         .load_lut = intel_crtc_load_lut,
8768 };
8769
8770 /**
8771  * intel_modeset_update_staged_output_state
8772  *
8773  * Updates the staged output configuration state, e.g. after we've read out the
8774  * current hw state.
8775  */
8776 static void intel_modeset_update_staged_output_state(struct drm_device *dev)
8777 {
8778         struct intel_encoder *encoder;
8779         struct intel_connector *connector;
8780
8781         list_for_each_entry(connector, &dev->mode_config.connector_list,
8782                             base.head) {
8783                 connector->new_encoder =
8784                         to_intel_encoder(connector->base.encoder);
8785         }
8786
8787         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
8788                             base.head) {
8789                 encoder->new_crtc =
8790                         to_intel_crtc(encoder->base.crtc);
8791         }
8792 }
8793
8794 /**
8795  * intel_modeset_commit_output_state
8796  *
8797  * This function copies the stage display pipe configuration to the real one.
8798  */
8799 static void intel_modeset_commit_output_state(struct drm_device *dev)
8800 {
8801         struct intel_encoder *encoder;
8802         struct intel_connector *connector;
8803
8804         list_for_each_entry(connector, &dev->mode_config.connector_list,
8805                             base.head) {
8806                 connector->base.encoder = &connector->new_encoder->base;
8807         }
8808
8809         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
8810                             base.head) {
8811                 encoder->base.crtc = &encoder->new_crtc->base;
8812         }
8813 }
8814
8815 static void
8816 connected_sink_compute_bpp(struct intel_connector * connector,
8817                            struct intel_crtc_config *pipe_config)
8818 {
8819         int bpp = pipe_config->pipe_bpp;
8820
8821         DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constraints\n",
8822                 connector->base.base.id,
8823                 drm_get_connector_name(&connector->base));
8824
8825         /* Don't use an invalid EDID bpc value */
8826         if (connector->base.display_info.bpc &&
8827             connector->base.display_info.bpc * 3 < bpp) {
8828                 DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
8829                               bpp, connector->base.display_info.bpc*3);
8830                 pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
8831         }
8832
8833         /* Clamp bpp to 8 on screens without EDID 1.4 */
8834         if (connector->base.display_info.bpc == 0 && bpp > 24) {
8835                 DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
8836                               bpp);
8837                 pipe_config->pipe_bpp = 24;
8838         }
8839 }
8840
8841 static int
8842 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
8843                           struct drm_framebuffer *fb,
8844                           struct intel_crtc_config *pipe_config)
8845 {
8846         struct drm_device *dev = crtc->base.dev;
8847         struct intel_connector *connector;
8848         int bpp;
8849
8850         switch (fb->pixel_format) {
8851         case DRM_FORMAT_C8:
8852                 bpp = 8*3; /* since we go through a colormap */
8853                 break;
8854         case DRM_FORMAT_XRGB1555:
8855         case DRM_FORMAT_ARGB1555:
8856                 /* checked in intel_framebuffer_init already */
8857                 if (WARN_ON(INTEL_INFO(dev)->gen > 3))
8858                         return -EINVAL;
                /* fall through */
8859         case DRM_FORMAT_RGB565:
8860                 bpp = 6*3; /* min is 18bpp */
8861                 break;
8862         case DRM_FORMAT_XBGR8888:
8863         case DRM_FORMAT_ABGR8888:
8864                 /* checked in intel_framebuffer_init already */
8865                 if (WARN_ON(INTEL_INFO(dev)->gen < 4))
8866                         return -EINVAL;
                /* fall through */
8867         case DRM_FORMAT_XRGB8888:
8868         case DRM_FORMAT_ARGB8888:
8869                 bpp = 8*3;
8870                 break;
8871         case DRM_FORMAT_XRGB2101010:
8872         case DRM_FORMAT_ARGB2101010:
8873         case DRM_FORMAT_XBGR2101010:
8874         case DRM_FORMAT_ABGR2101010:
8875                 /* checked in intel_framebuffer_init already */
8876                 if (WARN_ON(INTEL_INFO(dev)->gen < 4))
8877                         return -EINVAL;
8878                 bpp = 10*3;
8879                 break;
8880         /* TODO: gen4+ supports 16 bpc floating point, too. */
8881         default:
8882                 DRM_DEBUG_KMS("unsupported depth\n");
8883                 return -EINVAL;
8884         }
8885
8886         pipe_config->pipe_bpp = bpp;
8887
8888         /* Clamp display bpp to EDID value */
8889         list_for_each_entry(connector, &dev->mode_config.connector_list,
8890                             base.head) {
8891                 if (!connector->new_encoder ||
8892                     connector->new_encoder->new_crtc != crtc)
8893                         continue;
8894
8895                 connected_sink_compute_bpp(connector, pipe_config);
8896         }
8897
8898         return bpp;
8899 }
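/*
 * Worked example (illustrative, not from the original source): an XRGB8888
 * framebuffer starts the pipe at 24 bpp; if a connected sink reports 6 bpc in
 * its EDID, connected_sink_compute_bpp() clamps pipe_bpp to 18, while the
 * returned plane bpp stays at 24 so the caller can enable dithering on the
 * mismatch.
 */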
8900
8901 static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
8902 {
8903         DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
8904                         "type: 0x%x flags: 0x%x\n",
8905                 mode->crtc_clock,
8906                 mode->crtc_hdisplay, mode->crtc_hsync_start,
8907                 mode->crtc_hsync_end, mode->crtc_htotal,
8908                 mode->crtc_vdisplay, mode->crtc_vsync_start,
8909                 mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
8910 }
8911
8912 static void intel_dump_pipe_config(struct intel_crtc *crtc,
8913                                    struct intel_crtc_config *pipe_config,
8914                                    const char *context)
8915 {
8916         DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
8917                       context, pipe_name(crtc->pipe));
8918
8919         DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
8920         DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
8921                       pipe_config->pipe_bpp, pipe_config->dither);
8922         DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
8923                       pipe_config->has_pch_encoder,
8924                       pipe_config->fdi_lanes,
8925                       pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
8926                       pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
8927                       pipe_config->fdi_m_n.tu);
8928         DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
8929                       pipe_config->has_dp_encoder,
8930                       pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
8931                       pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
8932                       pipe_config->dp_m_n.tu);
8933         DRM_DEBUG_KMS("requested mode:\n");
8934         drm_mode_debug_printmodeline(&pipe_config->requested_mode);
8935         DRM_DEBUG_KMS("adjusted mode:\n");
8936         drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
8937         intel_dump_crtc_timings(&pipe_config->adjusted_mode);
8938         DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
8939         DRM_DEBUG_KMS("pipe src size: %dx%d\n",
8940                       pipe_config->pipe_src_w, pipe_config->pipe_src_h);
8941         DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
8942                       pipe_config->gmch_pfit.control,
8943                       pipe_config->gmch_pfit.pgm_ratios,
8944                       pipe_config->gmch_pfit.lvds_border_bits);
8945         DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
8946                       pipe_config->pch_pfit.pos,
8947                       pipe_config->pch_pfit.size,
8948                       pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
8949         DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
8950         DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
8951 }
8952
8953 static bool check_encoder_cloning(struct drm_crtc *crtc)
8954 {
8955         int num_encoders = 0;
8956         bool uncloneable_encoders = false;
8957         struct intel_encoder *encoder;
8958
8959         list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list,
8960                             base.head) {
8961                 if (&encoder->new_crtc->base != crtc)
8962                         continue;
8963
8964                 num_encoders++;
8965                 if (!encoder->cloneable)
8966                         uncloneable_encoders = true;
8967         }
8968
8969         return !(num_encoders > 1 && uncloneable_encoders);
8970 }
8971
8972 static struct intel_crtc_config *
8973 intel_modeset_pipe_config(struct drm_crtc *crtc,
8974                           struct drm_framebuffer *fb,
8975                           struct drm_display_mode *mode)
8976 {
8977         struct drm_device *dev = crtc->dev;
8978         struct intel_encoder *encoder;
8979         struct intel_crtc_config *pipe_config;
8980         int plane_bpp, ret = -EINVAL;
8981         bool retry = true;
8982
8983         if (!check_encoder_cloning(crtc)) {
8984                 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
8985                 return ERR_PTR(-EINVAL);
8986         }
8987
8988         pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
8989         if (!pipe_config)
8990                 return ERR_PTR(-ENOMEM);
8991
8992         drm_mode_copy(&pipe_config->adjusted_mode, mode);
8993         drm_mode_copy(&pipe_config->requested_mode, mode);
8994
8995         pipe_config->cpu_transcoder =
8996                 (enum transcoder) to_intel_crtc(crtc)->pipe;
8997         pipe_config->shared_dpll = DPLL_ID_PRIVATE;
8998
8999         /*
9000          * Sanitize sync polarity flags based on requested ones. If neither
9001          * positive nor negative polarity is requested, treat this as meaning
9002          * negative polarity.
9003          */
9004         if (!(pipe_config->adjusted_mode.flags &
9005               (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
9006                 pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
9007
9008         if (!(pipe_config->adjusted_mode.flags &
9009               (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
9010                 pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
9011
9012         /* Compute a starting value for pipe_config->pipe_bpp taking the source
9013          * plane pixel format and any sink constraints into account. Returns the
9014          * source plane bpp so that dithering can be selected on mismatches
9015          * after encoders and crtc also have had their say. */
9016         plane_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
9017                                               fb, pipe_config);
9018         if (plane_bpp < 0)
9019                 goto fail;
9020
9021         /*
9022          * Determine the real pipe dimensions. Note that stereo modes can
9023          * increase the actual pipe size due to the frame doubling and
9024          * insertion of additional space for blanks between the frames. This
9025          * is stored in the crtc timings. We use the requested mode to do this
9026          * computation to clearly distinguish it from the adjusted mode, which
9027          * can be changed by the connectors in the retry loop below.
9028          */
9029         drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE);
9030         pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay;
9031         pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay;
9032
9033 encoder_retry:
9034         /* Ensure the port clock defaults are reset when retrying. */
9035         pipe_config->port_clock = 0;
9036         pipe_config->pixel_multiplier = 1;
9037
9038         /* Fill in default crtc timings, allow encoders to overwrite them. */
9039         drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, CRTC_STEREO_DOUBLE);
9040
9041         /* Pass our mode to the connectors and the CRTC to give them a chance to
9042          * adjust it according to limitations or connector properties, and also
9043          * a chance to reject the mode entirely.
9044          */
9045         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9046                             base.head) {
9047
9048                 if (&encoder->new_crtc->base != crtc)
9049                         continue;
9050
9051                 if (!(encoder->compute_config(encoder, pipe_config))) {
9052                         DRM_DEBUG_KMS("Encoder config failure\n");
9053                         goto fail;
9054                 }
9055         }
9056
9057         /* Set default port clock if not overwritten by the encoder. Needs to be
9058          * done afterwards in case the encoder adjusts the mode. */
9059         if (!pipe_config->port_clock)
9060                 pipe_config->port_clock = pipe_config->adjusted_mode.crtc_clock
9061                         * pipe_config->pixel_multiplier;
9062
9063         ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
9064         if (ret < 0) {
9065                 DRM_DEBUG_KMS("CRTC fixup failed\n");
9066                 goto fail;
9067         }
9068
9069         if (ret == RETRY) {
9070                 if (WARN(!retry, "loop in pipe configuration computation\n")) {
9071                         ret = -EINVAL;
9072                         goto fail;
9073                 }
9074
9075                 DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
9076                 retry = false;
9077                 goto encoder_retry;
9078         }
9079
9080         pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
9081         DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
9082                       plane_bpp, pipe_config->pipe_bpp, pipe_config->dither);
9083
9084         return pipe_config;
9085 fail:
9086         kfree(pipe_config);
9087         return ERR_PTR(ret);
9088 }
9089
9090 /* Computes which crtcs are affected and sets the relevant bits in the mask. For
9091  * simplicity we use the crtc's pipe number (because it's easier to obtain). */
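/*
 * Illustration (hedged): if a connector moves from the crtc driving pipe A to
 * the one driving pipe B, both pipe bits end up in prepare_pipes; an enabled
 * crtc left without any staged encoder ends up in disable_pipes, and
 * disable_pipes is then masked back out of the other two masks.
 */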
9092 static void
9093 intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
9094                              unsigned *prepare_pipes, unsigned *disable_pipes)
9095 {
9096         struct intel_crtc *intel_crtc;
9097         struct drm_device *dev = crtc->dev;
9098         struct intel_encoder *encoder;
9099         struct intel_connector *connector;
9100         struct drm_crtc *tmp_crtc;
9101
9102         *disable_pipes = *modeset_pipes = *prepare_pipes = 0;
9103
9104         /* Check which crtcs have changed outputs connected to them; these need
9105          * to be part of the prepare_pipes mask. We don't (yet) support global
9106          * modeset across multiple crtcs, so modeset_pipes will only have one
9107          * bit set at most. */
9108         list_for_each_entry(connector, &dev->mode_config.connector_list,
9109                             base.head) {
9110                 if (connector->base.encoder == &connector->new_encoder->base)
9111                         continue;
9112
9113                 if (connector->base.encoder) {
9114                         tmp_crtc = connector->base.encoder->crtc;
9115
9116                         *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
9117                 }
9118
9119                 if (connector->new_encoder)
9120                         *prepare_pipes |=
9121                                 1 << connector->new_encoder->new_crtc->pipe;
9122         }
9123
9124         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9125                             base.head) {
9126                 if (encoder->base.crtc == &encoder->new_crtc->base)
9127                         continue;
9128
9129                 if (encoder->base.crtc) {
9130                         tmp_crtc = encoder->base.crtc;
9131
9132                         *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
9133                 }
9134
9135                 if (encoder->new_crtc)
9136                         *prepare_pipes |= 1 << encoder->new_crtc->pipe;
9137         }
9138
9139         /* Check for any pipes that will be fully disabled ... */
9140         list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
9141                             base.head) {
9142                 bool used = false;
9143
9144                 /* Don't try to disable disabled crtcs. */
9145                 if (!intel_crtc->base.enabled)
9146                         continue;
9147
9148                 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9149                                     base.head) {
9150                         if (encoder->new_crtc == intel_crtc)
9151                                 used = true;
9152                 }
9153
9154                 if (!used)
9155                         *disable_pipes |= 1 << intel_crtc->pipe;
9156         }
9157
9158
9159         /* set_mode is also used to update properties on live display pipes. */
9160         intel_crtc = to_intel_crtc(crtc);
9161         if (crtc->enabled)
9162                 *prepare_pipes |= 1 << intel_crtc->pipe;
9163
9164         /*
9165          * For simplicity do a full modeset on any pipe where the output routing
9166          * changed. We could be more clever, but that would require us to be
9167          * more careful with calling the relevant encoder->mode_set functions.
9168          */
9169         if (*prepare_pipes)
9170                 *modeset_pipes = *prepare_pipes;
9171
9172         /* ... and mask these out. */
9173         *modeset_pipes &= ~(*disable_pipes);
9174         *prepare_pipes &= ~(*disable_pipes);
9175
9176         /*
9177          * HACK: We don't (yet) fully support global modesets. intel_set_config
9178          * obeys this rule, but the modeset restore mode of
9179          * intel_modeset_setup_hw_state does not.
9180          */
9181         *modeset_pipes &= 1 << intel_crtc->pipe;
9182         *prepare_pipes &= 1 << intel_crtc->pipe;
9183
9184         DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
9185                       *modeset_pipes, *prepare_pipes, *disable_pipes);
9186 }
9187
9188 static bool intel_crtc_in_use(struct drm_crtc *crtc)
9189 {
9190         struct drm_encoder *encoder;
9191         struct drm_device *dev = crtc->dev;
9192
9193         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
9194                 if (encoder->crtc == crtc)
9195                         return true;
9196
9197         return false;
9198 }
9199
9200 static void
9201 intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
9202 {
9203         struct intel_encoder *intel_encoder;
9204         struct intel_crtc *intel_crtc;
9205         struct drm_connector *connector;
9206
9207         list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
9208                             base.head) {
9209                 if (!intel_encoder->base.crtc)
9210                         continue;
9211
9212                 intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
9213
9214                 if (prepare_pipes & (1 << intel_crtc->pipe))
9215                         intel_encoder->connectors_active = false;
9216         }
9217
9218         intel_modeset_commit_output_state(dev);
9219
9220         /* Update computed state. */
9221         list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
9222                             base.head) {
9223                 intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base);
9224         }
9225
9226         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
9227                 if (!connector->encoder || !connector->encoder->crtc)
9228                         continue;
9229
9230                 intel_crtc = to_intel_crtc(connector->encoder->crtc);
9231
9232                 if (prepare_pipes & (1 << intel_crtc->pipe)) {
9233                         struct drm_property *dpms_property =
9234                                 dev->mode_config.dpms_property;
9235
9236                         connector->dpms = DRM_MODE_DPMS_ON;
9237                         drm_object_property_set_value(&connector->base,
9238                                                          dpms_property,
9239                                                          DRM_MODE_DPMS_ON);
9240
9241                         intel_encoder = to_intel_encoder(connector->encoder);
9242                         intel_encoder->connectors_active = true;
9243                 }
9244         }
9245
9246 }
9247
9248 static bool intel_fuzzy_clock_check(int clock1, int clock2)
9249 {
9250         int diff;
9251
9252         if (clock1 == clock2)
9253                 return true;
9254
9255         if (!clock1 || !clock2)
9256                 return false;
9257
9258         diff = abs(clock1 - clock2);
9259
9260         if ((diff + clock1 + clock2) * 100 / (clock1 + clock2) < 105)
9261                 return true;
9262
9263         return false;
9264 }
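/*
 * Example (illustrative): clock1 = 270000 and clock2 = 272000 gives
 * diff = 2000 and (2000 + 542000) * 100 / 542000 = 100, which is below 105,
 * so the clocks are treated as equal; the check effectively allows a
 * difference of up to 5% of (clock1 + clock2), i.e. roughly 10% of either
 * clock.
 */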
9265
9266 #define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
9267         list_for_each_entry((intel_crtc), \
9268                             &(dev)->mode_config.crtc_list, \
9269                             base.head) \
9270                 if (mask & (1 << (intel_crtc)->pipe))
9271
9272 static bool
9273 intel_pipe_config_compare(struct drm_device *dev,
9274                           struct intel_crtc_config *current_config,
9275                           struct intel_crtc_config *pipe_config)
9276 {
9277 #define PIPE_CONF_CHECK_X(name) \
9278         if (current_config->name != pipe_config->name) { \
9279                 DRM_ERROR("mismatch in " #name " " \
9280                           "(expected 0x%08x, found 0x%08x)\n", \
9281                           current_config->name, \
9282                           pipe_config->name); \
9283                 return false; \
9284         }
9285
9286 #define PIPE_CONF_CHECK_I(name) \
9287         if (current_config->name != pipe_config->name) { \
9288                 DRM_ERROR("mismatch in " #name " " \
9289                           "(expected %i, found %i)\n", \
9290                           current_config->name, \
9291                           pipe_config->name); \
9292                 return false; \
9293         }
9294
9295 #define PIPE_CONF_CHECK_FLAGS(name, mask)       \
9296         if ((current_config->name ^ pipe_config->name) & (mask)) { \
9297                 DRM_ERROR("mismatch in " #name " (" #mask ") "     \
9298                           "(expected %i, found %i)\n", \
9299                           current_config->name & (mask), \
9300                           pipe_config->name & (mask)); \
9301                 return false; \
9302         }
9303
9304 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
9305         if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
9306                 DRM_ERROR("mismatch in " #name " " \
9307                           "(expected %i, found %i)\n", \
9308                           current_config->name, \
9309                           pipe_config->name); \
9310                 return false; \
9311         }
9312
9313 #define PIPE_CONF_QUIRK(quirk)  \
9314         ((current_config->quirks | pipe_config->quirks) & (quirk))
9315
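        /*
         * Editorial note on the helpers above: _I compares exact integers,
         * _X reports mismatches in hex, _FLAGS compares only the masked
         * bits, and _CLOCK_FUZZY routes through intel_fuzzy_clock_check()
         * so clocks recomputed from hw readout with small rounding
         * differences still match.
         */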
9316         PIPE_CONF_CHECK_I(cpu_transcoder);
9317
9318         PIPE_CONF_CHECK_I(has_pch_encoder);
9319         PIPE_CONF_CHECK_I(fdi_lanes);
9320         PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
9321         PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
9322         PIPE_CONF_CHECK_I(fdi_m_n.link_m);
9323         PIPE_CONF_CHECK_I(fdi_m_n.link_n);
9324         PIPE_CONF_CHECK_I(fdi_m_n.tu);
9325
9326         PIPE_CONF_CHECK_I(has_dp_encoder);
9327         PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
9328         PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
9329         PIPE_CONF_CHECK_I(dp_m_n.link_m);
9330         PIPE_CONF_CHECK_I(dp_m_n.link_n);
9331         PIPE_CONF_CHECK_I(dp_m_n.tu);
9332
9333         PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
9334         PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
9335         PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
9336         PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_end);
9337         PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_start);
9338         PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_end);
9339
9340         PIPE_CONF_CHECK_I(adjusted_mode.crtc_vdisplay);
9341         PIPE_CONF_CHECK_I(adjusted_mode.crtc_vtotal);
9342         PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_start);
9343         PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_end);
9344         PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start);
9345         PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
9346
9347         PIPE_CONF_CHECK_I(pixel_multiplier);
9348
9349         PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
9350                               DRM_MODE_FLAG_INTERLACE);
9351
9352         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
9353                 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
9354                                       DRM_MODE_FLAG_PHSYNC);
9355                 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
9356                                       DRM_MODE_FLAG_NHSYNC);
9357                 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
9358                                       DRM_MODE_FLAG_PVSYNC);
9359                 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
9360                                       DRM_MODE_FLAG_NVSYNC);
9361         }
9362
9363         PIPE_CONF_CHECK_I(pipe_src_w);
9364         PIPE_CONF_CHECK_I(pipe_src_h);
9365
9366         PIPE_CONF_CHECK_I(gmch_pfit.control);
9367         /* pfit ratios are autocomputed by the hw on gen4+ */
9368         if (INTEL_INFO(dev)->gen < 4)
9369                 PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
9370         PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
9371         PIPE_CONF_CHECK_I(pch_pfit.enabled);
9372         if (current_config->pch_pfit.enabled) {
9373                 PIPE_CONF_CHECK_I(pch_pfit.pos);
9374                 PIPE_CONF_CHECK_I(pch_pfit.size);
9375         }
9376
9377         /* BDW+ don't expose a synchronous way to read the state */
9378         if (IS_HASWELL(dev))
9379                 PIPE_CONF_CHECK_I(ips_enabled);
9380
9381         PIPE_CONF_CHECK_I(double_wide);
9382
9383         PIPE_CONF_CHECK_I(shared_dpll);
9384         PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
9385         PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
9386         PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
9387         PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
9388
9389         if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
9390                 PIPE_CONF_CHECK_I(pipe_bpp);
9391
9392         if (!HAS_DDI(dev)) {
9393                 PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
9394                 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
9395         }
9396
9397 #undef PIPE_CONF_CHECK_X
9398 #undef PIPE_CONF_CHECK_I
9399 #undef PIPE_CONF_CHECK_FLAGS
9400 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
9401 #undef PIPE_CONF_QUIRK
9402
9403         return true;
9404 }
9405
9406 static void
9407 check_connector_state(struct drm_device *dev)
9408 {
9409         struct intel_connector *connector;
9410
9411         list_for_each_entry(connector, &dev->mode_config.connector_list,
9412                             base.head) {
9413                 /* This also checks the encoder/connector hw state with the
9414                  * ->get_hw_state callbacks. */
9415                 intel_connector_check_state(connector);
9416
9417                 WARN(&connector->new_encoder->base != connector->base.encoder,
9418                      "connector's staged encoder doesn't match current encoder\n");
9419         }
9420 }
9421
9422 static void
9423 check_encoder_state(struct drm_device *dev)
9424 {
9425         struct intel_encoder *encoder;
9426         struct intel_connector *connector;
9427
9428         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9429                             base.head) {
9430                 bool enabled = false;
9431                 bool active = false;
9432                 enum pipe pipe, tracked_pipe;
9433
9434                 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
9435                               encoder->base.base.id,
9436                               drm_get_encoder_name(&encoder->base));
9437
9438                 WARN(&encoder->new_crtc->base != encoder->base.crtc,
9439                      "encoder's staged crtc doesn't match current crtc\n");
9440                 WARN(encoder->connectors_active && !encoder->base.crtc,
9441                      "encoder's connectors_active set, but no crtc\n");
9442
9443                 list_for_each_entry(connector, &dev->mode_config.connector_list,
9444                                     base.head) {
9445                         if (connector->base.encoder != &encoder->base)
9446                                 continue;
9447                         enabled = true;
9448                         if (connector->base.dpms != DRM_MODE_DPMS_OFF)
9449                                 active = true;
9450                 }
9451                 WARN(!!encoder->base.crtc != enabled,
9452                      "encoder's enabled state mismatch "
9453                      "(expected %i, found %i)\n",
9454                      !!encoder->base.crtc, enabled);
9455                 WARN(active && !encoder->base.crtc,
9456                      "active encoder with no crtc\n");
9457
9458                 WARN(encoder->connectors_active != active,
9459                      "encoder's computed active state doesn't match tracked active state "
9460                      "(expected %i, found %i)\n", active, encoder->connectors_active);
9461
9462                 active = encoder->get_hw_state(encoder, &pipe);
9463                 WARN(active != encoder->connectors_active,
9464                      "encoder's hw state doesn't match sw tracking "
9465                      "(expected %i, found %i)\n",
9466                      encoder->connectors_active, active);
9467
9468                 if (!encoder->base.crtc)
9469                         continue;
9470
9471                 tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
9472                 WARN(active && pipe != tracked_pipe,
9473                      "active encoder's pipe doesn't match "
9474                      "(expected %i, found %i)\n",
9475                      tracked_pipe, pipe);
9476
9477         }
9478 }
9479
9480 static void
9481 check_crtc_state(struct drm_device *dev)
9482 {
9483         drm_i915_private_t *dev_priv = dev->dev_private;
9484         struct intel_crtc *crtc;
9485         struct intel_encoder *encoder;
9486         struct intel_crtc_config pipe_config;
9487
9488         list_for_each_entry(crtc, &dev->mode_config.crtc_list,
9489                             base.head) {
9490                 bool enabled = false;
9491                 bool active = false;
9492
9493                 memset(&pipe_config, 0, sizeof(pipe_config));
9494
9495                 DRM_DEBUG_KMS("[CRTC:%d]\n",
9496                               crtc->base.base.id);
9497
9498                 WARN(crtc->active && !crtc->base.enabled,
9499                      "active crtc, but not enabled in sw tracking\n");
9500
9501                 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9502                                     base.head) {
9503                         if (encoder->base.crtc != &crtc->base)
9504                                 continue;
9505                         enabled = true;
9506                         if (encoder->connectors_active)
9507                                 active = true;
9508                 }
9509
9510                 WARN(active != crtc->active,
9511                      "crtc's computed active state doesn't match tracked active state "
9512                      "(expected %i, found %i)\n", active, crtc->active);
9513                 WARN(enabled != crtc->base.enabled,
9514                      "crtc's computed enabled state doesn't match tracked enabled state "
9515                      "(expected %i, found %i)\n", enabled, crtc->base.enabled);
9516
9517                 active = dev_priv->display.get_pipe_config(crtc,
9518                                                            &pipe_config);
9519
9520                 /* hw state is inconsistent with the pipe A quirk */
9521                 if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
9522                         active = crtc->active;
9523
9524                 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9525                                     base.head) {
9526                         enum pipe pipe;
9527                         if (encoder->base.crtc != &crtc->base)
9528                                 continue;
9529                         if (encoder->get_hw_state(encoder, &pipe))
9530                                 encoder->get_config(encoder, &pipe_config);
9531                 }
9532
9533                 WARN(crtc->active != active,
9534                      "crtc active state doesn't match with hw state "
9535                      "(expected %i, found %i)\n", crtc->active, active);
9536
9537                 if (active &&
9538                     !intel_pipe_config_compare(dev, &crtc->config, &pipe_config)) {
9539                         WARN(1, "pipe state doesn't match!\n");
9540                         intel_dump_pipe_config(crtc, &pipe_config,
9541                                                "[hw state]");
9542                         intel_dump_pipe_config(crtc, &crtc->config,
9543                                                "[sw state]");
9544                 }
9545         }
9546 }
9547
9548 static void
9549 check_shared_dpll_state(struct drm_device *dev)
9550 {
9551         drm_i915_private_t *dev_priv = dev->dev_private;
9552         struct intel_crtc *crtc;
9553         struct intel_dpll_hw_state dpll_hw_state;
9554         int i;
9555
9556         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
9557                 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
9558                 int enabled_crtcs = 0, active_crtcs = 0;
9559                 bool active;
9560
9561                 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
9562
9563                 DRM_DEBUG_KMS("%s\n", pll->name);
9564
9565                 active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
9566
9567                 WARN(pll->active > pll->refcount,
9568                      "more active pll users than references: %i vs %i\n",
9569                      pll->active, pll->refcount);
9570                 WARN(pll->active && !pll->on,
9571                      "pll in active use but not on in sw tracking\n");
9572                 WARN(pll->on && !pll->active,
9573                      "pll is on but not in use in sw tracking\n");
9574                 WARN(pll->on != active,
9575                      "pll on state mismatch (expected %i, found %i)\n",
9576                      pll->on, active);
9577
9578                 list_for_each_entry(crtc, &dev->mode_config.crtc_list,
9579                                     base.head) {
9580                         if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll)
9581                                 enabled_crtcs++;
9582                         if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
9583                                 active_crtcs++;
9584                 }
9585                 WARN(pll->active != active_crtcs,
9586                      "pll active crtcs mismatch (expected %i, found %i)\n",
9587                      pll->active, active_crtcs);
9588                 WARN(pll->refcount != enabled_crtcs,
9589                      "pll enabled crtcs mismatch (expected %i, found %i)\n",
9590                      pll->refcount, enabled_crtcs);
9591
9592                 WARN(pll->on && memcmp(&pll->hw_state, &dpll_hw_state,
9593                                        sizeof(dpll_hw_state)),
9594                      "pll hw state mismatch\n");
9595         }
9596 }
9597
9598 void
9599 intel_modeset_check_state(struct drm_device *dev)
9600 {
9601         check_connector_state(dev);
9602         check_encoder_state(dev);
9603         check_crtc_state(dev);
9604         check_shared_dpll_state(dev);
9605 }
9606
9607 void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
9608                                      int dotclock)
9609 {
9610         /*
9611          * FDI already provided one idea for the dotclock.
9612          * Yell if the encoder disagrees.
9613          */
9614         WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.crtc_clock, dotclock),
9615              "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
9616              pipe_config->adjusted_mode.crtc_clock, dotclock);
9617 }
9618
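/*
 * Rough outline of the modeset sequence implemented below (summary only):
 * compute which pipes must be disabled, prepared or fully modeset, build and
 * dump the new pipe config, disable the affected pipes, commit the staged
 * output state and global resources, program mode/plane state via
 * intel_crtc_mode_set(), and finally re-enable the prepared pipes.
 */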
9619 static int __intel_set_mode(struct drm_crtc *crtc,
9620                             struct drm_display_mode *mode,
9621                             int x, int y, struct drm_framebuffer *fb)
9622 {
9623         struct drm_device *dev = crtc->dev;
9624         drm_i915_private_t *dev_priv = dev->dev_private;
9625         struct drm_display_mode *saved_mode;
9626         struct intel_crtc_config *pipe_config = NULL;
9627         struct intel_crtc *intel_crtc;
9628         unsigned disable_pipes, prepare_pipes, modeset_pipes;
9629         int ret = 0;
9630
9631         saved_mode = kmalloc(sizeof(*saved_mode), GFP_KERNEL);
9632         if (!saved_mode)
9633                 return -ENOMEM;
9634
9635         intel_modeset_affected_pipes(crtc, &modeset_pipes,
9636                                      &prepare_pipes, &disable_pipes);
9637
9638         *saved_mode = crtc->mode;
9639
9640         /* Hack: Because we don't (yet) support global modeset on multiple
9641          * crtcs, we don't keep track of the new mode for more than one crtc.
9642          * Hence simply check whether any bit is set in modeset_pipes in all the
9643          * pieces of code that are not yet converted to deal with multiple crtcs
9644          * changing their mode at the same time. */
9645         if (modeset_pipes) {
9646                 pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
9647                 if (IS_ERR(pipe_config)) {
9648                         ret = PTR_ERR(pipe_config);
9649                         pipe_config = NULL;
9650
9651                         goto out;
9652                 }
9653                 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
9654                                        "[modeset]");
9655         }
9656
9657         /*
9658          * See if the config requires any additional preparation, e.g.
9659          * to adjust global state with pipes off.  We need to do this
9660          * here so we can get the modeset pipe's updated config for the new
9661          * mode set on this crtc.  For other crtcs we need to use the
9662          * adjusted_mode bits in the crtc directly.
9663          */
9664         if (IS_VALLEYVIEW(dev)) {
9665                 valleyview_modeset_global_pipes(dev, &prepare_pipes,
9666                                                 modeset_pipes, pipe_config);
9667
9668                 /* may have added more to prepare_pipes than we should */
9669                 prepare_pipes &= ~disable_pipes;
9670         }
9671
9672         for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
9673                 intel_crtc_disable(&intel_crtc->base);
9674
9675         for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
9676                 if (intel_crtc->base.enabled)
9677                         dev_priv->display.crtc_disable(&intel_crtc->base);
9678         }
9679
9680         /* crtc->mode is already used by the ->mode_set callbacks, hence we need
9681          * to set it here already, even though we also pass it down the callchain.
9682          */
9683         if (modeset_pipes) {
9684                 crtc->mode = *mode;
9685                 /* mode_set/enable/disable functions rely on a correct pipe
9686                  * config. */
9687                 to_intel_crtc(crtc)->config = *pipe_config;
9688
9689                 /*
9690                  * Calculate and store various constants which
9691                  * are later needed by vblank and swap-completion
9692                  * timestamping. They are derived from true hwmode.
9693                  */
9694                 drm_calc_timestamping_constants(crtc,
9695                                                 &pipe_config->adjusted_mode);
9696         }
9697
9698         /* Only after disabling all output pipelines that will be changed can we
9699          * update the output configuration. */
9700         intel_modeset_update_state(dev, prepare_pipes);
9701
9702         if (dev_priv->display.modeset_global_resources)
9703                 dev_priv->display.modeset_global_resources(dev);
9704
9705         /* Set up the DPLL and any encoders state that needs to adjust or depend
9706          * on the DPLL.
9707          */
9708         for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
9709                 ret = intel_crtc_mode_set(&intel_crtc->base,
9710                                           x, y, fb);
9711                 if (ret)
9712                         goto done;
9713         }
9714
9715         /* Now enable the clocks, plane, pipe, and connectors that we set up. */
9716         for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
9717                 dev_priv->display.crtc_enable(&intel_crtc->base);
9718
9719         /* FIXME: add subpixel order */
9720 done:
9721         if (ret && crtc->enabled)
9722                 crtc->mode = *saved_mode;
9723
9724 out:
9725         kfree(pipe_config);
9726         kfree(saved_mode);
9727         return ret;
9728 }
9729
9730 static int intel_set_mode(struct drm_crtc *crtc,
9731                           struct drm_display_mode *mode,
9732                           int x, int y, struct drm_framebuffer *fb)
9733 {
9734         int ret;
9735
9736         ret = __intel_set_mode(crtc, mode, x, y, fb);
9737
9738         if (ret == 0)
9739                 intel_modeset_check_state(crtc->dev);
9740
9741         return ret;
9742 }
9743
9744 void intel_crtc_restore_mode(struct drm_crtc *crtc)
9745 {
9746         intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb);
9747 }
9748
9749 #undef for_each_intel_crtc_masked
9750
9751 static void intel_set_config_free(struct intel_set_config *config)
9752 {
9753         if (!config)
9754                 return;
9755
9756         kfree(config->save_connector_encoders);
9757         kfree(config->save_encoder_crtcs);
9758         kfree(config);
9759 }
9760
9761 static int intel_set_config_save_state(struct drm_device *dev,
9762                                        struct intel_set_config *config)
9763 {
9764         struct drm_encoder *encoder;
9765         struct drm_connector *connector;
9766         int count;
9767
9768         config->save_encoder_crtcs =
9769                 kcalloc(dev->mode_config.num_encoder,
9770                         sizeof(struct drm_crtc *), GFP_KERNEL);
9771         if (!config->save_encoder_crtcs)
9772                 return -ENOMEM;
9773
9774         config->save_connector_encoders =
9775                 kcalloc(dev->mode_config.num_connector,
9776                         sizeof(struct drm_encoder *), GFP_KERNEL);
9777         if (!config->save_connector_encoders)
9778                 return -ENOMEM;
9779
9780         /* Copy data. Note that driver private data is not affected.
9781          * Should anything bad happen, only the expected state is
9782          * restored, not the driver's personal bookkeeping.
9783          */
9784         count = 0;
9785         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
9786                 config->save_encoder_crtcs[count++] = encoder->crtc;
9787         }
9788
9789         count = 0;
9790         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
9791                 config->save_connector_encoders[count++] = connector->encoder;
9792         }
9793
9794         return 0;
9795 }
9796
9797 static void intel_set_config_restore_state(struct drm_device *dev,
9798                                            struct intel_set_config *config)
9799 {
9800         struct intel_encoder *encoder;
9801         struct intel_connector *connector;
9802         int count;
9803
9804         count = 0;
9805         list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
9806                 encoder->new_crtc =
9807                         to_intel_crtc(config->save_encoder_crtcs[count++]);
9808         }
9809
9810         count = 0;
9811         list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
9812                 connector->new_encoder =
9813                         to_intel_encoder(config->save_connector_encoders[count++]);
9814         }
9815 }
9816
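/*
 * Check whether any connector in the requested set is currently routed to
 * set->crtc but has its DPMS state off; if so the caller treats the update
 * as a full mode set rather than a simple framebuffer flip.
 */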
9817 static bool
9818 is_crtc_connector_off(struct drm_mode_set *set)
9819 {
9820         int i;
9821
9822         if (set->num_connectors == 0)
9823                 return false;
9824
9825         if (WARN_ON(set->connectors == NULL))
9826                 return false;
9827
9828         for (i = 0; i < set->num_connectors; i++)
9829                 if (set->connectors[i]->encoder &&
9830                     set->connectors[i]->encoder->crtc == set->crtc &&
9831                     set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
9832                         return true;
9833
9834         return false;
9835 }
9836
9837 static void
9838 intel_set_config_compute_mode_changes(struct drm_mode_set *set,
9839                                       struct intel_set_config *config)
9840 {
9841
9842         /* We should be able to check here if the fb has the same properties
9843          * and then just flip_or_move it */
9844         if (is_crtc_connector_off(set)) {
9845                 config->mode_changed = true;
9846         } else if (set->crtc->fb != set->fb) {
9847                 /* If we have no fb then treat it as a full mode set */
9848                 if (set->crtc->fb == NULL) {
9849                         struct intel_crtc *intel_crtc =
9850                                 to_intel_crtc(set->crtc);
9851
9852                         if (intel_crtc->active && i915_fastboot) {
9853                                 DRM_DEBUG_KMS("crtc has no fb, will flip\n");
9854                                 config->fb_changed = true;
9855                         } else {
9856                                 DRM_DEBUG_KMS("inactive crtc, full mode set\n");
9857                                 config->mode_changed = true;
9858                         }
9859                 } else if (set->fb == NULL) {
9860                         config->mode_changed = true;
9861                 } else if (set->fb->pixel_format !=
9862                            set->crtc->fb->pixel_format) {
9863                         config->mode_changed = true;
9864                 } else {
9865                         config->fb_changed = true;
9866                 }
9867         }
9868
9869         if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
9870                 config->fb_changed = true;
9871
9872         if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
9873                 DRM_DEBUG_KMS("modes are different, full mode set\n");
9874                 drm_mode_debug_printmodeline(&set->crtc->mode);
9875                 drm_mode_debug_printmodeline(set->mode);
9876                 config->mode_changed = true;
9877         }
9878
9879         DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n",
9880                         set->crtc->base.id, config->mode_changed, config->fb_changed);
9881 }
9882
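/*
 * Stage the new output routing in connector->new_encoder and
 * encoder->new_crtc based on the requested mode set, without touching the
 * hardware. Any change in routing flags config->mode_changed so that the
 * commit below does a full mode set.
 */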
9883 static int
9884 intel_modeset_stage_output_state(struct drm_device *dev,
9885                                  struct drm_mode_set *set,
9886                                  struct intel_set_config *config)
9887 {
9888         struct drm_crtc *new_crtc;
9889         struct intel_connector *connector;
9890         struct intel_encoder *encoder;
9891         int ro;
9892
9893         /* The upper layers ensure that we either disable a crtc or have a list
9894          * of connectors. For paranoia, double-check this. */
9895         WARN_ON(!set->fb && (set->num_connectors != 0));
9896         WARN_ON(set->fb && (set->num_connectors == 0));
9897
9898         list_for_each_entry(connector, &dev->mode_config.connector_list,
9899                             base.head) {
9900                 /* Traverse the passed-in connector list and get encoders
9901                  * for them. */
9902                 for (ro = 0; ro < set->num_connectors; ro++) {
9903                         if (set->connectors[ro] == &connector->base) {
9904                                 connector->new_encoder = connector->encoder;
9905                                 break;
9906                         }
9907                 }
9908
9909                 /* If we disable the crtc, disable all its connectors. Also, if
9910                  * the connector is on the changing crtc but not on the new
9911                  * connector list, disable it. */
9912                 if ((!set->fb || ro == set->num_connectors) &&
9913                     connector->base.encoder &&
9914                     connector->base.encoder->crtc == set->crtc) {
9915                         connector->new_encoder = NULL;
9916
9917                         DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
9918                                 connector->base.base.id,
9919                                 drm_get_connector_name(&connector->base));
9920                 }
9921
9923                 if (&connector->new_encoder->base != connector->base.encoder) {
9924                         DRM_DEBUG_KMS("encoder changed, full mode switch\n");
9925                         config->mode_changed = true;
9926                 }
9927         }
9928         /* connector->new_encoder is now updated for all connectors. */
9929
9930         /* Update crtc of enabled connectors. */
9931         list_for_each_entry(connector, &dev->mode_config.connector_list,
9932                             base.head) {
9933                 if (!connector->new_encoder)
9934                         continue;
9935
9936                 new_crtc = connector->new_encoder->base.crtc;
9937
9938                 for (ro = 0; ro < set->num_connectors; ro++) {
9939                         if (set->connectors[ro] == &connector->base)
9940                                 new_crtc = set->crtc;
9941                 }
9942
9943                 /* Make sure the new CRTC will work with the encoder */
9944                 if (!drm_encoder_crtc_ok(&connector->new_encoder->base,
9945                                          new_crtc)) {
9946                         return -EINVAL;
9947                 }
9948                 connector->encoder->new_crtc = to_intel_crtc(new_crtc);
9949
9950                 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
9951                         connector->base.base.id,
9952                         drm_get_connector_name(&connector->base),
9953                         new_crtc->base.id);
9954         }
9955
9956         /* Check for any encoders that need to be disabled. */
9957         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9958                             base.head) {
9959                 int num_connectors = 0;
9960                 list_for_each_entry(connector,
9961                                     &dev->mode_config.connector_list,
9962                                     base.head) {
9963                         if (connector->new_encoder == encoder) {
9964                                 WARN_ON(!connector->new_encoder->new_crtc);
9965                                 num_connectors++;
9966                         }
9967                 }
9968
9969                 if (num_connectors == 0)
9970                         encoder->new_crtc = NULL;
9971                 else if (num_connectors > 1)
9972                         return -EINVAL;
9973
9974                 /* Only now check for crtc changes so we don't miss encoders
9975                  * that will be disabled. */
9976                 if (&encoder->new_crtc->base != encoder->base.crtc) {
9977                         DRM_DEBUG_KMS("crtc changed, full mode switch\n");
9978                         config->mode_changed = true;
9979                 }
9980         }
9981         /* Now we've also updated encoder->new_crtc for all encoders. */
9982
9983         return 0;
9984 }
9985
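/*
 * drm_crtc_funcs.set_config hook. The flow is: snapshot the current
 * routing, classify the request as a full mode set or a plain framebuffer
 * update, stage the new output state, and then either call intel_set_mode()
 * or intel_pipe_set_base(). On failure the saved routing and mode are
 * restored so the display does not end up in a half-committed state.
 */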
9986 static int intel_crtc_set_config(struct drm_mode_set *set)
9987 {
9988         struct drm_device *dev;
9989         struct drm_mode_set save_set;
9990         struct intel_set_config *config;
9991         int ret;
9992
9993         BUG_ON(!set);
9994         BUG_ON(!set->crtc);
9995         BUG_ON(!set->crtc->helper_private);
9996
9997         /* Enforce sane interface api - has been abused by the fb helper. */
9998         BUG_ON(!set->mode && set->fb);
9999         BUG_ON(set->fb && set->num_connectors == 0);
10000
10001         if (set->fb) {
10002                 DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
10003                                 set->crtc->base.id, set->fb->base.id,
10004                                 (int)set->num_connectors, set->x, set->y);
10005         } else {
10006                 DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
10007         }
10008
10009         dev = set->crtc->dev;
10010
10011         ret = -ENOMEM;
10012         config = kzalloc(sizeof(*config), GFP_KERNEL);
10013         if (!config)
10014                 goto out_config;
10015
10016         ret = intel_set_config_save_state(dev, config);
10017         if (ret)
10018                 goto out_config;
10019
10020         save_set.crtc = set->crtc;
10021         save_set.mode = &set->crtc->mode;
10022         save_set.x = set->crtc->x;
10023         save_set.y = set->crtc->y;
10024         save_set.fb = set->crtc->fb;
10025
10026         /* Compute whether we need a full modeset, only an fb base update or no
10027          * change at all. In the future we might also check whether only the
10028          * mode changed, e.g. for LVDS where we only change the panel fitter in
10029          * such cases. */
10030         intel_set_config_compute_mode_changes(set, config);
10031
10032         ret = intel_modeset_stage_output_state(dev, set, config);
10033         if (ret)
10034                 goto fail;
10035
10036         if (config->mode_changed) {
10037                 ret = intel_set_mode(set->crtc, set->mode,
10038                                      set->x, set->y, set->fb);
10039         } else if (config->fb_changed) {
10040                 intel_crtc_wait_for_pending_flips(set->crtc);
10041
10042                 ret = intel_pipe_set_base(set->crtc,
10043                                           set->x, set->y, set->fb);
10044                 /*
10045                  * In the fastboot case this may be our only check of the
10046                  * state after boot.  It would be better to only do it on
10047                  * the first update, but we don't have a nice way of doing that
10048                  * (and really, set_config isn't used much for high freq page
10049                  * flipping, so increasing its cost here shouldn't be a big
10050                  * deal).
10051                  */
10052                 if (i915_fastboot && ret == 0)
10053                         intel_modeset_check_state(set->crtc->dev);
10054         }
10055
10056         if (ret) {
10057                 DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
10058                               set->crtc->base.id, ret);
10059 fail:
10060                 intel_set_config_restore_state(dev, config);
10061
10062                 /* Try to restore the config */
10063                 if (config->mode_changed &&
10064                     intel_set_mode(save_set.crtc, save_set.mode,
10065                                    save_set.x, save_set.y, save_set.fb))
10066                         DRM_ERROR("failed to restore config after modeset failure\n");
10067         }
10068
10069 out_config:
10070         intel_set_config_free(config);
10071         return ret;
10072 }
10073
10074 static const struct drm_crtc_funcs intel_crtc_funcs = {
10075         .cursor_set = intel_crtc_cursor_set,
10076         .cursor_move = intel_crtc_cursor_move,
10077         .gamma_set = intel_crtc_gamma_set,
10078         .set_config = intel_crtc_set_config,
10079         .destroy = intel_crtc_destroy,
10080         .page_flip = intel_crtc_page_flip,
10081 };
10082
10083 static void intel_cpu_pll_init(struct drm_device *dev)
10084 {
10085         if (HAS_DDI(dev))
10086                 intel_ddi_pll_init(dev);
10087 }
10088
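/*
 * Read back the PCH DPLL, FP0 and FP1 registers for this shared DPLL and
 * report whether its VCO is currently enabled.
 */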
10089 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
10090                                       struct intel_shared_dpll *pll,
10091                                       struct intel_dpll_hw_state *hw_state)
10092 {
10093         uint32_t val;
10094
10095         val = I915_READ(PCH_DPLL(pll->id));
10096         hw_state->dpll = val;
10097         hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
10098         hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
10099
10100         return val & DPLL_VCO_ENABLE;
10101 }
10102
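/*
 * Program the precomputed FP0/FP1 divisors; the DPLL control register
 * itself is written later, in the enable hook.
 */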
10103 static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
10104                                   struct intel_shared_dpll *pll)
10105 {
10106         I915_WRITE(PCH_FP0(pll->id), pll->hw_state.fp0);
10107         I915_WRITE(PCH_FP1(pll->id), pll->hw_state.fp1);
10108 }
10109
10110 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
10111                                 struct intel_shared_dpll *pll)
10112 {
10113         /* PCH refclock must be enabled first */
10114         ibx_assert_pch_refclk_enabled(dev_priv);
10115
10116         I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
10117
10118         /* Wait for the clocks to stabilize. */
10119         POSTING_READ(PCH_DPLL(pll->id));
10120         udelay(150);
10121
10122         /* The pixel multiplier can only be updated once the
10123          * DPLL is enabled and the clocks are stable.
10124          *
10125          * So write it again.
10126          */
10127         I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
10128         POSTING_READ(PCH_DPLL(pll->id));
10129         udelay(200);
10130 }
10131
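/*
 * Disable a shared PCH DPLL, asserting first that no PCH transcoder mapped
 * to this DPLL is still running.
 */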
10132 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
10133                                  struct intel_shared_dpll *pll)
10134 {
10135         struct drm_device *dev = dev_priv->dev;
10136         struct intel_crtc *crtc;
10137
10138         /* Make sure no transcoder is still depending on us. */
10139         list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
10140                 if (intel_crtc_to_shared_dpll(crtc) == pll)
10141                         assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
10142         }
10143
10144         I915_WRITE(PCH_DPLL(pll->id), 0);
10145         POSTING_READ(PCH_DPLL(pll->id));
10146         udelay(200);
10147 }
10148
10149 static char *ibx_pch_dpll_names[] = {
10150         "PCH DPLL A",
10151         "PCH DPLL B",
10152 };
10153
10154 static void ibx_pch_dpll_init(struct drm_device *dev)
10155 {
10156         struct drm_i915_private *dev_priv = dev->dev_private;
10157         int i;
10158
10159         dev_priv->num_shared_dpll = 2;
10160
10161         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
10162                 dev_priv->shared_dplls[i].id = i;
10163                 dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
10164                 dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
10165                 dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
10166                 dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
10167                 dev_priv->shared_dplls[i].get_hw_state =
10168                         ibx_pch_dpll_get_hw_state;
10169         }
10170 }
10171
10172 static void intel_shared_dpll_init(struct drm_device *dev)
10173 {
10174         struct drm_i915_private *dev_priv = dev->dev_private;
10175
10176         if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
10177                 ibx_pch_dpll_init(dev);
10178         else
10179                 dev_priv->num_shared_dpll = 0;
10180
10181         BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
10182 }
10183
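/*
 * Allocate and register a CRTC for the given pipe: the gamma LUTs start as
 * identity ramps, the plane normally matches the pipe (with the gen2/3 FBC
 * swap noted below), and the pipe/plane -> crtc lookup tables are filled in.
 */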
10184 static void intel_crtc_init(struct drm_device *dev, int pipe)
10185 {
10186         drm_i915_private_t *dev_priv = dev->dev_private;
10187         struct intel_crtc *intel_crtc;
10188         int i;
10189
10190         intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
10191         if (intel_crtc == NULL)
10192                 return;
10193
10194         drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
10195
10196         drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
10197         for (i = 0; i < 256; i++) {
10198                 intel_crtc->lut_r[i] = i;
10199                 intel_crtc->lut_g[i] = i;
10200                 intel_crtc->lut_b[i] = i;
10201         }
10202
10203         /*
10204          * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
10205          * are hooked to pipe B. Hence we want plane A feeding pipe B.
10206          */
10207         intel_crtc->pipe = pipe;
10208         intel_crtc->plane = pipe;
10209         if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
10210                 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
10211                 intel_crtc->plane = !pipe;
10212         }
10213
10214         BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
10215                dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
10216         dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
10217         dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
10218
10219         drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
10220 }
10221
10222 enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
10223 {
10224         struct drm_encoder *encoder = connector->base.encoder;
10225
10226         WARN_ON(!mutex_is_locked(&connector->base.dev->mode_config.mutex));
10227
10228         if (!encoder)
10229                 return INVALID_PIPE;
10230
10231         return to_intel_crtc(encoder->crtc)->pipe;
10232 }
10233
10234 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
10235                                 struct drm_file *file)
10236 {
10237         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
10238         struct drm_mode_object *drmmode_obj;
10239         struct intel_crtc *crtc;
10240
10241         if (!drm_core_check_feature(dev, DRIVER_MODESET))
10242                 return -ENODEV;
10243
10244         drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
10245                         DRM_MODE_OBJECT_CRTC);
10246
10247         if (!drmmode_obj) {
10248                 DRM_ERROR("no such CRTC id\n");
10249                 return -ENOENT;
10250         }
10251
10252         crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
10253         pipe_from_crtc_id->pipe = crtc->pipe;
10254
10255         return 0;
10256 }
10257
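/*
 * Build the possible_clones bitmask for an encoder: the encoder itself is
 * always included, and any other encoder is added when both are marked
 * cloneable, since the hardware has a single cloning MUX.
 */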
10258 static int intel_encoder_clones(struct intel_encoder *encoder)
10259 {
10260         struct drm_device *dev = encoder->base.dev;
10261         struct intel_encoder *source_encoder;
10262         int index_mask = 0;
10263         int entry = 0;
10264
10265         list_for_each_entry(source_encoder,
10266                             &dev->mode_config.encoder_list, base.head) {
10267
10268                 if (encoder == source_encoder)
10269                         index_mask |= (1 << entry);
10270
10271                 /* Intel hw has only one MUX where encoders could be cloned. */
10272                 if (encoder->cloneable && source_encoder->cloneable)
10273                         index_mask |= (1 << entry);
10274
10275                 entry++;
10276         }
10277
10278         return index_mask;
10279 }
10280
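/*
 * Detect eDP on port A: only mobile parts have it, DP_A must report the
 * DETECTED bit, and on gen5 the port must not be fused off.
 */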
10281 static bool has_edp_a(struct drm_device *dev)
10282 {
10283         struct drm_i915_private *dev_priv = dev->dev_private;
10284
10285         if (!IS_MOBILE(dev))
10286                 return false;
10287
10288         if ((I915_READ(DP_A) & DP_DETECTED) == 0)
10289                 return false;
10290
10291         if (IS_GEN5(dev) &&
10292             (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
10293                 return false;
10294
10295         return true;
10296 }
10297
10298 const char *intel_output_name(int output)
10299 {
10300         static const char *names[] = {
10301                 [INTEL_OUTPUT_UNUSED] = "Unused",
10302                 [INTEL_OUTPUT_ANALOG] = "Analog",
10303                 [INTEL_OUTPUT_DVO] = "DVO",
10304                 [INTEL_OUTPUT_SDVO] = "SDVO",
10305                 [INTEL_OUTPUT_LVDS] = "LVDS",
10306                 [INTEL_OUTPUT_TVOUT] = "TV",
10307                 [INTEL_OUTPUT_HDMI] = "HDMI",
10308                 [INTEL_OUTPUT_DISPLAYPORT] = "DisplayPort",
10309                 [INTEL_OUTPUT_EDP] = "eDP",
10310                 [INTEL_OUTPUT_DSI] = "DSI",
10311                 [INTEL_OUTPUT_UNKNOWN] = "Unknown",
10312         };
10313
10314         if (output < 0 || output >= ARRAY_SIZE(names) || !names[output])
10315                 return "Invalid";
10316
10317         return names[output];
10318 }
10319
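/*
 * Probe and register all output encoders for this platform: DDI ports on
 * Haswell-class hardware, PCH-attached SDVO/HDMI/DP on PCH-split parts,
 * VLV HDMI/DP/DSI, the pre-PCH SDVO/HDMI/DP combinations, or DVO on gen2.
 * Afterwards possible_crtcs and possible_clones are filled in for each
 * encoder and the PCH reference clock is configured.
 */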
10320 static void intel_setup_outputs(struct drm_device *dev)
10321 {
10322         struct drm_i915_private *dev_priv = dev->dev_private;
10323         struct intel_encoder *encoder;
10324         bool dpd_is_edp = false;
10325
10326         intel_lvds_init(dev);
10327
10328         if (!IS_ULT(dev))
10329                 intel_crt_init(dev);
10330
10331         if (HAS_DDI(dev)) {
10332                 int found;
10333
10334                 /* Haswell uses DDI functions to detect digital outputs */
10335                 found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
10336                 /* DDI A only supports eDP */
10337                 if (found)
10338                         intel_ddi_init(dev, PORT_A);
10339
10340                 /* DDI B, C and D detection is indicated by the SFUSE_STRAP
10341                  * register */
10342                 found = I915_READ(SFUSE_STRAP);
10343
10344                 if (found & SFUSE_STRAP_DDIB_DETECTED)
10345                         intel_ddi_init(dev, PORT_B);
10346                 if (found & SFUSE_STRAP_DDIC_DETECTED)
10347                         intel_ddi_init(dev, PORT_C);
10348                 if (found & SFUSE_STRAP_DDID_DETECTED)
10349                         intel_ddi_init(dev, PORT_D);
10350         } else if (HAS_PCH_SPLIT(dev)) {
10351                 int found;
10352                 dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
10353
10354                 if (has_edp_a(dev))
10355                         intel_dp_init(dev, DP_A, PORT_A);
10356
10357                 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
10358                         /* PCH SDVOB multiplex with HDMIB */
10359                         found = intel_sdvo_init(dev, PCH_SDVOB, true);
10360                         if (!found)
10361                                 intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
10362                         if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
10363                                 intel_dp_init(dev, PCH_DP_B, PORT_B);
10364                 }
10365
10366                 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
10367                         intel_hdmi_init(dev, PCH_HDMIC, PORT_C);
10368
10369                 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
10370                         intel_hdmi_init(dev, PCH_HDMID, PORT_D);
10371
10372                 if (I915_READ(PCH_DP_C) & DP_DETECTED)
10373                         intel_dp_init(dev, PCH_DP_C, PORT_C);
10374
10375                 if (I915_READ(PCH_DP_D) & DP_DETECTED)
10376                         intel_dp_init(dev, PCH_DP_D, PORT_D);
10377         } else if (IS_VALLEYVIEW(dev)) {
10378                 if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
10379                         intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
10380                                         PORT_B);
10381                         if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
10382                                 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
10383                 }
10384
10385                 if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
10386                         intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
10387                                         PORT_C);
10388                         if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
10389                                 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
10390                 }
10391
10392                 intel_dsi_init(dev);
10393         } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
10394                 bool found = false;
10395
10396                 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
10397                         DRM_DEBUG_KMS("probing SDVOB\n");
10398                         found = intel_sdvo_init(dev, GEN3_SDVOB, true);
10399                         if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
10400                                 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
10401                                 intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
10402                         }
10403
10404                         if (!found && SUPPORTS_INTEGRATED_DP(dev))
10405                                 intel_dp_init(dev, DP_B, PORT_B);
10406                 }
10407
10408                 /* Before G4X, SDVOC doesn't have its own detect register */
10409
10410                 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
10411                         DRM_DEBUG_KMS("probing SDVOC\n");
10412                         found = intel_sdvo_init(dev, GEN3_SDVOC, false);
10413                 }
10414
10415                 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
10416
10417                         if (SUPPORTS_INTEGRATED_HDMI(dev)) {
10418                                 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
10419                                 intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
10420                         }
10421                         if (SUPPORTS_INTEGRATED_DP(dev))
10422                                 intel_dp_init(dev, DP_C, PORT_C);
10423                 }
10424
10425                 if (SUPPORTS_INTEGRATED_DP(dev) &&
10426                     (I915_READ(DP_D) & DP_DETECTED))
10427                         intel_dp_init(dev, DP_D, PORT_D);
10428         } else if (IS_GEN2(dev))
10429                 intel_dvo_init(dev);
10430
10431         if (SUPPORTS_TV(dev))
10432                 intel_tv_init(dev);
10433
10434         list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
10435                 encoder->base.possible_crtcs = encoder->crtc_mask;
10436                 encoder->base.possible_clones =
10437                         intel_encoder_clones(encoder);
10438         }
10439
10440         intel_init_pch_refclk(dev);
10441
10442         drm_helper_move_panel_connectors_to_head(dev);
10443 }
10444
10445 void intel_framebuffer_fini(struct intel_framebuffer *fb)
10446 {
10447         drm_framebuffer_cleanup(&fb->base);
10448         WARN_ON(!fb->obj->framebuffer_references--);
10449         drm_gem_object_unreference_unlocked(&fb->obj->base);
10450 }
10451
10452 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
10453 {
10454         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
10455
10456         intel_framebuffer_fini(intel_fb);
10457         kfree(intel_fb);
10458 }
10459
10460 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
10461                                                 struct drm_file *file,
10462                                                 unsigned int *handle)
10463 {
10464         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
10465         struct drm_i915_gem_object *obj = intel_fb->obj;
10466
10467         return drm_gem_handle_create(file, &obj->base, handle);
10468 }
10469
10470 static const struct drm_framebuffer_funcs intel_fb_funcs = {
10471         .destroy = intel_user_framebuffer_destroy,
10472         .create_handle = intel_user_framebuffer_create_handle,
10473 };
10474
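/*
 * Validate a framebuffer request against hardware limits (tiling mode,
 * pitch alignment and per-generation pitch limit, supported pixel formats,
 * object size) and, if everything checks out, initialize the
 * drm_framebuffer and account for the extra framebuffer reference on the
 * backing GEM object.
 */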
10475 int intel_framebuffer_init(struct drm_device *dev,
10476                            struct intel_framebuffer *intel_fb,
10477                            struct drm_mode_fb_cmd2 *mode_cmd,
10478                            struct drm_i915_gem_object *obj)
10479 {
10480         int aligned_height;
10481         int pitch_limit;
10482         int ret;
10483
10484         WARN_ON(!mutex_is_locked(&dev->struct_mutex));
10485
10486         if (obj->tiling_mode == I915_TILING_Y) {
10487                 DRM_DEBUG("hardware does not support tiling Y\n");
10488                 return -EINVAL;
10489         }
10490
10491         if (mode_cmd->pitches[0] & 63) {
10492                 DRM_DEBUG("pitch (%d) must be 64-byte aligned\n",
10493                           mode_cmd->pitches[0]);
10494                 return -EINVAL;
10495         }
10496
10497         if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) {
10498                 pitch_limit = 32*1024;
10499         } else if (INTEL_INFO(dev)->gen >= 4) {
10500                 if (obj->tiling_mode)
10501                         pitch_limit = 16*1024;
10502                 else
10503                         pitch_limit = 32*1024;
10504         } else if (INTEL_INFO(dev)->gen >= 3) {
10505                 if (obj->tiling_mode)
10506                         pitch_limit = 8*1024;
10507                 else
10508                         pitch_limit = 16*1024;
10509         } else
10510                 /* XXX DSPC is limited to 4k tiled */
10511                 pitch_limit = 8*1024;
10512
10513         if (mode_cmd->pitches[0] > pitch_limit) {
10514                 DRM_DEBUG("%s pitch (%d) must be at most %d\n",
10515                           obj->tiling_mode ? "tiled" : "linear",
10516                           mode_cmd->pitches[0], pitch_limit);
10517                 return -EINVAL;
10518         }
10519
10520         if (obj->tiling_mode != I915_TILING_NONE &&
10521             mode_cmd->pitches[0] != obj->stride) {
10522                 DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
10523                           mode_cmd->pitches[0], obj->stride);
10524                 return -EINVAL;
10525         }
10526
10527         /* Reject formats not supported by any plane early. */
10528         switch (mode_cmd->pixel_format) {
10529         case DRM_FORMAT_C8:
10530         case DRM_FORMAT_RGB565:
10531         case DRM_FORMAT_XRGB8888:
10532         case DRM_FORMAT_ARGB8888:
10533                 break;
10534         case DRM_FORMAT_XRGB1555:
10535         case DRM_FORMAT_ARGB1555:
10536                 if (INTEL_INFO(dev)->gen > 3) {
10537                         DRM_DEBUG("unsupported pixel format: %s\n",
10538                                   drm_get_format_name(mode_cmd->pixel_format));
10539                         return -EINVAL;
10540                 }
10541                 break;
10542         case DRM_FORMAT_XBGR8888:
10543         case DRM_FORMAT_ABGR8888:
10544         case DRM_FORMAT_XRGB2101010:
10545         case DRM_FORMAT_ARGB2101010:
10546         case DRM_FORMAT_XBGR2101010:
10547         case DRM_FORMAT_ABGR2101010:
10548                 if (INTEL_INFO(dev)->gen < 4) {
10549                         DRM_DEBUG("unsupported pixel format: %s\n",
10550                                   drm_get_format_name(mode_cmd->pixel_format));
10551                         return -EINVAL;
10552                 }
10553                 break;
10554         case DRM_FORMAT_YUYV:
10555         case DRM_FORMAT_UYVY:
10556         case DRM_FORMAT_YVYU:
10557         case DRM_FORMAT_VYUY:
10558                 if (INTEL_INFO(dev)->gen < 5) {
10559                         DRM_DEBUG("unsupported pixel format: %s\n",
10560                                   drm_get_format_name(mode_cmd->pixel_format));
10561                         return -EINVAL;
10562                 }
10563                 break;
10564         default:
10565                 DRM_DEBUG("unsupported pixel format: %s\n",
10566                           drm_get_format_name(mode_cmd->pixel_format));
10567                 return -EINVAL;
10568         }
10569
10570         /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
10571         if (mode_cmd->offsets[0] != 0)
10572                 return -EINVAL;
10573
10574         aligned_height = intel_align_height(dev, mode_cmd->height,
10575                                             obj->tiling_mode);
10576         /* FIXME drm helper for size checks (especially planar formats)? */
10577         if (obj->base.size < aligned_height * mode_cmd->pitches[0])
10578                 return -EINVAL;
10579
10580         drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
10581         intel_fb->obj = obj;
10582         intel_fb->obj->framebuffer_references++;
10583
10584         ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
10585         if (ret) {
10586                 DRM_ERROR("framebuffer init failed %d\n", ret);
10587                 return ret;
10588         }
10589
10590         return 0;
10591 }
10592
10593 static struct drm_framebuffer *
10594 intel_user_framebuffer_create(struct drm_device *dev,
10595                               struct drm_file *filp,
10596                               struct drm_mode_fb_cmd2 *mode_cmd)
10597 {
10598         struct drm_i915_gem_object *obj;
10599
10600         obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
10601                                                 mode_cmd->handles[0]));
10602         if (&obj->base == NULL)
10603                 return ERR_PTR(-ENOENT);
10604
10605         return intel_framebuffer_create(dev, mode_cmd, obj);
10606 }
10607
10608 #ifndef CONFIG_DRM_I915_FBDEV
10609 static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
10610 {
10611 }
10612 #endif
10613
10614 static const struct drm_mode_config_funcs intel_mode_funcs = {
10615         .fb_create = intel_user_framebuffer_create,
10616         .output_poll_changed = intel_fbdev_output_poll_changed,
10617 };
10618
10619 /* Set up chip specific display functions */
10620 static void intel_init_display(struct drm_device *dev)
10621 {
10622         struct drm_i915_private *dev_priv = dev->dev_private;
10623
10624         if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
10625                 dev_priv->display.find_dpll = g4x_find_best_dpll;
10626         else if (IS_VALLEYVIEW(dev))
10627                 dev_priv->display.find_dpll = vlv_find_best_dpll;
10628         else if (IS_PINEVIEW(dev))
10629                 dev_priv->display.find_dpll = pnv_find_best_dpll;
10630         else
10631                 dev_priv->display.find_dpll = i9xx_find_best_dpll;
10632
10633         if (HAS_DDI(dev)) {
10634                 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
10635                 dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
10636                 dev_priv->display.crtc_enable = haswell_crtc_enable;
10637                 dev_priv->display.crtc_disable = haswell_crtc_disable;
10638                 dev_priv->display.off = haswell_crtc_off;
10639                 dev_priv->display.update_plane = ironlake_update_plane;
10640         } else if (HAS_PCH_SPLIT(dev)) {
10641                 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
10642                 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
10643                 dev_priv->display.crtc_enable = ironlake_crtc_enable;
10644                 dev_priv->display.crtc_disable = ironlake_crtc_disable;
10645                 dev_priv->display.off = ironlake_crtc_off;
10646                 dev_priv->display.update_plane = ironlake_update_plane;
10647         } else if (IS_VALLEYVIEW(dev)) {
10648                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
10649                 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
10650                 dev_priv->display.crtc_enable = valleyview_crtc_enable;
10651                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
10652                 dev_priv->display.off = i9xx_crtc_off;
10653                 dev_priv->display.update_plane = i9xx_update_plane;
10654         } else {
10655                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
10656                 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
10657                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
10658                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
10659                 dev_priv->display.off = i9xx_crtc_off;
10660                 dev_priv->display.update_plane = i9xx_update_plane;
10661         }
10662
10663         /* Returns the core display clock speed */
10664         if (IS_VALLEYVIEW(dev))
10665                 dev_priv->display.get_display_clock_speed =
10666                         valleyview_get_display_clock_speed;
10667         else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
10668                 dev_priv->display.get_display_clock_speed =
10669                         i945_get_display_clock_speed;
10670         else if (IS_I915G(dev))
10671                 dev_priv->display.get_display_clock_speed =
10672                         i915_get_display_clock_speed;
10673         else if (IS_I945GM(dev) || IS_845G(dev))
10674                 dev_priv->display.get_display_clock_speed =
10675                         i9xx_misc_get_display_clock_speed;
10676         else if (IS_PINEVIEW(dev))
10677                 dev_priv->display.get_display_clock_speed =
10678                         pnv_get_display_clock_speed;
10679         else if (IS_I915GM(dev))
10680                 dev_priv->display.get_display_clock_speed =
10681                         i915gm_get_display_clock_speed;
10682         else if (IS_I865G(dev))
10683                 dev_priv->display.get_display_clock_speed =
10684                         i865_get_display_clock_speed;
10685         else if (IS_I85X(dev))
10686                 dev_priv->display.get_display_clock_speed =
10687                         i855_get_display_clock_speed;
10688         else /* 852, 830 */
10689                 dev_priv->display.get_display_clock_speed =
10690                         i830_get_display_clock_speed;
10691
10692         if (HAS_PCH_SPLIT(dev)) {
10693                 if (IS_GEN5(dev)) {
10694                         dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
10695                         dev_priv->display.write_eld = ironlake_write_eld;
10696                 } else if (IS_GEN6(dev)) {
10697                         dev_priv->display.fdi_link_train = gen6_fdi_link_train;
10698                         dev_priv->display.write_eld = ironlake_write_eld;
10699                 } else if (IS_IVYBRIDGE(dev)) {
10700                         /* FIXME: detect B0+ stepping and use auto training */
10701                         dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
10702                         dev_priv->display.write_eld = ironlake_write_eld;
10703                         dev_priv->display.modeset_global_resources =
10704                                 ivb_modeset_global_resources;
10705                 } else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
10706                         dev_priv->display.fdi_link_train = hsw_fdi_link_train;
10707                         dev_priv->display.write_eld = haswell_write_eld;
10708                         dev_priv->display.modeset_global_resources =
10709                                 haswell_modeset_global_resources;
10710                 }
10711         } else if (IS_G4X(dev)) {
10712                 dev_priv->display.write_eld = g4x_write_eld;
10713         } else if (IS_VALLEYVIEW(dev)) {
10714                 dev_priv->display.modeset_global_resources =
10715                         valleyview_modeset_global_resources;
10716                 dev_priv->display.write_eld = ironlake_write_eld;
10717         }
10718
10719         /* Default just returns -ENODEV to indicate unsupported */
10720         dev_priv->display.queue_flip = intel_default_queue_flip;
10721
10722         switch (INTEL_INFO(dev)->gen) {
10723         case 2:
10724                 dev_priv->display.queue_flip = intel_gen2_queue_flip;
10725                 break;
10726
10727         case 3:
10728                 dev_priv->display.queue_flip = intel_gen3_queue_flip;
10729                 break;
10730
10731         case 4:
10732         case 5:
10733                 dev_priv->display.queue_flip = intel_gen4_queue_flip;
10734                 break;
10735
10736         case 6:
10737                 dev_priv->display.queue_flip = intel_gen6_queue_flip;
10738                 break;
10739         case 7:
10740         case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
10741                 dev_priv->display.queue_flip = intel_gen7_queue_flip;
10742                 break;
10743         }
10744
10745         intel_panel_init_backlight_funcs(dev);
10746 }
10747
10748 /*
10749  * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
10750  * resume, or other times.  This quirk makes sure that's the case for
10751  * affected systems.
10752  */
10753 static void quirk_pipea_force(struct drm_device *dev)
10754 {
10755         struct drm_i915_private *dev_priv = dev->dev_private;
10756
10757         dev_priv->quirks |= QUIRK_PIPEA_FORCE;
10758         DRM_INFO("applying pipe a force quirk\n");
10759 }
10760
10761 /*
10762  * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
10763  */
10764 static void quirk_ssc_force_disable(struct drm_device *dev)
10765 {
10766         struct drm_i915_private *dev_priv = dev->dev_private;
10767         dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
10768         DRM_INFO("applying lvds SSC disable quirk\n");
10769 }
10770
10771 /*
10772  * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
10773  * brightness value
10774  */
10775 static void quirk_invert_brightness(struct drm_device *dev)
10776 {
10777         struct drm_i915_private *dev_priv = dev->dev_private;
10778         dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
10779         DRM_INFO("applying inverted panel brightness quirk\n");
10780 }
10781
10782 struct intel_quirk {
10783         int device;
10784         int subsystem_vendor;
10785         int subsystem_device;
10786         void (*hook)(struct drm_device *dev);
10787 };
10788
10789 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
10790 struct intel_dmi_quirk {
10791         void (*hook)(struct drm_device *dev);
10792         const struct dmi_system_id (*dmi_id_list)[];
10793 };
10794
10795 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
10796 {
10797         DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
10798         return 1;
10799 }
10800
10801 static const struct intel_dmi_quirk intel_dmi_quirks[] = {
10802         {
10803                 .dmi_id_list = &(const struct dmi_system_id[]) {
10804                         {
10805                                 .callback = intel_dmi_reverse_brightness,
10806                                 .ident = "NCR Corporation",
10807                                 .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
10808                                             DMI_MATCH(DMI_PRODUCT_NAME, ""),
10809                                 },
10810                         },
10811                         { }  /* terminating entry */
10812                 },
10813                 .hook = quirk_invert_brightness,
10814         },
10815 };
10816
10817 static struct intel_quirk intel_quirks[] = {
10818         /* HP Mini needs pipe A force quirk (LP: #322104) */
10819         { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
10820
10821         /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
10822         { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
10823
10824         /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
10825         { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
10826
10827         /* 830 needs to leave pipe A & dpll A up */
10828         { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
10829
10830         /* Lenovo U160 cannot use SSC on LVDS */
10831         { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
10832
10833         /* Sony Vaio Y cannot use SSC on LVDS */
10834         { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
10835
10836         /* Acer Aspire 5734Z must invert backlight brightness */
10837         { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
10838
10839         /* Acer/eMachines G725 */
10840         { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
10841
10842         /* Acer/eMachines e725 */
10843         { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
10844
10845         /* Acer/Packard Bell NCL20 */
10846         { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
10847
10848         /* Acer Aspire 4736Z */
10849         { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
10850 };
10851
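/*
 * Walk the quirk tables and apply every hook whose PCI device/subsystem IDs
 * (or DMI match, for machines without useful subsystem IDs) match the
 * device we are driving.
 */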
10852 static void intel_init_quirks(struct drm_device *dev)
10853 {
10854         struct pci_dev *d = dev->pdev;
10855         int i;
10856
10857         for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
10858                 struct intel_quirk *q = &intel_quirks[i];
10859
10860                 if (d->device == q->device &&
10861                     (d->subsystem_vendor == q->subsystem_vendor ||
10862                      q->subsystem_vendor == PCI_ANY_ID) &&
10863                     (d->subsystem_device == q->subsystem_device ||
10864                      q->subsystem_device == PCI_ANY_ID))
10865                         q->hook(dev);
10866         }
10867         for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
10868                 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
10869                         intel_dmi_quirks[i].hook(dev);
10870         }
10871 }
10872
10873 /* Disable the VGA plane that we never use */
10874 static void i915_disable_vga(struct drm_device *dev)
10875 {
10876         struct drm_i915_private *dev_priv = dev->dev_private;
10877         u8 sr1;
10878         u32 vga_reg = i915_vgacntrl_reg(dev);
10879
10880         vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
10881         outb(SR01, VGA_SR_INDEX);
10882         sr1 = inb(VGA_SR_DATA);
10883         outb(sr1 | 1<<5, VGA_SR_DATA);
10884         vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
10885         udelay(300);
10886
10887         I915_WRITE(vga_reg, VGA_DISP_DISABLE);
10888         POSTING_READ(vga_reg);
10889 }
10890
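/*
 * Re-initialize hardware state that is independent of the current mode:
 * DDI port setup, clock gating and DPIO, plus the GT power-saving features
 * (the latter taken under struct_mutex).
 */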
10891 void intel_modeset_init_hw(struct drm_device *dev)
10892 {
10893         intel_prepare_ddi(dev);
10894
10895         intel_init_clock_gating(dev);
10896
10897         intel_reset_dpio(dev);
10898
10899         mutex_lock(&dev->struct_mutex);
10900         intel_enable_gt_powersave(dev);
10901         mutex_unlock(&dev->struct_mutex);
10902 }
10903
10904 void intel_modeset_suspend_hw(struct drm_device *dev)
10905 {
10906         intel_suspend_hw(dev);
10907 }
10908
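/*
 * One-time KMS initialization: set up the mode_config limits and hooks,
 * apply quirks, initialize power management and the per-platform display
 * vtable, create a CRTC and its sprite planes for every pipe, initialize
 * the PLLs, register the outputs and finally read out the BIOS-programmed
 * hardware state, holding mode_config.mutex around the readout.
 */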
10909 void intel_modeset_init(struct drm_device *dev)
10910 {
10911         struct drm_i915_private *dev_priv = dev->dev_private;
10912         int i, j, ret;
10913
10914         drm_mode_config_init(dev);
10915
10916         dev->mode_config.min_width = 0;
10917         dev->mode_config.min_height = 0;
10918
10919         dev->mode_config.preferred_depth = 24;
10920         dev->mode_config.prefer_shadow = 1;
10921
10922         dev->mode_config.funcs = &intel_mode_funcs;
10923
10924         intel_init_quirks(dev);
10925
10926         intel_init_pm(dev);
10927
10928         if (INTEL_INFO(dev)->num_pipes == 0)
10929                 return;
10930
10931         intel_init_display(dev);
10932
10933         if (IS_GEN2(dev)) {
10934                 dev->mode_config.max_width = 2048;
10935                 dev->mode_config.max_height = 2048;
10936         } else if (IS_GEN3(dev)) {
10937                 dev->mode_config.max_width = 4096;
10938                 dev->mode_config.max_height = 4096;
10939         } else {
10940                 dev->mode_config.max_width = 8192;
10941                 dev->mode_config.max_height = 8192;
10942         }
10943         dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
10944
10945         DRM_DEBUG_KMS("%d display pipe%s available.\n",
10946                       INTEL_INFO(dev)->num_pipes,
10947                       INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
10948
10949         for_each_pipe(i) {
10950                 intel_crtc_init(dev, i);
10951                 for (j = 0; j < dev_priv->num_plane; j++) {
10952                         ret = intel_plane_init(dev, i, j);
10953                         if (ret)
10954                                 DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
10955                                               pipe_name(i), sprite_name(i, j), ret);
10956                 }
10957         }
10958
10959         intel_init_dpio(dev);
10960         intel_reset_dpio(dev);
10961
10962         intel_cpu_pll_init(dev);
10963         intel_shared_dpll_init(dev);
10964
10965         /* Just disable it once at startup */
10966         i915_disable_vga(dev);
10967         intel_setup_outputs(dev);
10968
10969         /* Just in case the BIOS is doing something questionable. */
10970         intel_disable_fbc(dev);
10971
10972         mutex_lock(&dev->mode_config.mutex);
10973         intel_modeset_setup_hw_state(dev, false);
10974         mutex_unlock(&dev->mode_config.mutex);
10975 }
10976
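/*
 * Drop the connector -> encoder -> crtc links for a connector that is being
 * forced off during state sanitization, leaving it with DPMS off.
 */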
10977 static void
10978 intel_connector_break_all_links(struct intel_connector *connector)
10979 {
10980         connector->base.dpms = DRM_MODE_DPMS_OFF;
10981         connector->base.encoder = NULL;
10982         connector->encoder->connectors_active = false;
10983         connector->encoder->base.crtc = NULL;
10984 }
10985
10986 static void intel_enable_pipe_a(struct drm_device *dev)
10987 {
10988         struct intel_connector *connector;
10989         struct drm_connector *crt = NULL;
10990         struct intel_load_detect_pipe load_detect_temp;
10991
10992         /* We can't just switch on pipe A; we need to set things up with a
10993          * proper mode and output configuration. As a gross hack, enable pipe A
10994          * by enabling the load detect pipe once. */
10995         list_for_each_entry(connector,
10996                             &dev->mode_config.connector_list,
10997                             base.head) {
10998                 if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
10999                         crt = &connector->base;
11000                         break;
11001                 }
11002         }
11003
11004         if (!crt)
11005                 return;
11006
11007         if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp))
11008                 intel_release_load_detect_pipe(crt, &load_detect_temp);
11011 }
11012
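/*
 * On single-pipe hardware the mapping is trivially fine; otherwise verify
 * that the *other* display plane is not enabled and selecting this crtc's
 * pipe, which would indicate the BIOS left a crossed plane -> pipe mapping.
 */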
11013 static bool
11014 intel_check_plane_mapping(struct intel_crtc *crtc)
11015 {
11016         struct drm_device *dev = crtc->base.dev;
11017         struct drm_i915_private *dev_priv = dev->dev_private;
11018         u32 reg, val;
11019
11020         if (INTEL_INFO(dev)->num_pipes == 1)
11021                 return true;
11022
11023         reg = DSPCNTR(!crtc->plane);
11024         val = I915_READ(reg);
11025
11026         if ((val & DISPLAY_PLANE_ENABLE) &&
11027             (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
11028                 return false;
11029
11030         return true;
11031 }
11032
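/*
 * Bring a CRTC left behind by the BIOS into a state the driver can work
 * with: clear debug frame-start delays, fix a crossed plane -> pipe mapping
 * on gen2/3, force pipe A on for the quirked systems, and make the software
 * enabled/active bookkeeping consistent with the hardware.
 */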
11033 static void intel_sanitize_crtc(struct intel_crtc *crtc)
11034 {
11035         struct drm_device *dev = crtc->base.dev;
11036         struct drm_i915_private *dev_priv = dev->dev_private;
11037         u32 reg;
11038
11039         /* Clear any frame start delays used for debugging left by the BIOS */
11040         reg = PIPECONF(crtc->config.cpu_transcoder);
11041         I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
11042
11043         /* We need to sanitize the plane -> pipe mapping first because this will
11044          * disable the crtc (and hence change the state) if it is wrong. Note
11045          * that gen4+ has a fixed plane -> pipe mapping.  */
11046         if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
11047                 struct intel_connector *connector;
11048                 bool plane;
11049
11050                 DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
11051                               crtc->base.base.id);
11052
11053                 /* Pipe has the wrong plane attached and the plane is active.
11054                  * Temporarily change the plane mapping and disable everything
11055                  * ...  */
11056                 plane = crtc->plane;
11057                 crtc->plane = !plane;
11058                 dev_priv->display.crtc_disable(&crtc->base);
11059                 crtc->plane = plane;
11060
11061                 /* ... and break all links. */
11062                 list_for_each_entry(connector, &dev->mode_config.connector_list,
11063                                     base.head) {
11064                         if (connector->encoder->base.crtc != &crtc->base)
11065                                 continue;
11066
11067                         intel_connector_break_all_links(connector);
11068                 }
11069
11070                 WARN_ON(crtc->active);
11071                 crtc->base.enabled = false;
11072         }
11073
11074         if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
11075             crtc->pipe == PIPE_A && !crtc->active) {
11076                 /* BIOS forgot to enable pipe A; this mostly happens after
11077                  * resume. Force-enable the pipe to fix this; the update_dpms
11078                  * call below will restore the pipe to the right state, but
11079                  * leave the required bits on. */
11080                 intel_enable_pipe_a(dev);
11081         }
11082
11083         /* Adjust the state of the output pipe according to whether we
11084          * have active connectors/encoders. */
11085         intel_crtc_update_dpms(&crtc->base);
11086
11087         if (crtc->active != crtc->base.enabled) {
11088                 struct intel_encoder *encoder;
11089
11090                 /* This can happen either due to bugs in the get_hw_state
11091                  * functions or because the pipe is force-enabled due to the
11092                  * pipe A quirk. */
11093                 DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
11094                               crtc->base.base.id,
11095                               crtc->base.enabled ? "enabled" : "disabled",
11096                               crtc->active ? "enabled" : "disabled");
11097
11098                 crtc->base.enabled = crtc->active;
11099
11100                 /* Because we only establish the connector -> encoder ->
11101                  * crtc links if something is active, this means the
11102                  * crtc is now deactivated. Break the links. Connector
11103                  * -> encoder links are only established when things are
11104                  * actually up, hence no need to break them. */
11105                 WARN_ON(crtc->active);
11106
11107                 for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
11108                         WARN_ON(encoder->connectors_active);
11109                         encoder->base.crtc = NULL;
11110                 }
11111         }
11112 }
11113
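/* Fix up an encoder that claims active connectors but has no active pipe
 * feeding it (typically fallout from the register restore on resume):
 * disable it and break its connector links so the state is consistently
 * off. */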
11114 static void intel_sanitize_encoder(struct intel_encoder *encoder)
11115 {
11116         struct intel_connector *connector;
11117         struct drm_device *dev = encoder->base.dev;
11118
11119         /* We need to check both for a crtc link (meaning that the
11120          * encoder is active and trying to read from a pipe) and the
11121          * pipe itself being active. */
11122         bool has_active_crtc = encoder->base.crtc &&
11123                 to_intel_crtc(encoder->base.crtc)->active;
11124
11125         if (encoder->connectors_active && !has_active_crtc) {
11126                 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
11127                               encoder->base.base.id,
11128                               drm_get_encoder_name(&encoder->base));
11129
11130                 /* Connector is active, but has no active pipe. This is
11131                  * fallout from our resume register restoring. Disable
11132                  * the encoder manually again. */
11133                 if (encoder->base.crtc) {
11134                         DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
11135                                       encoder->base.base.id,
11136                                       drm_get_encoder_name(&encoder->base));
11137                         encoder->disable(encoder);
11138                 }
11139
11140                 /* Inconsistent output/port/pipe state happens presumably due to
11141                  * a bug in one of the get_hw_state functions. Or someplace else
11142                  * in our code, like the register restore mess on resume. Clamp
11143                  * things to off as a safer default. */
11144                 list_for_each_entry(connector,
11145                                     &dev->mode_config.connector_list,
11146                                     base.head) {
11147                         if (connector->encoder != encoder)
11148                                 continue;
11149
11150                         intel_connector_break_all_links(connector);
11151                 }
11152         }
11153         /* Enabled encoders without active connectors will be fixed in
11154          * the crtc fixup. */
11155 }
11156
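/* Turn the legacy VGA plane back off if something (the BIOS, an early resume
 * path, ...) re-enabled it behind our back. */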
11157 void i915_redisable_vga(struct drm_device *dev)
11158 {
11159         struct drm_i915_private *dev_priv = dev->dev_private;
11160         u32 vga_reg = i915_vgacntrl_reg(dev);
11161
11162         /* This function can be called either from intel_modeset_setup_hw_state or
11163          * at a very early point in our resume sequence, where the power well
11164          * structures are not yet restored. Since this function is at a very
11165          * paranoid "someone might have enabled VGA while we were not looking"
11166          * level, just check if the power well is enabled instead of trying to
11167          * follow the "don't touch the power well if we don't need it" policy
11168          * the rest of the driver uses. */
11169         if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
11170             (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
11171                 return;
11172
11173         if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
11174                 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
11175                 i915_disable_vga(dev);
11176         }
11177 }
11178
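/* Read the current pipe, shared DPLL, encoder and connector state out of the
 * hardware and into the corresponding software tracking structures. */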
11179 static void intel_modeset_readout_hw_state(struct drm_device *dev)
11180 {
11181         struct drm_i915_private *dev_priv = dev->dev_private;
11182         enum pipe pipe;
11183         struct intel_crtc *crtc;
11184         struct intel_encoder *encoder;
11185         struct intel_connector *connector;
11186         int i;
11187
11188         list_for_each_entry(crtc, &dev->mode_config.crtc_list,
11189                             base.head) {
11190                 memset(&crtc->config, 0, sizeof(crtc->config));
11191
11192                 crtc->active = dev_priv->display.get_pipe_config(crtc,
11193                                                                  &crtc->config);
11194
11195                 crtc->base.enabled = crtc->active;
11196                 crtc->primary_enabled = crtc->active;
11197
11198                 DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
11199                               crtc->base.base.id,
11200                               crtc->active ? "enabled" : "disabled");
11201         }
11202
11203         /* FIXME: Smash this into the new shared dpll infrastructure. */
11204         if (HAS_DDI(dev))
11205                 intel_ddi_setup_hw_pll_state(dev);
11206
11207         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
11208                 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
11209
11210                 pll->on = pll->get_hw_state(dev_priv, pll, &pll->hw_state);
11211                 pll->active = 0;
11212                 list_for_each_entry(crtc, &dev->mode_config.crtc_list,
11213                                     base.head) {
11214                         if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
11215                                 pll->active++;
11216                 }
11217                 pll->refcount = pll->active;
11218
11219                 DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
11220                               pll->name, pll->refcount, pll->on);
11221         }
11222
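        /* Note which pipe, if any, each encoder is currently reading from. */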
11223         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
11224                             base.head) {
11225                 pipe = 0;
11226
11227                 if (encoder->get_hw_state(encoder, &pipe)) {
11228                         crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
11229                         encoder->base.crtc = &crtc->base;
11230                         encoder->get_config(encoder, &crtc->config);
11231                 } else {
11232                         encoder->base.crtc = NULL;
11233                 }
11234
11235                 encoder->connectors_active = false;
11236                 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
11237                               encoder->base.base.id,
11238                               drm_get_encoder_name(&encoder->base),
11239                               encoder->base.crtc ? "enabled" : "disabled",
11240                               pipe_name(pipe));
11241         }
11242
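        /* Mirror the connector state: connectors with active hardware are
         * marked DPMS on and linked to their encoders. */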
11243         list_for_each_entry(connector, &dev->mode_config.connector_list,
11244                             base.head) {
11245                 if (connector->get_hw_state(connector)) {
11246                         connector->base.dpms = DRM_MODE_DPMS_ON;
11247                         connector->encoder->connectors_active = true;
11248                         connector->base.encoder = &connector->encoder->base;
11249                 } else {
11250                         connector->base.dpms = DRM_MODE_DPMS_OFF;
11251                         connector->base.encoder = NULL;
11252                 }
11253                 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
11254                               connector->base.base.id,
11255                               drm_get_connector_name(&connector->base),
11256                               connector->base.encoder ? "enabled" : "disabled");
11257         }
11258 }
11259
11260 /* Scan out the current hw modeset state, sanitize it and map it into the drm
11261  * and i915 state tracking structures. */
11262 void intel_modeset_setup_hw_state(struct drm_device *dev,
11263                                   bool force_restore)
11264 {
11265         struct drm_i915_private *dev_priv = dev->dev_private;
11266         enum pipe pipe;
11267         struct intel_crtc *crtc;
11268         struct intel_encoder *encoder;
11269         int i;
11270
11271         intel_modeset_readout_hw_state(dev);
11272
11273         /*
11274          * Now that we have the config, copy it to each CRTC struct.
11275          * Note that this could go away if we move to using crtc_config
11276          * checking everywhere.
11277          */
11278         list_for_each_entry(crtc, &dev->mode_config.crtc_list,
11279                             base.head) {
11280                 if (crtc->active && i915_fastboot) {
11281                         intel_mode_from_pipe_config(&crtc->base.mode, &crtc->config);
                              DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
11282                                       crtc->base.base.id);
11283                         drm_mode_debug_printmodeline(&crtc->base.mode);
11284                 }
11285         }
11286
11287         /* HW state is read out, now we need to sanitize this mess. */
11288         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
11289                             base.head) {
11290                 intel_sanitize_encoder(encoder);
11291         }
11292
11293         for_each_pipe(pipe) {
11294                 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
11295                 intel_sanitize_crtc(crtc);
11296                 intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
11297         }
11298
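        /* Shut down any shared DPLLs the hardware left enabled but which no
         * active crtc is using anymore. */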
11299         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
11300                 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
11301
11302                 if (!pll->on || pll->active)
11303                         continue;
11304
11305                 DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
11306
11307                 pll->disable(dev_priv, pll);
11308                 pll->on = false;
11309         }
11310
11311         if (HAS_PCH_SPLIT(dev))
11312                 ilk_wm_get_hw_state(dev);
11313
11314         if (force_restore) {
11315                 i915_redisable_vga(dev);
11316
11317                 /*
11318                  * We need to use raw interfaces for restoring state to avoid
11319                  * checking (bogus) intermediate states.
11320                  */
11321                 for_each_pipe(pipe) {
11322                         struct drm_crtc *crtc =
11323                                 dev_priv->pipe_to_crtc_mapping[pipe];
11324
11325                         __intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
11326                                          crtc->fb);
11327                 }
11328         } else {
11329                 intel_modeset_update_staged_output_state(dev);
11330         }
11331
11332         intel_modeset_check_state(dev);
11333 }
11334
11335 void intel_modeset_gem_init(struct drm_device *dev)
11336 {
11337         intel_modeset_init_hw(dev);
11338
11339         intel_setup_overlay(dev);
11340 }
11341
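/* Driver unload: quiesce interrupts, hotplug work and output polling first,
 * then shut down FBC, GT powersave and RC6, and finally remove the backlight,
 * connector sysfs files and the mode config itself. */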
11342 void intel_modeset_cleanup(struct drm_device *dev)
11343 {
11344         struct drm_i915_private *dev_priv = dev->dev_private;
11345         struct drm_crtc *crtc;
11346         struct drm_connector *connector;
11347
11348         /*
11349          * Disable interrupts and polling first to avoid creating havoc.
11350          * Too much stuff here (turning off rps, connectors, ...) would
11351          * experience fancy races otherwise.
11352          */
11353         drm_irq_uninstall(dev);
11354         cancel_work_sync(&dev_priv->hotplug_work);
11355         /*
11356          * Due to the hpd irq storm handling the hotplug work can re-arm the
11357          * poll handlers. Hence disable polling after hpd handling is shut down.
11358          */
11359         drm_kms_helper_poll_fini(dev);
11360
11361         mutex_lock(&dev->struct_mutex);
11362
11363         intel_unregister_dsm_handler();
11364
11365         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
11366                 /* Skip inactive CRTCs */
11367                 if (!crtc->fb)
11368                         continue;
11369
11370                 intel_increase_pllclock(crtc);
11371         }
11372
11373         intel_disable_fbc(dev);
11374
11375         intel_disable_gt_powersave(dev);
11376
11377         ironlake_teardown_rc6(dev);
11378
11379         mutex_unlock(&dev->struct_mutex);
11380
11381         /* flush any delayed tasks or pending work */
11382         flush_scheduled_work();
11383
11384         /* destroy the backlight and sysfs files before encoders/connectors */
11385         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
11386                 intel_panel_destroy_backlight(connector);
11387                 drm_sysfs_connector_remove(connector);
11388         }
11389
11390         drm_mode_config_cleanup(dev);
11391
11392         intel_cleanup_overlay(dev);
11393 }
11394
11395 /*
11396  * Return which encoder is currently attached to the connector.
11397  */
11398 struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
11399 {
11400         return &intel_attached_encoder(connector)->base;
11401 }
11402
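/* Record the connector -> encoder link in both the intel and the drm
 * connector structures. */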
11403 void intel_connector_attach_encoder(struct intel_connector *connector,
11404                                     struct intel_encoder *encoder)
11405 {
11406         connector->encoder = encoder;
11407         drm_mode_connector_attach_encoder(&connector->base,
11408                                           &encoder->base);
11409 }
11410
11411 /*
11412  * set vga decode state - true == enable VGA decode
11413  */
11414 int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
11415 {
11416         struct drm_i915_private *dev_priv = dev->dev_private;
11417         unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
11418         u16 gmch_ctrl;
11419
11420         pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl);
11421         if (state)
11422                 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
11423         else
11424                 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
11425         pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl);
11426         return 0;
11427 }
11428
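/* Snapshot of the display controller registers, filled in by
 * intel_display_capture_error_state() and dumped later via
 * intel_display_print_error_state(). */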
11429 struct intel_display_error_state {
11430
11431         u32 power_well_driver;
11432
11433         int num_transcoders;
11434
11435         struct intel_cursor_error_state {
11436                 u32 control;
11437                 u32 position;
11438                 u32 base;
11439                 u32 size;
11440         } cursor[I915_MAX_PIPES];
11441
11442         struct intel_pipe_error_state {
11443                 bool power_domain_on;
11444                 u32 source;
11445         } pipe[I915_MAX_PIPES];
11446
11447         struct intel_plane_error_state {
11448                 u32 control;
11449                 u32 stride;
11450                 u32 size;
11451                 u32 pos;
11452                 u32 addr;
11453                 u32 surface;
11454                 u32 tile_offset;
11455         } plane[I915_MAX_PIPES];
11456
11457         struct intel_transcoder_error_state {
11458                 bool power_domain_on;
11459                 enum transcoder cpu_transcoder;
11460
11461                 u32 conf;
11462
11463                 u32 htotal;
11464                 u32 hblank;
11465                 u32 hsync;
11466                 u32 vtotal;
11467                 u32 vblank;
11468                 u32 vsync;
11469         } transcoder[4];
11470 };
11471
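/* Capture the cursor, plane, pipe and transcoder registers for every pipe,
 * skipping anything whose power domain is currently off. The allocation uses
 * GFP_ATOMIC so this can be called from atomic context. */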
11472 struct intel_display_error_state *
11473 intel_display_capture_error_state(struct drm_device *dev)
11474 {
11475         drm_i915_private_t *dev_priv = dev->dev_private;
11476         struct intel_display_error_state *error;
11477         int transcoders[] = {
11478                 TRANSCODER_A,
11479                 TRANSCODER_B,
11480                 TRANSCODER_C,
11481                 TRANSCODER_EDP,
11482         };
11483         int i;
11484
11485         if (INTEL_INFO(dev)->num_pipes == 0)
11486                 return NULL;
11487
11488         error = kzalloc(sizeof(*error), GFP_ATOMIC);
11489         if (error == NULL)
11490                 return NULL;
11491
11492         if (IS_HASWELL(dev) || IS_BROADWELL(dev))
11493                 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
11494
11495         for_each_pipe(i) {
11496                 error->pipe[i].power_domain_on =
11497                         intel_display_power_enabled_sw(dev, POWER_DOMAIN_PIPE(i));
11498                 if (!error->pipe[i].power_domain_on)
11499                         continue;
11500
11501                 if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
11502                         error->cursor[i].control = I915_READ(CURCNTR(i));
11503                         error->cursor[i].position = I915_READ(CURPOS(i));
11504                         error->cursor[i].base = I915_READ(CURBASE(i));
11505                 } else {
11506                         error->cursor[i].control = I915_READ(CURCNTR_IVB(i));
11507                         error->cursor[i].position = I915_READ(CURPOS_IVB(i));
11508                         error->cursor[i].base = I915_READ(CURBASE_IVB(i));
11509                 }
11510
11511                 error->plane[i].control = I915_READ(DSPCNTR(i));
11512                 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
11513                 if (INTEL_INFO(dev)->gen <= 3) {
11514                         error->plane[i].size = I915_READ(DSPSIZE(i));
11515                         error->plane[i].pos = I915_READ(DSPPOS(i));
11516                 }
11517                 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
11518                         error->plane[i].addr = I915_READ(DSPADDR(i));
11519                 if (INTEL_INFO(dev)->gen >= 4) {
11520                         error->plane[i].surface = I915_READ(DSPSURF(i));
11521                         error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
11522                 }
11523
11524                 error->pipe[i].source = I915_READ(PIPESRC(i));
11525         }
11526
11527         error->num_transcoders = INTEL_INFO(dev)->num_pipes;
11528         if (HAS_DDI(dev_priv->dev))
11529                 error->num_transcoders++; /* Account for eDP. */
11530
11531         for (i = 0; i < error->num_transcoders; i++) {
11532                 enum transcoder cpu_transcoder = transcoders[i];
11533
11534                 error->transcoder[i].power_domain_on =
11535                         intel_display_power_enabled_sw(dev,
11536                                 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
11537                 if (!error->transcoder[i].power_domain_on)
11538                         continue;
11539
11540                 error->transcoder[i].cpu_transcoder = cpu_transcoder;
11541
11542                 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
11543                 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
11544                 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
11545                 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
11546                 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
11547                 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
11548                 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
11549         }
11550
11551         return error;
11552 }
11553
11554 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
11555
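/* Dump a previously captured display error state into the error state
 * buffer: one block per pipe (source, plane and cursor registers) plus one
 * per transcoder. */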
11556 void
11557 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
11558                                 struct drm_device *dev,
11559                                 struct intel_display_error_state *error)
11560 {
11561         int i;
11562
11563         if (!error)
11564                 return;
11565
11566         err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
11567         if (IS_HASWELL(dev) || IS_BROADWELL(dev))
11568                 err_printf(m, "PWR_WELL_CTL2: %08x\n",
11569                            error->power_well_driver);
11570         for_each_pipe(i) {
11571                 err_printf(m, "Pipe [%d]:\n", i);
11572                 err_printf(m, "  Power: %s\n",
11573                            error->pipe[i].power_domain_on ? "on" : "off");
11574                 err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
11575
11576                 err_printf(m, "Plane [%d]:\n", i);
11577                 err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
11578                 err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
11579                 if (INTEL_INFO(dev)->gen <= 3) {
11580                         err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
11581                         err_printf(m, "  POS: %08x\n", error->plane[i].pos);
11582                 }
11583                 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
11584                         err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
11585                 if (INTEL_INFO(dev)->gen >= 4) {
11586                         err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
11587                         err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
11588                 }
11589
11590                 err_printf(m, "Cursor [%d]:\n", i);
11591                 err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
11592                 err_printf(m, "  POS: %08x\n", error->cursor[i].position);
11593                 err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
11594         }
11595
11596         for (i = 0; i < error->num_transcoders; i++) {
11597                 err_printf(m, "CPU transcoder: %c\n",
11598                            transcoder_name(error->transcoder[i].cpu_transcoder));
11599                 err_printf(m, "  Power: %s\n",
11600                            error->transcoder[i].power_domain_on ? "on" : "off");
11601                 err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
11602                 err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
11603                 err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
11604                 err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
11605                 err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
11606                 err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
11607                 err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
11608         }
11609 }