drm/i915: Improve watermark dirtyness checks
drivers/gpu/drm/i915/intel_pm.c
1 /*
2  * Copyright © 2012 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eugeni Dodonov <eugeni.dodonov@intel.com>
25  *
26  */
27
28 #include <linux/cpufreq.h>
29 #include "i915_drv.h"
30 #include "intel_drv.h"
31 #include "../../../platform/x86/intel_ips.h"
32 #include <linux/module.h>
33 #include <drm/i915_powerwell.h>
34
35 /* FBC, or Frame Buffer Compression, is a technique employed to compress the
36  * framebuffer contents in-memory, aiming at reducing the required bandwidth
37  * during in-memory transfers and, therefore, reducing power consumption.
38  *
39  * The benefits of FBC are mostly visible with solid backgrounds and
40  * variation-less patterns.
41  *
42  * FBC-related functionality can be enabled by means of the
43  * i915.i915_enable_fbc parameter.
44  */
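/*
 * Illustrative usage (not part of the original source): with kernels of this
 * vintage the knob is an i915 module parameter, so FBC can be forced on or
 * off at load time, e.g.
 *
 *   modprobe i915 i915_enable_fbc=1      (force-enable FBC)
 *   modprobe i915 i915_enable_fbc=0      (force-disable FBC)
 *
 * or i915.i915_enable_fbc=1 on the kernel command line. A value of -1 (the
 * default) defers to the per-chip default, as implemented in
 * intel_update_fbc() below.
 */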
45
46 static void i8xx_disable_fbc(struct drm_device *dev)
47 {
48         struct drm_i915_private *dev_priv = dev->dev_private;
49         u32 fbc_ctl;
50
51         /* Disable compression */
52         fbc_ctl = I915_READ(FBC_CONTROL);
53         if ((fbc_ctl & FBC_CTL_EN) == 0)
54                 return;
55
56         fbc_ctl &= ~FBC_CTL_EN;
57         I915_WRITE(FBC_CONTROL, fbc_ctl);
58
59         /* Wait for compressing bit to clear */
60         if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
61                 DRM_DEBUG_KMS("FBC idle timed out\n");
62                 return;
63         }
64
65         DRM_DEBUG_KMS("disabled FBC\n");
66 }
67
68 static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
69 {
70         struct drm_device *dev = crtc->dev;
71         struct drm_i915_private *dev_priv = dev->dev_private;
72         struct drm_framebuffer *fb = crtc->fb;
73         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
74         struct drm_i915_gem_object *obj = intel_fb->obj;
75         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
76         int cfb_pitch;
77         int plane, i;
78         u32 fbc_ctl, fbc_ctl2;
79
80         cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
81         if (fb->pitches[0] < cfb_pitch)
82                 cfb_pitch = fb->pitches[0];
83
84         /* FBC_CTL wants 64B units */
85         cfb_pitch = (cfb_pitch / 64) - 1;
86         plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
87
88         /* Clear old tags */
89         for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
90                 I915_WRITE(FBC_TAG + (i * 4), 0);
91
92         /* Set it up... */
93         fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
94         fbc_ctl2 |= plane;
95         I915_WRITE(FBC_CONTROL2, fbc_ctl2);
96         I915_WRITE(FBC_FENCE_OFF, crtc->y);
97
98         /* enable it... */
99         fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
100         if (IS_I945GM(dev))
101                 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
102         fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
103         fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
104         fbc_ctl |= obj->fence_reg;
105         I915_WRITE(FBC_CONTROL, fbc_ctl);
106
107         DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
108                       cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
109 }
110
111 static bool i8xx_fbc_enabled(struct drm_device *dev)
112 {
113         struct drm_i915_private *dev_priv = dev->dev_private;
114
115         return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
116 }
117
118 static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
119 {
120         struct drm_device *dev = crtc->dev;
121         struct drm_i915_private *dev_priv = dev->dev_private;
122         struct drm_framebuffer *fb = crtc->fb;
123         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
124         struct drm_i915_gem_object *obj = intel_fb->obj;
125         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
126         int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
127         unsigned long stall_watermark = 200;
128         u32 dpfc_ctl;
129
130         dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
131         dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
132         I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
133
134         I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
135                    (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
136                    (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
137         I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
138
139         /* enable it... */
140         I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
141
142         DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
143 }
144
145 static void g4x_disable_fbc(struct drm_device *dev)
146 {
147         struct drm_i915_private *dev_priv = dev->dev_private;
148         u32 dpfc_ctl;
149
150         /* Disable compression */
151         dpfc_ctl = I915_READ(DPFC_CONTROL);
152         if (dpfc_ctl & DPFC_CTL_EN) {
153                 dpfc_ctl &= ~DPFC_CTL_EN;
154                 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
155
156                 DRM_DEBUG_KMS("disabled FBC\n");
157         }
158 }
159
160 static bool g4x_fbc_enabled(struct drm_device *dev)
161 {
162         struct drm_i915_private *dev_priv = dev->dev_private;
163
164         return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
165 }
166
167 static void sandybridge_blit_fbc_update(struct drm_device *dev)
168 {
169         struct drm_i915_private *dev_priv = dev->dev_private;
170         u32 blt_ecoskpd;
171
172         /* Make sure blitter notifies FBC of writes */
173         gen6_gt_force_wake_get(dev_priv);
174         blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
175         blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
176                 GEN6_BLITTER_LOCK_SHIFT;
177         I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
178         blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
179         I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
180         blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
181                          GEN6_BLITTER_LOCK_SHIFT);
182         I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
183         POSTING_READ(GEN6_BLITTER_ECOSKPD);
184         gen6_gt_force_wake_put(dev_priv);
185 }
186
187 static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
188 {
189         struct drm_device *dev = crtc->dev;
190         struct drm_i915_private *dev_priv = dev->dev_private;
191         struct drm_framebuffer *fb = crtc->fb;
192         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
193         struct drm_i915_gem_object *obj = intel_fb->obj;
194         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
195         int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
196         unsigned long stall_watermark = 200;
197         u32 dpfc_ctl;
198
199         dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
200         dpfc_ctl &= DPFC_RESERVED;
201         dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
202         /* Set persistent mode for front-buffer rendering, ala X. */
203         dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
204         dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
205         I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
206
207         I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
208                    (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
209                    (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
210         I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
211         I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
212         /* enable it... */
213         I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
214
215         if (IS_GEN6(dev)) {
216                 I915_WRITE(SNB_DPFC_CTL_SA,
217                            SNB_CPU_FENCE_ENABLE | obj->fence_reg);
218                 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
219                 sandybridge_blit_fbc_update(dev);
220         }
221
222         DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
223 }
224
225 static void ironlake_disable_fbc(struct drm_device *dev)
226 {
227         struct drm_i915_private *dev_priv = dev->dev_private;
228         u32 dpfc_ctl;
229
230         /* Disable compression */
231         dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
232         if (dpfc_ctl & DPFC_CTL_EN) {
233                 dpfc_ctl &= ~DPFC_CTL_EN;
234                 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
235
236                 if (IS_IVYBRIDGE(dev))
237                         /* WaFbcDisableDpfcClockGating:ivb */
238                         I915_WRITE(ILK_DSPCLK_GATE_D,
239                                    I915_READ(ILK_DSPCLK_GATE_D) &
240                                    ~ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
241
242                 if (IS_HASWELL(dev))
243                         /* WaFbcDisableDpfcClockGating:hsw */
244                         I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
245                                    I915_READ(HSW_CLKGATE_DISABLE_PART_1) &
246                                    ~HSW_DPFC_GATING_DISABLE);
247
248                 DRM_DEBUG_KMS("disabled FBC\n");
249         }
250 }
251
252 static bool ironlake_fbc_enabled(struct drm_device *dev)
253 {
254         struct drm_i915_private *dev_priv = dev->dev_private;
255
256         return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
257 }
258
259 static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
260 {
261         struct drm_device *dev = crtc->dev;
262         struct drm_i915_private *dev_priv = dev->dev_private;
263         struct drm_framebuffer *fb = crtc->fb;
264         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
265         struct drm_i915_gem_object *obj = intel_fb->obj;
266         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
267
268         I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj));
269
270         I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
271                    IVB_DPFC_CTL_FENCE_EN |
272                    intel_crtc->plane << IVB_DPFC_CTL_PLANE_SHIFT);
273
274         if (IS_IVYBRIDGE(dev)) {
275                 /* WaFbcAsynchFlipDisableFbcQueue:ivb */
276                 I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS);
277                 /* WaFbcDisableDpfcClockGating:ivb */
278                 I915_WRITE(ILK_DSPCLK_GATE_D,
279                            I915_READ(ILK_DSPCLK_GATE_D) |
280                            ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
281         } else {
282                 /* WaFbcAsynchFlipDisableFbcQueue:hsw */
283                 I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe),
284                            HSW_BYPASS_FBC_QUEUE);
285                 /* WaFbcDisableDpfcClockGating:hsw */
286                 I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
287                            I915_READ(HSW_CLKGATE_DISABLE_PART_1) |
288                            HSW_DPFC_GATING_DISABLE);
289         }
290
291         I915_WRITE(SNB_DPFC_CTL_SA,
292                    SNB_CPU_FENCE_ENABLE | obj->fence_reg);
293         I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
294
295         sandybridge_blit_fbc_update(dev);
296
297         DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
298 }
299
300 bool intel_fbc_enabled(struct drm_device *dev)
301 {
302         struct drm_i915_private *dev_priv = dev->dev_private;
303
304         if (!dev_priv->display.fbc_enabled)
305                 return false;
306
307         return dev_priv->display.fbc_enabled(dev);
308 }
309
310 static void intel_fbc_work_fn(struct work_struct *__work)
311 {
312         struct intel_fbc_work *work =
313                 container_of(to_delayed_work(__work),
314                              struct intel_fbc_work, work);
315         struct drm_device *dev = work->crtc->dev;
316         struct drm_i915_private *dev_priv = dev->dev_private;
317
318         mutex_lock(&dev->struct_mutex);
319         if (work == dev_priv->fbc.fbc_work) {
320                 /* Double check that we haven't switched fb without cancelling
321                  * the prior work.
322                  */
323                 if (work->crtc->fb == work->fb) {
324                         dev_priv->display.enable_fbc(work->crtc,
325                                                      work->interval);
326
327                         dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
328                         dev_priv->fbc.fb_id = work->crtc->fb->base.id;
329                         dev_priv->fbc.y = work->crtc->y;
330                 }
331
332                 dev_priv->fbc.fbc_work = NULL;
333         }
334         mutex_unlock(&dev->struct_mutex);
335
336         kfree(work);
337 }
338
339 static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
340 {
341         if (dev_priv->fbc.fbc_work == NULL)
342                 return;
343
344         DRM_DEBUG_KMS("cancelling pending FBC enable\n");
345
346         /* Synchronisation is provided by struct_mutex and checking of
347          * dev_priv->fbc.fbc_work, so we can perform the cancellation
348          * entirely asynchronously.
349          */
350         if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
351                 /* work was cancelled before it ran, clean up */
352                 kfree(dev_priv->fbc.fbc_work);
353
354         /* Mark the work as no longer wanted so that if it does
355          * wake-up (because the work was already running and waiting
356          * for our mutex), it will discover that it is no longer
357          * necessary to run.
358          */
359         dev_priv->fbc.fbc_work = NULL;
360 }
361
362 static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
363 {
364         struct intel_fbc_work *work;
365         struct drm_device *dev = crtc->dev;
366         struct drm_i915_private *dev_priv = dev->dev_private;
367
368         if (!dev_priv->display.enable_fbc)
369                 return;
370
371         intel_cancel_fbc_work(dev_priv);
372
373         work = kzalloc(sizeof(*work), GFP_KERNEL);
374         if (work == NULL) {
375                 DRM_ERROR("Failed to allocate FBC work structure\n");
376                 dev_priv->display.enable_fbc(crtc, interval);
377                 return;
378         }
379
380         work->crtc = crtc;
381         work->fb = crtc->fb;
382         work->interval = interval;
383         INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
384
385         dev_priv->fbc.fbc_work = work;
386
387         /* Delay the actual enabling to let pageflipping cease and the
388          * display to settle before starting the compression. Note that
389          * this delay also serves a second purpose: it allows for a
390          * vblank to pass after disabling the FBC before we attempt
391          * to modify the control registers.
392          *
393          * A more complicated solution would involve tracking vblanks
394          * following the termination of the page-flipping sequence
395          * and indeed performing the enable as a co-routine and not
396          * waiting synchronously upon the vblank.
397          *
398          * WaFbcWaitForVBlankBeforeEnable:ilk,snb
399          */
400         schedule_delayed_work(&work->work, msecs_to_jiffies(50));
401 }
402
403 void intel_disable_fbc(struct drm_device *dev)
404 {
405         struct drm_i915_private *dev_priv = dev->dev_private;
406
407         intel_cancel_fbc_work(dev_priv);
408
409         if (!dev_priv->display.disable_fbc)
410                 return;
411
412         dev_priv->display.disable_fbc(dev);
413         dev_priv->fbc.plane = -1;
414 }
415
416 static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
417                               enum no_fbc_reason reason)
418 {
419         if (dev_priv->fbc.no_fbc_reason == reason)
420                 return false;
421
422         dev_priv->fbc.no_fbc_reason = reason;
423         return true;
424 }
425
426 /**
427  * intel_update_fbc - enable/disable FBC as needed
428  * @dev: the drm_device
429  *
430  * Set up the framebuffer compression hardware at mode set time.  We
431  * enable it if possible:
432  *   - plane A only (on pre-965)
433  *   - no pixel multiply/line duplication
434  *   - no alpha buffer discard
435  *   - no dual wide
436  *   - framebuffer <= max_hdisplay in width, max_vdisplay in height
437  *
438  * We can't assume that any compression will take place (worst case),
439  * so the compressed buffer has to be the same size as the uncompressed
440  * one.  It also must reside (along with the line length buffer) in
441  * stolen memory.
442  *
443  * We need to enable/disable FBC on a global basis.
444  */
445 void intel_update_fbc(struct drm_device *dev)
446 {
447         struct drm_i915_private *dev_priv = dev->dev_private;
448         struct drm_crtc *crtc = NULL, *tmp_crtc;
449         struct intel_crtc *intel_crtc;
450         struct drm_framebuffer *fb;
451         struct intel_framebuffer *intel_fb;
452         struct drm_i915_gem_object *obj;
453         const struct drm_display_mode *adjusted_mode;
454         unsigned int max_width, max_height;
455
456         if (!I915_HAS_FBC(dev)) {
457                 set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
458                 return;
459         }
460
461         if (!i915_powersave) {
462                 if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
463                         DRM_DEBUG_KMS("fbc disabled per module param\n");
464                 return;
465         }
466
467         /*
468          * If FBC is already on, we just have to verify that we can
469          * keep it that way...
470          * Need to disable if:
471          *   - more than one pipe is active
472          *   - changing FBC params (stride, fence, mode)
473          *   - new fb is too large to fit in compressed buffer
474          *   - going to an unsupported config (interlace, pixel multiply, etc.)
475          */
476         list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
477                 if (intel_crtc_active(tmp_crtc) &&
478                     to_intel_crtc(tmp_crtc)->primary_enabled) {
479                         if (crtc) {
480                                 if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
481                                         DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
482                                 goto out_disable;
483                         }
484                         crtc = tmp_crtc;
485                 }
486         }
487
488         if (!crtc || crtc->fb == NULL) {
489                 if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
490                         DRM_DEBUG_KMS("no output, disabling\n");
491                 goto out_disable;
492         }
493
494         intel_crtc = to_intel_crtc(crtc);
495         fb = crtc->fb;
496         intel_fb = to_intel_framebuffer(fb);
497         obj = intel_fb->obj;
498         adjusted_mode = &intel_crtc->config.adjusted_mode;
499
500         if (i915_enable_fbc < 0 &&
501             INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
502                 if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
503                         DRM_DEBUG_KMS("disabled per chip default\n");
504                 goto out_disable;
505         }
506         if (!i915_enable_fbc) {
507                 if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
508                         DRM_DEBUG_KMS("fbc disabled per module param\n");
509                 goto out_disable;
510         }
511         if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
512             (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
513                 if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
514                         DRM_DEBUG_KMS("mode incompatible with compression, "
515                                       "disabling\n");
516                 goto out_disable;
517         }
518
519         if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
520                 max_width = 4096;
521                 max_height = 2048;
522         } else {
523                 max_width = 2048;
524                 max_height = 1536;
525         }
526         if (intel_crtc->config.pipe_src_w > max_width ||
527             intel_crtc->config.pipe_src_h > max_height) {
528                 if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
529                         DRM_DEBUG_KMS("mode too large for compression, disabling\n");
530                 goto out_disable;
531         }
532         if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) &&
533             intel_crtc->plane != 0) {
534                 if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
535                         DRM_DEBUG_KMS("plane not 0, disabling compression\n");
536                 goto out_disable;
537         }
538
539         /* The use of a CPU fence is mandatory in order to detect writes
540          * by the CPU to the scanout and trigger updates to the FBC.
541          */
542         if (obj->tiling_mode != I915_TILING_X ||
543             obj->fence_reg == I915_FENCE_REG_NONE) {
544                 if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
545                         DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
546                 goto out_disable;
547         }
548
549         /* If the kernel debugger is active, always disable compression */
550         if (in_dbg_master())
551                 goto out_disable;
552
553         if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
554                 if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
555                         DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
556                 goto out_disable;
557         }
558
559         /* If the scanout has not changed, don't modify the FBC settings.
560          * Note that we make the fundamental assumption that the fb->obj
561          * cannot be unpinned (and have its GTT offset and fence revoked)
562          * without first being decoupled from the scanout and FBC disabled.
563          */
564         if (dev_priv->fbc.plane == intel_crtc->plane &&
565             dev_priv->fbc.fb_id == fb->base.id &&
566             dev_priv->fbc.y == crtc->y)
567                 return;
568
569         if (intel_fbc_enabled(dev)) {
570                 /* We update FBC along two paths, after changing fb/crtc
571                  * configuration (modeswitching) and after page-flipping
572                  * finishes. For the latter, we know that not only did
573                  * we disable the FBC at the start of the page-flip
574                  * sequence, but also more than one vblank has passed.
575                  *
576                  * For the former case of modeswitching, it is possible
577                  * to switch between two FBC valid configurations
578                  * instantaneously so we do need to disable the FBC
579                  * before we can modify its control registers. We also
580                  * have to wait for the next vblank for that to take
581                  * effect. However, since we delay enabling FBC we can
582                  * assume that a vblank has passed since disabling and
583                  * that we can safely alter the registers in the deferred
584                  * callback.
585                  *
586                  * In the scenario that we go from a valid to invalid
587                  * and then back to valid FBC configuration we have
588                  * no strict enforcement that a vblank occurred since
589                  * disabling the FBC. However, along all current pipe
590                  * disabling paths we do need to wait for a vblank at
591                  * some point. And we wait before enabling FBC anyway.
592                  */
593                 DRM_DEBUG_KMS("disabling active FBC for update\n");
594                 intel_disable_fbc(dev);
595         }
596
597         intel_enable_fbc(crtc, 500);
598         dev_priv->fbc.no_fbc_reason = FBC_OK;
599         return;
600
601 out_disable:
602         /* Multiple disables should be harmless */
603         if (intel_fbc_enabled(dev)) {
604                 DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
605                 intel_disable_fbc(dev);
606         }
607         i915_gem_stolen_cleanup_compression(dev);
608 }
609
610 static void i915_pineview_get_mem_freq(struct drm_device *dev)
611 {
612         drm_i915_private_t *dev_priv = dev->dev_private;
613         u32 tmp;
614
615         tmp = I915_READ(CLKCFG);
616
617         switch (tmp & CLKCFG_FSB_MASK) {
618         case CLKCFG_FSB_533:
619                 dev_priv->fsb_freq = 533; /* 133*4 */
620                 break;
621         case CLKCFG_FSB_800:
622                 dev_priv->fsb_freq = 800; /* 200*4 */
623                 break;
624         case CLKCFG_FSB_667:
625                 dev_priv->fsb_freq =  667; /* 167*4 */
626                 break;
627         case CLKCFG_FSB_400:
628                 dev_priv->fsb_freq = 400; /* 100*4 */
629                 break;
630         }
631
632         switch (tmp & CLKCFG_MEM_MASK) {
633         case CLKCFG_MEM_533:
634                 dev_priv->mem_freq = 533;
635                 break;
636         case CLKCFG_MEM_667:
637                 dev_priv->mem_freq = 667;
638                 break;
639         case CLKCFG_MEM_800:
640                 dev_priv->mem_freq = 800;
641                 break;
642         }
643
644         /* detect pineview DDR3 setting */
645         tmp = I915_READ(CSHRDDR3CTL);
646         dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
647 }
648
649 static void i915_ironlake_get_mem_freq(struct drm_device *dev)
650 {
651         drm_i915_private_t *dev_priv = dev->dev_private;
652         u16 ddrpll, csipll;
653
654         ddrpll = I915_READ16(DDRMPLL1);
655         csipll = I915_READ16(CSIPLL0);
656
657         switch (ddrpll & 0xff) {
658         case 0xc:
659                 dev_priv->mem_freq = 800;
660                 break;
661         case 0x10:
662                 dev_priv->mem_freq = 1066;
663                 break;
664         case 0x14:
665                 dev_priv->mem_freq = 1333;
666                 break;
667         case 0x18:
668                 dev_priv->mem_freq = 1600;
669                 break;
670         default:
671                 DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
672                                  ddrpll & 0xff);
673                 dev_priv->mem_freq = 0;
674                 break;
675         }
676
677         dev_priv->ips.r_t = dev_priv->mem_freq;
678
679         switch (csipll & 0x3ff) {
680         case 0x00c:
681                 dev_priv->fsb_freq = 3200;
682                 break;
683         case 0x00e:
684                 dev_priv->fsb_freq = 3733;
685                 break;
686         case 0x010:
687                 dev_priv->fsb_freq = 4266;
688                 break;
689         case 0x012:
690                 dev_priv->fsb_freq = 4800;
691                 break;
692         case 0x014:
693                 dev_priv->fsb_freq = 5333;
694                 break;
695         case 0x016:
696                 dev_priv->fsb_freq = 5866;
697                 break;
698         case 0x018:
699                 dev_priv->fsb_freq = 6400;
700                 break;
701         default:
702                 DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
703                                  csipll & 0x3ff);
704                 dev_priv->fsb_freq = 0;
705                 break;
706         }
707
708         if (dev_priv->fsb_freq == 3200) {
709                 dev_priv->ips.c_m = 0;
710         } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
711                 dev_priv->ips.c_m = 1;
712         } else {
713                 dev_priv->ips.c_m = 2;
714         }
715 }
716
717 static const struct cxsr_latency cxsr_latency_table[] = {
718         {1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
719         {1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
720         {1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
721         {1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
722         {1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */
723
724         {1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
725         {1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
726         {1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
727         {1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
728         {1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */
729
730         {1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
731         {1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
732         {1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
733         {1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
734         {1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */
735
736         {0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
737         {0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
738         {0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
739         {0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
740         {0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */
741
742         {0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
743         {0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
744         {0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
745         {0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
746         {0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */
747
748         {0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
749         {0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
750         {0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
751         {0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
752         {0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
753 };
754
755 static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
756                                                          int is_ddr3,
757                                                          int fsb,
758                                                          int mem)
759 {
760         const struct cxsr_latency *latency;
761         int i;
762
763         if (fsb == 0 || mem == 0)
764                 return NULL;
765
766         for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
767                 latency = &cxsr_latency_table[i];
768                 if (is_desktop == latency->is_desktop &&
769                     is_ddr3 == latency->is_ddr3 &&
770                     fsb == latency->fsb_freq && mem == latency->mem_freq)
771                         return latency;
772         }
773
774         DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
775
776         return NULL;
777 }
778
779 static void pineview_disable_cxsr(struct drm_device *dev)
780 {
781         struct drm_i915_private *dev_priv = dev->dev_private;
782
783         /* deactivate cxsr */
784         I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
785 }
786
787 /*
788  * Latency for FIFO fetches is dependent on several factors:
789  *   - memory configuration (speed, channels)
790  *   - chipset
791  *   - current MCH state
792  * It can be fairly high in some situations, so here we assume a fairly
793  * pessimal value.  It's a tradeoff between extra memory fetches (if we
794  * set this value too high, the FIFO will fetch frequently to stay full)
795  * and power consumption (set it too low to save power and we might see
796  * FIFO underruns and display "flicker").
797  *
798  * A value of 5us seems to be a good balance; safe for very low end
799  * platforms but not overly aggressive on lower latency configs.
800  */
801 static const int latency_ns = 5000;
802
803 static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
804 {
805         struct drm_i915_private *dev_priv = dev->dev_private;
806         uint32_t dsparb = I915_READ(DSPARB);
807         int size;
808
809         size = dsparb & 0x7f;
810         if (plane)
811                 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
812
813         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
814                       plane ? "B" : "A", size);
815
816         return size;
817 }
818
819 static int i85x_get_fifo_size(struct drm_device *dev, int plane)
820 {
821         struct drm_i915_private *dev_priv = dev->dev_private;
822         uint32_t dsparb = I915_READ(DSPARB);
823         int size;
824
825         size = dsparb & 0x1ff;
826         if (plane)
827                 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
828         size >>= 1; /* Convert to cachelines */
829
830         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
831                       plane ? "B" : "A", size);
832
833         return size;
834 }
835
836 static int i845_get_fifo_size(struct drm_device *dev, int plane)
837 {
838         struct drm_i915_private *dev_priv = dev->dev_private;
839         uint32_t dsparb = I915_READ(DSPARB);
840         int size;
841
842         size = dsparb & 0x7f;
843         size >>= 2; /* Convert to cachelines */
844
845         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
846                       plane ? "B" : "A",
847                       size);
848
849         return size;
850 }
851
852 static int i830_get_fifo_size(struct drm_device *dev, int plane)
853 {
854         struct drm_i915_private *dev_priv = dev->dev_private;
855         uint32_t dsparb = I915_READ(DSPARB);
856         int size;
857
858         size = dsparb & 0x7f;
859         size >>= 1; /* Convert to cachelines */
860
861         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
862                       plane ? "B" : "A", size);
863
864         return size;
865 }
866
867 /* Pineview has different values for various configs */
868 static const struct intel_watermark_params pineview_display_wm = {
869         PINEVIEW_DISPLAY_FIFO,
870         PINEVIEW_MAX_WM,
871         PINEVIEW_DFT_WM,
872         PINEVIEW_GUARD_WM,
873         PINEVIEW_FIFO_LINE_SIZE
874 };
875 static const struct intel_watermark_params pineview_display_hplloff_wm = {
876         PINEVIEW_DISPLAY_FIFO,
877         PINEVIEW_MAX_WM,
878         PINEVIEW_DFT_HPLLOFF_WM,
879         PINEVIEW_GUARD_WM,
880         PINEVIEW_FIFO_LINE_SIZE
881 };
882 static const struct intel_watermark_params pineview_cursor_wm = {
883         PINEVIEW_CURSOR_FIFO,
884         PINEVIEW_CURSOR_MAX_WM,
885         PINEVIEW_CURSOR_DFT_WM,
886         PINEVIEW_CURSOR_GUARD_WM,
887         PINEVIEW_FIFO_LINE_SIZE,
888 };
889 static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
890         PINEVIEW_CURSOR_FIFO,
891         PINEVIEW_CURSOR_MAX_WM,
892         PINEVIEW_CURSOR_DFT_WM,
893         PINEVIEW_CURSOR_GUARD_WM,
894         PINEVIEW_FIFO_LINE_SIZE
895 };
896 static const struct intel_watermark_params g4x_wm_info = {
897         G4X_FIFO_SIZE,
898         G4X_MAX_WM,
899         G4X_MAX_WM,
900         2,
901         G4X_FIFO_LINE_SIZE,
902 };
903 static const struct intel_watermark_params g4x_cursor_wm_info = {
904         I965_CURSOR_FIFO,
905         I965_CURSOR_MAX_WM,
906         I965_CURSOR_DFT_WM,
907         2,
908         G4X_FIFO_LINE_SIZE,
909 };
910 static const struct intel_watermark_params valleyview_wm_info = {
911         VALLEYVIEW_FIFO_SIZE,
912         VALLEYVIEW_MAX_WM,
913         VALLEYVIEW_MAX_WM,
914         2,
915         G4X_FIFO_LINE_SIZE,
916 };
917 static const struct intel_watermark_params valleyview_cursor_wm_info = {
918         I965_CURSOR_FIFO,
919         VALLEYVIEW_CURSOR_MAX_WM,
920         I965_CURSOR_DFT_WM,
921         2,
922         G4X_FIFO_LINE_SIZE,
923 };
924 static const struct intel_watermark_params i965_cursor_wm_info = {
925         I965_CURSOR_FIFO,
926         I965_CURSOR_MAX_WM,
927         I965_CURSOR_DFT_WM,
928         2,
929         I915_FIFO_LINE_SIZE,
930 };
931 static const struct intel_watermark_params i945_wm_info = {
932         I945_FIFO_SIZE,
933         I915_MAX_WM,
934         1,
935         2,
936         I915_FIFO_LINE_SIZE
937 };
938 static const struct intel_watermark_params i915_wm_info = {
939         I915_FIFO_SIZE,
940         I915_MAX_WM,
941         1,
942         2,
943         I915_FIFO_LINE_SIZE
944 };
945 static const struct intel_watermark_params i855_wm_info = {
946         I855GM_FIFO_SIZE,
947         I915_MAX_WM,
948         1,
949         2,
950         I830_FIFO_LINE_SIZE
951 };
952 static const struct intel_watermark_params i830_wm_info = {
953         I830_FIFO_SIZE,
954         I915_MAX_WM,
955         1,
956         2,
957         I830_FIFO_LINE_SIZE
958 };
959
960 static const struct intel_watermark_params ironlake_display_wm_info = {
961         ILK_DISPLAY_FIFO,
962         ILK_DISPLAY_MAXWM,
963         ILK_DISPLAY_DFTWM,
964         2,
965         ILK_FIFO_LINE_SIZE
966 };
967 static const struct intel_watermark_params ironlake_cursor_wm_info = {
968         ILK_CURSOR_FIFO,
969         ILK_CURSOR_MAXWM,
970         ILK_CURSOR_DFTWM,
971         2,
972         ILK_FIFO_LINE_SIZE
973 };
974 static const struct intel_watermark_params ironlake_display_srwm_info = {
975         ILK_DISPLAY_SR_FIFO,
976         ILK_DISPLAY_MAX_SRWM,
977         ILK_DISPLAY_DFT_SRWM,
978         2,
979         ILK_FIFO_LINE_SIZE
980 };
981 static const struct intel_watermark_params ironlake_cursor_srwm_info = {
982         ILK_CURSOR_SR_FIFO,
983         ILK_CURSOR_MAX_SRWM,
984         ILK_CURSOR_DFT_SRWM,
985         2,
986         ILK_FIFO_LINE_SIZE
987 };
988
989 static const struct intel_watermark_params sandybridge_display_wm_info = {
990         SNB_DISPLAY_FIFO,
991         SNB_DISPLAY_MAXWM,
992         SNB_DISPLAY_DFTWM,
993         2,
994         SNB_FIFO_LINE_SIZE
995 };
996 static const struct intel_watermark_params sandybridge_cursor_wm_info = {
997         SNB_CURSOR_FIFO,
998         SNB_CURSOR_MAXWM,
999         SNB_CURSOR_DFTWM,
1000         2,
1001         SNB_FIFO_LINE_SIZE
1002 };
1003 static const struct intel_watermark_params sandybridge_display_srwm_info = {
1004         SNB_DISPLAY_SR_FIFO,
1005         SNB_DISPLAY_MAX_SRWM,
1006         SNB_DISPLAY_DFT_SRWM,
1007         2,
1008         SNB_FIFO_LINE_SIZE
1009 };
1010 static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
1011         SNB_CURSOR_SR_FIFO,
1012         SNB_CURSOR_MAX_SRWM,
1013         SNB_CURSOR_DFT_SRWM,
1014         2,
1015         SNB_FIFO_LINE_SIZE
1016 };
1017
1018
1019 /**
1020  * intel_calculate_wm - calculate watermark level
1021  * @clock_in_khz: pixel clock
1022  * @wm: chip FIFO params
1023  * @pixel_size: display pixel size
1024  * @latency_ns: memory latency for the platform
1025  *
1026  * Calculate the watermark level (the level at which the display plane will
1027  * start fetching from memory again).  Each chip has a different display
1028  * FIFO size and allocation, so the caller needs to figure that out and pass
1029  * in the correct intel_watermark_params structure.
1030  *
1031  * As the pixel clock runs, the FIFO will be drained at a rate that depends
1032  * on the pixel size.  When it reaches the watermark level, it'll start
1033  * fetching FIFO line-sized chunks from memory until the FIFO fills
1034  * past the watermark point.  If the FIFO drains completely, a FIFO underrun
1035  * will occur, and a display engine hang could result.
1036  */
1037 static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
1038                                         const struct intel_watermark_params *wm,
1039                                         int fifo_size,
1040                                         int pixel_size,
1041                                         unsigned long latency_ns)
1042 {
1043         long entries_required, wm_size;
1044
1045         /*
1046          * Note: we need to make sure we don't overflow for various clock &
1047          * latency values.
1048          * clocks go from a few thousand to several hundred thousand.
1049          * latency is usually a few thousand
1050          */
1051         entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
1052                 1000;
1053         entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
1054
1055         DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
1056
1057         wm_size = fifo_size - (entries_required + wm->guard_size);
1058
1059         DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
1060
1061         /* Don't promote wm_size to unsigned... */
1062         if (wm_size > (long)wm->max_wm)
1063                 wm_size = wm->max_wm;
1064         if (wm_size <= 0)
1065                 wm_size = wm->default_wm;
1066         return wm_size;
1067 }
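/*
 * Worked example (illustrative numbers, not taken from real hardware tables):
 * for a 148500 kHz pixel clock, 4 bytes per pixel, the default 5000 ns
 * latency_ns and a 64 byte cacheline:
 *
 *   entries_required = ((148500 / 1000) * 4 * 5000) / 1000 = 2960 bytes
 *   entries_required = DIV_ROUND_UP(2960, 64)              = 47 cachelines
 *
 * Assuming, purely for illustration, a 96 entry FIFO and a guard size of 2,
 * the watermark would be 96 - (47 + 2) = 47, i.e. the plane starts refilling
 * its FIFO once it has drained down to 47 cachelines.
 */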
1068
1069 static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
1070 {
1071         struct drm_crtc *crtc, *enabled = NULL;
1072
1073         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1074                 if (intel_crtc_active(crtc)) {
1075                         if (enabled)
1076                                 return NULL;
1077                         enabled = crtc;
1078                 }
1079         }
1080
1081         return enabled;
1082 }
1083
1084 static void pineview_update_wm(struct drm_crtc *unused_crtc)
1085 {
1086         struct drm_device *dev = unused_crtc->dev;
1087         struct drm_i915_private *dev_priv = dev->dev_private;
1088         struct drm_crtc *crtc;
1089         const struct cxsr_latency *latency;
1090         u32 reg;
1091         unsigned long wm;
1092
1093         latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
1094                                          dev_priv->fsb_freq, dev_priv->mem_freq);
1095         if (!latency) {
1096                 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
1097                 pineview_disable_cxsr(dev);
1098                 return;
1099         }
1100
1101         crtc = single_enabled_crtc(dev);
1102         if (crtc) {
1103                 const struct drm_display_mode *adjusted_mode;
1104                 int pixel_size = crtc->fb->bits_per_pixel / 8;
1105                 int clock;
1106
1107                 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1108                 clock = adjusted_mode->crtc_clock;
1109
1110                 /* Display SR */
1111                 wm = intel_calculate_wm(clock, &pineview_display_wm,
1112                                         pineview_display_wm.fifo_size,
1113                                         pixel_size, latency->display_sr);
1114                 reg = I915_READ(DSPFW1);
1115                 reg &= ~DSPFW_SR_MASK;
1116                 reg |= wm << DSPFW_SR_SHIFT;
1117                 I915_WRITE(DSPFW1, reg);
1118                 DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
1119
1120                 /* cursor SR */
1121                 wm = intel_calculate_wm(clock, &pineview_cursor_wm,
1122                                         pineview_display_wm.fifo_size,
1123                                         pixel_size, latency->cursor_sr);
1124                 reg = I915_READ(DSPFW3);
1125                 reg &= ~DSPFW_CURSOR_SR_MASK;
1126                 reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
1127                 I915_WRITE(DSPFW3, reg);
1128
1129                 /* Display HPLL off SR */
1130                 wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
1131                                         pineview_display_hplloff_wm.fifo_size,
1132                                         pixel_size, latency->display_hpll_disable);
1133                 reg = I915_READ(DSPFW3);
1134                 reg &= ~DSPFW_HPLL_SR_MASK;
1135                 reg |= wm & DSPFW_HPLL_SR_MASK;
1136                 I915_WRITE(DSPFW3, reg);
1137
1138                 /* cursor HPLL off SR */
1139                 wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
1140                                         pineview_display_hplloff_wm.fifo_size,
1141                                         pixel_size, latency->cursor_hpll_disable);
1142                 reg = I915_READ(DSPFW3);
1143                 reg &= ~DSPFW_HPLL_CURSOR_MASK;
1144                 reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
1145                 I915_WRITE(DSPFW3, reg);
1146                 DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
1147
1148                 /* activate cxsr */
1149                 I915_WRITE(DSPFW3,
1150                            I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
1151                 DRM_DEBUG_KMS("Self-refresh is enabled\n");
1152         } else {
1153                 pineview_disable_cxsr(dev);
1154                 DRM_DEBUG_KMS("Self-refresh is disabled\n");
1155         }
1156 }
1157
1158 static bool g4x_compute_wm0(struct drm_device *dev,
1159                             int plane,
1160                             const struct intel_watermark_params *display,
1161                             int display_latency_ns,
1162                             const struct intel_watermark_params *cursor,
1163                             int cursor_latency_ns,
1164                             int *plane_wm,
1165                             int *cursor_wm)
1166 {
1167         struct drm_crtc *crtc;
1168         const struct drm_display_mode *adjusted_mode;
1169         int htotal, hdisplay, clock, pixel_size;
1170         int line_time_us, line_count;
1171         int entries, tlb_miss;
1172
1173         crtc = intel_get_crtc_for_plane(dev, plane);
1174         if (!intel_crtc_active(crtc)) {
1175                 *cursor_wm = cursor->guard_size;
1176                 *plane_wm = display->guard_size;
1177                 return false;
1178         }
1179
1180         adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1181         clock = adjusted_mode->crtc_clock;
1182         htotal = adjusted_mode->htotal;
1183         hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1184         pixel_size = crtc->fb->bits_per_pixel / 8;
1185
1186         /* Use the small buffer method to calculate plane watermark */
1187         entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
1188         tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
1189         if (tlb_miss > 0)
1190                 entries += tlb_miss;
1191         entries = DIV_ROUND_UP(entries, display->cacheline_size);
1192         *plane_wm = entries + display->guard_size;
1193         if (*plane_wm > (int)display->max_wm)
1194                 *plane_wm = display->max_wm;
1195
1196         /* Use the large buffer method to calculate cursor watermark */
1197         line_time_us = ((htotal * 1000) / clock);
1198         line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
1199         entries = line_count * 64 * pixel_size;
1200         tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
1201         if (tlb_miss > 0)
1202                 entries += tlb_miss;
1203         entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1204         *cursor_wm = entries + cursor->guard_size;
1205         if (*cursor_wm > (int)cursor->max_wm)
1206                 *cursor_wm = (int)cursor->max_wm;
1207
1208         return true;
1209 }
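/*
 * Worked example for the "small buffer" plane calculation above (illustrative
 * numbers only): with a 148500 kHz clock, 4 bytes per pixel and the 5000 ns
 * default latency,
 *
 *   entries  = ((148500 * 4 / 1000) * 5000) / 1000 = 2970 bytes
 *   tlb_miss = fifo_size * cacheline_size - hdisplay * 8
 *
 * The tlb_miss term compensates for TLB misses on wide displays and is only
 * added when it comes out positive. With a 64 byte cacheline,
 * DIV_ROUND_UP(2970, 64) = 47, so *plane_wm would be 47 + guard_size,
 * clamped to the FIFO's max_wm.
 */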
1210
1211 /*
1212  * Check the wm result.
1213  *
1214  * If any calculated watermark values is larger than the maximum value that
1215  * can be programmed into the associated watermark register, that watermark
1216  * must be disabled.
1217  */
1218 static bool g4x_check_srwm(struct drm_device *dev,
1219                            int display_wm, int cursor_wm,
1220                            const struct intel_watermark_params *display,
1221                            const struct intel_watermark_params *cursor)
1222 {
1223         DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
1224                       display_wm, cursor_wm);
1225
1226         if (display_wm > display->max_wm) {
1227                 DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
1228                               display_wm, display->max_wm);
1229                 return false;
1230         }
1231
1232         if (cursor_wm > cursor->max_wm) {
1233                 DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
1234                               cursor_wm, cursor->max_wm);
1235                 return false;
1236         }
1237
1238         if (!(display_wm || cursor_wm)) {
1239                 DRM_DEBUG_KMS("SR latency is 0, disabling\n");
1240                 return false;
1241         }
1242
1243         return true;
1244 }
1245
1246 static bool g4x_compute_srwm(struct drm_device *dev,
1247                              int plane,
1248                              int latency_ns,
1249                              const struct intel_watermark_params *display,
1250                              const struct intel_watermark_params *cursor,
1251                              int *display_wm, int *cursor_wm)
1252 {
1253         struct drm_crtc *crtc;
1254         const struct drm_display_mode *adjusted_mode;
1255         int hdisplay, htotal, pixel_size, clock;
1256         unsigned long line_time_us;
1257         int line_count, line_size;
1258         int small, large;
1259         int entries;
1260
1261         if (!latency_ns) {
1262                 *display_wm = *cursor_wm = 0;
1263                 return false;
1264         }
1265
1266         crtc = intel_get_crtc_for_plane(dev, plane);
1267         adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1268         clock = adjusted_mode->crtc_clock;
1269         htotal = adjusted_mode->htotal;
1270         hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1271         pixel_size = crtc->fb->bits_per_pixel / 8;
1272
1273         line_time_us = (htotal * 1000) / clock;
1274         line_count = (latency_ns / line_time_us + 1000) / 1000;
1275         line_size = hdisplay * pixel_size;
1276
1277         /* Use the minimum of the small and large buffer method for primary */
1278         small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
1279         large = line_count * line_size;
1280
1281         entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
1282         *display_wm = entries + display->guard_size;
1283
1284         /* calculate the self-refresh watermark for display cursor */
1285         entries = line_count * pixel_size * 64;
1286         entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1287         *cursor_wm = entries + cursor->guard_size;
1288
1289         return g4x_check_srwm(dev,
1290                               *display_wm, *cursor_wm,
1291                               display, cursor);
1292 }
1293
1294 static bool vlv_compute_drain_latency(struct drm_device *dev,
1295                                      int plane,
1296                                      int *plane_prec_mult,
1297                                      int *plane_dl,
1298                                      int *cursor_prec_mult,
1299                                      int *cursor_dl)
1300 {
1301         struct drm_crtc *crtc;
1302         int clock, pixel_size;
1303         int entries;
1304
1305         crtc = intel_get_crtc_for_plane(dev, plane);
1306         if (!intel_crtc_active(crtc))
1307                 return false;
1308
1309         clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
1310         pixel_size = crtc->fb->bits_per_pixel / 8;      /* BPP */
1311
1312         entries = (clock / 1000) * pixel_size;
1313         *plane_prec_mult = (entries > 256) ?
1314                 DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
1315         *plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
1316                                                      pixel_size);
1317
1318         entries = (clock / 1000) * 4;   /* BPP is always 4 for cursor */
1319         *cursor_prec_mult = (entries > 256) ?
1320                 DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
1321         *cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);
1322
1323         return true;
1324 }
1325
1326 /*
1327  * Update drain latency registers of memory arbiter
1328  *
1329  * Valleyview SoC has a new memory arbiter and needs drain latency registers
1330  * to be programmed. Each plane has a drain latency multiplier and a drain
1331  * latency value.
1332  */
1333
1334 static void vlv_update_drain_latency(struct drm_device *dev)
1335 {
1336         struct drm_i915_private *dev_priv = dev->dev_private;
1337         int planea_prec, planea_dl, planeb_prec, planeb_dl;
1338         int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
1339         int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
1340                                                         either 16 or 32 */
1341
1342         /* For plane A, Cursor A */
1343         if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
1344                                       &cursor_prec_mult, &cursora_dl)) {
1345                 cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1346                         DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
1347                 planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1348                         DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;
1349
1350                 I915_WRITE(VLV_DDL1, cursora_prec |
1351                                 (cursora_dl << DDL_CURSORA_SHIFT) |
1352                                 planea_prec | planea_dl);
1353         }
1354
1355         /* For plane B, Cursor B */
1356         if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
1357                                       &cursor_prec_mult, &cursorb_dl)) {
1358                 cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1359                         DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
1360                 planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1361                         DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;
1362
1363                 I915_WRITE(VLV_DDL2, cursorb_prec |
1364                                 (cursorb_dl << DDL_CURSORB_SHIFT) |
1365                                 planeb_prec | planeb_dl);
1366         }
1367 }
1368
1369 #define single_plane_enabled(mask) is_power_of_2(mask)
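/*
 * single_plane_enabled() is true only when exactly one bit is set in the pipe
 * mask, e.g. 1 << PIPE_A or 1 << PIPE_B but not both, and not zero, since the
 * kernel's is_power_of_2(0) is false.
 */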
1370
1371 static void valleyview_update_wm(struct drm_crtc *crtc)
1372 {
1373         struct drm_device *dev = crtc->dev;
1374         static const int sr_latency_ns = 12000;
1375         struct drm_i915_private *dev_priv = dev->dev_private;
1376         int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1377         int plane_sr, cursor_sr;
1378         int ignore_plane_sr, ignore_cursor_sr;
1379         unsigned int enabled = 0;
1380
1381         vlv_update_drain_latency(dev);
1382
1383         if (g4x_compute_wm0(dev, PIPE_A,
1384                             &valleyview_wm_info, latency_ns,
1385                             &valleyview_cursor_wm_info, latency_ns,
1386                             &planea_wm, &cursora_wm))
1387                 enabled |= 1 << PIPE_A;
1388
1389         if (g4x_compute_wm0(dev, PIPE_B,
1390                             &valleyview_wm_info, latency_ns,
1391                             &valleyview_cursor_wm_info, latency_ns,
1392                             &planeb_wm, &cursorb_wm))
1393                 enabled |= 1 << PIPE_B;
1394
1395         if (single_plane_enabled(enabled) &&
1396             g4x_compute_srwm(dev, ffs(enabled) - 1,
1397                              sr_latency_ns,
1398                              &valleyview_wm_info,
1399                              &valleyview_cursor_wm_info,
1400                              &plane_sr, &ignore_cursor_sr) &&
1401             g4x_compute_srwm(dev, ffs(enabled) - 1,
1402                              2*sr_latency_ns,
1403                              &valleyview_wm_info,
1404                              &valleyview_cursor_wm_info,
1405                              &ignore_plane_sr, &cursor_sr)) {
1406                 I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
1407         } else {
1408                 I915_WRITE(FW_BLC_SELF_VLV,
1409                            I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
1410                 plane_sr = cursor_sr = 0;
1411         }
1412
1413         DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1414                       planea_wm, cursora_wm,
1415                       planeb_wm, cursorb_wm,
1416                       plane_sr, cursor_sr);
1417
1418         I915_WRITE(DSPFW1,
1419                    (plane_sr << DSPFW_SR_SHIFT) |
1420                    (cursorb_wm << DSPFW_CURSORB_SHIFT) |
1421                    (planeb_wm << DSPFW_PLANEB_SHIFT) |
1422                    planea_wm);
1423         I915_WRITE(DSPFW2,
1424                    (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
1425                    (cursora_wm << DSPFW_CURSORA_SHIFT));
1426         I915_WRITE(DSPFW3,
1427                    (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
1428                    (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1429 }
1430
1431 static void g4x_update_wm(struct drm_crtc *crtc)
1432 {
1433         struct drm_device *dev = crtc->dev;
1434         static const int sr_latency_ns = 12000;
1435         struct drm_i915_private *dev_priv = dev->dev_private;
1436         int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1437         int plane_sr, cursor_sr;
1438         unsigned int enabled = 0;
1439
1440         if (g4x_compute_wm0(dev, PIPE_A,
1441                             &g4x_wm_info, latency_ns,
1442                             &g4x_cursor_wm_info, latency_ns,
1443                             &planea_wm, &cursora_wm))
1444                 enabled |= 1 << PIPE_A;
1445
1446         if (g4x_compute_wm0(dev, PIPE_B,
1447                             &g4x_wm_info, latency_ns,
1448                             &g4x_cursor_wm_info, latency_ns,
1449                             &planeb_wm, &cursorb_wm))
1450                 enabled |= 1 << PIPE_B;
1451
1452         if (single_plane_enabled(enabled) &&
1453             g4x_compute_srwm(dev, ffs(enabled) - 1,
1454                              sr_latency_ns,
1455                              &g4x_wm_info,
1456                              &g4x_cursor_wm_info,
1457                              &plane_sr, &cursor_sr)) {
1458                 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
1459         } else {
1460                 I915_WRITE(FW_BLC_SELF,
1461                            I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
1462                 plane_sr = cursor_sr = 0;
1463         }
1464
1465         DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1466                       planea_wm, cursora_wm,
1467                       planeb_wm, cursorb_wm,
1468                       plane_sr, cursor_sr);
1469
1470         I915_WRITE(DSPFW1,
1471                    (plane_sr << DSPFW_SR_SHIFT) |
1472                    (cursorb_wm << DSPFW_CURSORB_SHIFT) |
1473                    (planeb_wm << DSPFW_PLANEB_SHIFT) |
1474                    planea_wm);
1475         I915_WRITE(DSPFW2,
1476                    (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
1477                    (cursora_wm << DSPFW_CURSORA_SHIFT));
1478         /* HPLL off in SR has some issues on G4x... disable it */
1479         I915_WRITE(DSPFW3,
1480                    (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
1481                    (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1482 }
1483
1484 static void i965_update_wm(struct drm_crtc *unused_crtc)
1485 {
1486         struct drm_device *dev = unused_crtc->dev;
1487         struct drm_i915_private *dev_priv = dev->dev_private;
1488         struct drm_crtc *crtc;
1489         int srwm = 1;
1490         int cursor_sr = 16;
1491
1492         /* Calc sr entries for one plane configs */
1493         crtc = single_enabled_crtc(dev);
1494         if (crtc) {
1495                 /* self-refresh has much higher latency */
1496                 static const int sr_latency_ns = 12000;
1497                 const struct drm_display_mode *adjusted_mode =
1498                         &to_intel_crtc(crtc)->config.adjusted_mode;
1499                 int clock = adjusted_mode->crtc_clock;
1500                 int htotal = adjusted_mode->htotal;
1501                 int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1502                 int pixel_size = crtc->fb->bits_per_pixel / 8;
1503                 unsigned long line_time_us;
1504                 int entries;
1505
1506                 line_time_us = ((htotal * 1000) / clock);
1507
1508                 /* Use ns/us then divide to preserve precision */
1509                 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1510                         pixel_size * hdisplay;
1511                 entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
1512                 srwm = I965_FIFO_SIZE - entries;
1513                 if (srwm < 0)
1514                         srwm = 1;
1515                 srwm &= 0x1ff;
1516                 DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
1517                               entries, srwm);
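                /*
                 * Ballpark with invented numbers, only to show the units: a
                 * 14 us line time and the 12000 ns SR latency give
                 *   ((12000 / 14) + 1000) / 1000 = 1 line,
                 * so a 1920 pixel wide 4 bpp plane needs
                 *   1 * 4 * 1920 = 7680 bytes = 120 cachelines,
                 * and srwm above is simply the FIFO size minus those entries.
                 */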
1518
1519                 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1520                         pixel_size * 64;
1521                 entries = DIV_ROUND_UP(entries,
1522                                           i965_cursor_wm_info.cacheline_size);
1523                 cursor_sr = i965_cursor_wm_info.fifo_size -
1524                         (entries + i965_cursor_wm_info.guard_size);
1525
1526                 if (cursor_sr > i965_cursor_wm_info.max_wm)
1527                         cursor_sr = i965_cursor_wm_info.max_wm;
1528
1529                 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
1530                               "cursor %d\n", srwm, cursor_sr);
1531
1532                 if (IS_CRESTLINE(dev))
1533                         I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
1534         } else {
1535                 /* Turn off self refresh if both pipes are enabled */
1536                 if (IS_CRESTLINE(dev))
1537                         I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
1538                                    & ~FW_BLC_SELF_EN);
1539         }
1540
1541         DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
1542                       srwm);
1543
1544         /* 965 has limitations... */
1545         I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
1546                    (8 << 16) | (8 << 8) | (8 << 0));
1547         I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
1548         /* update cursor SR watermark */
1549         I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1550 }
1551
1552 static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1553 {
1554         struct drm_device *dev = unused_crtc->dev;
1555         struct drm_i915_private *dev_priv = dev->dev_private;
1556         const struct intel_watermark_params *wm_info;
1557         uint32_t fwater_lo;
1558         uint32_t fwater_hi;
1559         int cwm, srwm = 1;
1560         int fifo_size;
1561         int planea_wm, planeb_wm;
1562         struct drm_crtc *crtc, *enabled = NULL;
1563
1564         if (IS_I945GM(dev))
1565                 wm_info = &i945_wm_info;
1566         else if (!IS_GEN2(dev))
1567                 wm_info = &i915_wm_info;
1568         else
1569                 wm_info = &i855_wm_info;
1570
1571         fifo_size = dev_priv->display.get_fifo_size(dev, 0);
1572         crtc = intel_get_crtc_for_plane(dev, 0);
1573         if (intel_crtc_active(crtc)) {
1574                 const struct drm_display_mode *adjusted_mode;
1575                 int cpp = crtc->fb->bits_per_pixel / 8;
1576                 if (IS_GEN2(dev))
1577                         cpp = 4;
1578
1579                 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1580                 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1581                                                wm_info, fifo_size, cpp,
1582                                                latency_ns);
1583                 enabled = crtc;
1584         } else
1585                 planea_wm = fifo_size - wm_info->guard_size;
1586
1587         fifo_size = dev_priv->display.get_fifo_size(dev, 1);
1588         crtc = intel_get_crtc_for_plane(dev, 1);
1589         if (intel_crtc_active(crtc)) {
1590                 const struct drm_display_mode *adjusted_mode;
1591                 int cpp = crtc->fb->bits_per_pixel / 8;
1592                 if (IS_GEN2(dev))
1593                         cpp = 4;
1594
1595                 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1596                 planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1597                                                wm_info, fifo_size, cpp,
1598                                                latency_ns);
1599                 if (enabled == NULL)
1600                         enabled = crtc;
1601                 else
1602                         enabled = NULL;
1603         } else
1604                 planeb_wm = fifo_size - wm_info->guard_size;
1605
1606         DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
1607
1608         /*
1609          * Overlay gets an aggressive default since video jitter is bad.
1610          */
1611         cwm = 2;
1612
1613         /* Play safe and disable self-refresh before adjusting watermarks. */
1614         if (IS_I945G(dev) || IS_I945GM(dev))
1615                 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
1616         else if (IS_I915GM(dev))
1617                 I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
1618
1619         /* Calc sr entries for one plane configs */
1620         if (HAS_FW_BLC(dev) && enabled) {
1621                 /* self-refresh has much higher latency */
1622                 static const int sr_latency_ns = 6000;
1623                 const struct drm_display_mode *adjusted_mode =
1624                         &to_intel_crtc(enabled)->config.adjusted_mode;
1625                 int clock = adjusted_mode->crtc_clock;
1626                 int htotal = adjusted_mode->htotal;
1627                 int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1628                 int pixel_size = enabled->fb->bits_per_pixel / 8;
1629                 unsigned long line_time_us;
1630                 int entries;
1631
1632                 line_time_us = (htotal * 1000) / clock;
1633
1634                 /* Use ns/us then divide to preserve precision */
1635                 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1636                         pixel_size * hdisplay;
1637                 entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
1638                 DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
1639                 srwm = wm_info->fifo_size - entries;
1640                 if (srwm < 0)
1641                         srwm = 1;
1642
1643                 if (IS_I945G(dev) || IS_I945GM(dev))
1644                         I915_WRITE(FW_BLC_SELF,
1645                                    FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
1646                 else if (IS_I915GM(dev))
1647                         I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
1648         }
1649
1650         DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
1651                       planea_wm, planeb_wm, cwm, srwm);
1652
1653         fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
1654         fwater_hi = (cwm & 0x1f);
1655
1656         /* Set request length to 8 cachelines per fetch */
1657         fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
1658         fwater_hi = fwater_hi | (1 << 8);
1659
1660         I915_WRITE(FW_BLC, fwater_lo);
1661         I915_WRITE(FW_BLC2, fwater_hi);
1662
1663         if (HAS_FW_BLC(dev)) {
1664                 if (enabled) {
1665                         if (IS_I945G(dev) || IS_I945GM(dev))
1666                                 I915_WRITE(FW_BLC_SELF,
1667                                            FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
1668                         else if (IS_I915GM(dev))
1669                                 I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
1670                         DRM_DEBUG_KMS("memory self refresh enabled\n");
1671                 } else
1672                         DRM_DEBUG_KMS("memory self refresh disabled\n");
1673         }
1674 }
1675
1676 static void i830_update_wm(struct drm_crtc *unused_crtc)
1677 {
1678         struct drm_device *dev = unused_crtc->dev;
1679         struct drm_i915_private *dev_priv = dev->dev_private;
1680         struct drm_crtc *crtc;
1681         const struct drm_display_mode *adjusted_mode;
1682         uint32_t fwater_lo;
1683         int planea_wm;
1684
1685         crtc = single_enabled_crtc(dev);
1686         if (crtc == NULL)
1687                 return;
1688
1689         adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1690         planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1691                                        &i830_wm_info,
1692                                        dev_priv->display.get_fifo_size(dev, 0),
1693                                        4, latency_ns);
1694         fwater_lo = I915_READ(FW_BLC) & ~0xfff;
1695         fwater_lo |= (3<<8) | planea_wm;
1696
1697         DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
1698
1699         I915_WRITE(FW_BLC, fwater_lo);
1700 }
1701
1702 /*
1703  * Check the wm result.
1704  *
1705  * If any calculated watermark value is larger than the maximum value that
1706  * can be programmed into the associated watermark register, that watermark
1707  * must be disabled.
1708  */
1709 static bool ironlake_check_srwm(struct drm_device *dev, int level,
1710                                 int fbc_wm, int display_wm, int cursor_wm,
1711                                 const struct intel_watermark_params *display,
1712                                 const struct intel_watermark_params *cursor)
1713 {
1714         struct drm_i915_private *dev_priv = dev->dev_private;
1715
1716         DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
1717                       " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
1718
1719         if (fbc_wm > SNB_FBC_MAX_SRWM) {
1720                 DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
1721                               fbc_wm, SNB_FBC_MAX_SRWM, level);
1722
1723                 /* FBC has its own way to disable the FBC WM */
1724                 I915_WRITE(DISP_ARB_CTL,
1725                            I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
1726                 return false;
1727         } else if (INTEL_INFO(dev)->gen >= 6) {
1728                 /* enable FBC WM (except on ILK, where it must remain off) */
1729                 I915_WRITE(DISP_ARB_CTL,
1730                            I915_READ(DISP_ARB_CTL) & ~DISP_FBC_WM_DIS);
1731         }
1732
1733         if (display_wm > display->max_wm) {
1734                 DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
1735                               display_wm, display->max_wm, level);
1736                 return false;
1737         }
1738
1739         if (cursor_wm > cursor->max_wm) {
1740                 DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
1741                               cursor_wm, cursor->max_wm, level);
1742                 return false;
1743         }
1744
1745         if (!(fbc_wm || display_wm || cursor_wm)) {
1746                 DRM_DEBUG_KMS("all watermarks are 0, disabling wm%d+\n", level);
1747                 return false;
1748         }
1749
1750         return true;
1751 }
1752
1753 /*
1754  * Compute the watermark values for WM[1-3].
1755  */
1756 static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
1757                                   int latency_ns,
1758                                   const struct intel_watermark_params *display,
1759                                   const struct intel_watermark_params *cursor,
1760                                   int *fbc_wm, int *display_wm, int *cursor_wm)
1761 {
1762         struct drm_crtc *crtc;
1763         const struct drm_display_mode *adjusted_mode;
1764         unsigned long line_time_us;
1765         int hdisplay, htotal, pixel_size, clock;
1766         int line_count, line_size;
1767         int small, large;
1768         int entries;
1769
1770         if (!latency_ns) {
1771                 *fbc_wm = *display_wm = *cursor_wm = 0;
1772                 return false;
1773         }
1774
1775         crtc = intel_get_crtc_for_plane(dev, plane);
1776         adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1777         clock = adjusted_mode->crtc_clock;
1778         htotal = adjusted_mode->htotal;
1779         hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1780         pixel_size = crtc->fb->bits_per_pixel / 8;
1781
1782         line_time_us = (htotal * 1000) / clock;
1783         line_count = (latency_ns / line_time_us + 1000) / 1000;
1784         line_size = hdisplay * pixel_size;
1785
1786         /* Use the minimum of the small and large buffer method for primary */
1787         small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
1788         large = line_count * line_size;
1789
1790         entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
1791         *display_wm = entries + display->guard_size;
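        /*
         * As a sketch with invented numbers (150000 kHz clock, 4 bpp,
         * 1920 wide plane, 14 us line time, 2500 ns latency):
         *   small = ((150000 * 4 / 1000) * 2500) / 1000 = 1500 bytes
         *   large = 1 line * 7680 bytes
         * so the small-buffer method wins and display_wm is
         * DIV_ROUND_UP(1500, 64) + guard = 24 + guard entries
         * (assuming the usual 64 byte cacheline).
         */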
1792
1793         /*
1794          * Spec says:
1795          * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
1796          */
1797         *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
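        /*
         * E.g. (hypothetically) *display_wm = 48 on a 1920 pixel wide 4 bpp
         * plane (line_size = 7680 bytes):
         *   fbc_wm = DIV_ROUND_UP(48 * 64, 7680) + 2 = 1 + 2 = 3 lines
         */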
1798
1799         /* calculate the self-refresh watermark for display cursor */
1800         entries = line_count * pixel_size * 64;
1801         entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1802         *cursor_wm = entries + cursor->guard_size;
1803
1804         return ironlake_check_srwm(dev, level,
1805                                    *fbc_wm, *display_wm, *cursor_wm,
1806                                    display, cursor);
1807 }
1808
1809 static void ironlake_update_wm(struct drm_crtc *crtc)
1810 {
1811         struct drm_device *dev = crtc->dev;
1812         struct drm_i915_private *dev_priv = dev->dev_private;
1813         int fbc_wm, plane_wm, cursor_wm;
1814         unsigned int enabled;
1815
1816         enabled = 0;
1817         if (g4x_compute_wm0(dev, PIPE_A,
1818                             &ironlake_display_wm_info,
1819                             dev_priv->wm.pri_latency[0] * 100,
1820                             &ironlake_cursor_wm_info,
1821                             dev_priv->wm.cur_latency[0] * 100,
1822                             &plane_wm, &cursor_wm)) {
1823                 I915_WRITE(WM0_PIPEA_ILK,
1824                            (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
1825                 DRM_DEBUG_KMS("FIFO watermarks for pipe A -"
1826                               " plane %d, cursor: %d\n",
1827                               plane_wm, cursor_wm);
1828                 enabled |= 1 << PIPE_A;
1829         }
1830
1831         if (g4x_compute_wm0(dev, PIPE_B,
1832                             &ironlake_display_wm_info,
1833                             dev_priv->wm.pri_latency[0] * 100,
1834                             &ironlake_cursor_wm_info,
1835                             dev_priv->wm.cur_latency[0] * 100,
1836                             &plane_wm, &cursor_wm)) {
1837                 I915_WRITE(WM0_PIPEB_ILK,
1838                            (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
1839                 DRM_DEBUG_KMS("FIFO watermarks for pipe B -"
1840                               " plane %d, cursor: %d\n",
1841                               plane_wm, cursor_wm);
1842                 enabled |= 1 << PIPE_B;
1843         }
1844
1845         /*
1846          * Calculate and update the self-refresh watermark only when one
1847          * display plane is used.
1848          */
1849         I915_WRITE(WM3_LP_ILK, 0);
1850         I915_WRITE(WM2_LP_ILK, 0);
1851         I915_WRITE(WM1_LP_ILK, 0);
1852
1853         if (!single_plane_enabled(enabled))
1854                 return;
1855         enabled = ffs(enabled) - 1;
1856
1857         /* WM1 */
1858         if (!ironlake_compute_srwm(dev, 1, enabled,
1859                                    dev_priv->wm.pri_latency[1] * 500,
1860                                    &ironlake_display_srwm_info,
1861                                    &ironlake_cursor_srwm_info,
1862                                    &fbc_wm, &plane_wm, &cursor_wm))
1863                 return;
1864
1865         I915_WRITE(WM1_LP_ILK,
1866                    WM1_LP_SR_EN |
1867                    (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
1868                    (fbc_wm << WM1_LP_FBC_SHIFT) |
1869                    (plane_wm << WM1_LP_SR_SHIFT) |
1870                    cursor_wm);
1871
1872         /* WM2 */
1873         if (!ironlake_compute_srwm(dev, 2, enabled,
1874                                    dev_priv->wm.pri_latency[2] * 500,
1875                                    &ironlake_display_srwm_info,
1876                                    &ironlake_cursor_srwm_info,
1877                                    &fbc_wm, &plane_wm, &cursor_wm))
1878                 return;
1879
1880         I915_WRITE(WM2_LP_ILK,
1881                    WM2_LP_EN |
1882                    (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
1883                    (fbc_wm << WM1_LP_FBC_SHIFT) |
1884                    (plane_wm << WM1_LP_SR_SHIFT) |
1885                    cursor_wm);
1886
1887         /*
1888          * WM3 is unsupported on ILK, probably because we don't have latency
1889          * data for that power state
1890          */
1891 }
1892
1893 static void sandybridge_update_wm(struct drm_crtc *crtc)
1894 {
1895         struct drm_device *dev = crtc->dev;
1896         struct drm_i915_private *dev_priv = dev->dev_private;
1897         int latency = dev_priv->wm.pri_latency[0] * 100;        /* in 0.1us units */
1898         u32 val;
1899         int fbc_wm, plane_wm, cursor_wm;
1900         unsigned int enabled;
1901
1902         enabled = 0;
1903         if (g4x_compute_wm0(dev, PIPE_A,
1904                             &sandybridge_display_wm_info, latency,
1905                             &sandybridge_cursor_wm_info, latency,
1906                             &plane_wm, &cursor_wm)) {
1907                 val = I915_READ(WM0_PIPEA_ILK);
1908                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1909                 I915_WRITE(WM0_PIPEA_ILK, val |
1910                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1911                 DRM_DEBUG_KMS("FIFO watermarks for pipe A -"
1912                               " plane %d, cursor: %d\n",
1913                               plane_wm, cursor_wm);
1914                 enabled |= 1 << PIPE_A;
1915         }
1916
1917         if (g4x_compute_wm0(dev, PIPE_B,
1918                             &sandybridge_display_wm_info, latency,
1919                             &sandybridge_cursor_wm_info, latency,
1920                             &plane_wm, &cursor_wm)) {
1921                 val = I915_READ(WM0_PIPEB_ILK);
1922                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1923                 I915_WRITE(WM0_PIPEB_ILK, val |
1924                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1925                 DRM_DEBUG_KMS("FIFO watermarks for pipe B -"
1926                               " plane %d, cursor: %d\n",
1927                               plane_wm, cursor_wm);
1928                 enabled |= 1 << PIPE_B;
1929         }
1930
1931         /*
1932          * Calculate and update the self-refresh watermark only when one
1933          * display plane is used.
1934          *
1935          * SNB supports 3 levels of watermarks.
1936          *
1937          * WM1/WM2/WM3 watermarks have to be enabled in ascending order
1938          * and disabled in descending order.
1939          *
1940          */
1941         I915_WRITE(WM3_LP_ILK, 0);
1942         I915_WRITE(WM2_LP_ILK, 0);
1943         I915_WRITE(WM1_LP_ILK, 0);
1944
1945         if (!single_plane_enabled(enabled) ||
1946             dev_priv->sprite_scaling_enabled)
1947                 return;
1948         enabled = ffs(enabled) - 1;
1949
1950         /* WM1 */
1951         if (!ironlake_compute_srwm(dev, 1, enabled,
1952                                    dev_priv->wm.pri_latency[1] * 500,
1953                                    &sandybridge_display_srwm_info,
1954                                    &sandybridge_cursor_srwm_info,
1955                                    &fbc_wm, &plane_wm, &cursor_wm))
1956                 return;
1957
1958         I915_WRITE(WM1_LP_ILK,
1959                    WM1_LP_SR_EN |
1960                    (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
1961                    (fbc_wm << WM1_LP_FBC_SHIFT) |
1962                    (plane_wm << WM1_LP_SR_SHIFT) |
1963                    cursor_wm);
1964
1965         /* WM2 */
1966         if (!ironlake_compute_srwm(dev, 2, enabled,
1967                                    dev_priv->wm.pri_latency[2] * 500,
1968                                    &sandybridge_display_srwm_info,
1969                                    &sandybridge_cursor_srwm_info,
1970                                    &fbc_wm, &plane_wm, &cursor_wm))
1971                 return;
1972
1973         I915_WRITE(WM2_LP_ILK,
1974                    WM2_LP_EN |
1975                    (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
1976                    (fbc_wm << WM1_LP_FBC_SHIFT) |
1977                    (plane_wm << WM1_LP_SR_SHIFT) |
1978                    cursor_wm);
1979
1980         /* WM3 */
1981         if (!ironlake_compute_srwm(dev, 3, enabled,
1982                                    dev_priv->wm.pri_latency[3] * 500,
1983                                    &sandybridge_display_srwm_info,
1984                                    &sandybridge_cursor_srwm_info,
1985                                    &fbc_wm, &plane_wm, &cursor_wm))
1986                 return;
1987
1988         I915_WRITE(WM3_LP_ILK,
1989                    WM3_LP_EN |
1990                    (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) |
1991                    (fbc_wm << WM1_LP_FBC_SHIFT) |
1992                    (plane_wm << WM1_LP_SR_SHIFT) |
1993                    cursor_wm);
1994 }
1995
1996 static void ivybridge_update_wm(struct drm_crtc *crtc)
1997 {
1998         struct drm_device *dev = crtc->dev;
1999         struct drm_i915_private *dev_priv = dev->dev_private;
2000         int latency = dev_priv->wm.pri_latency[0] * 100;        /* in 0.1us units */
2001         u32 val;
2002         int fbc_wm, plane_wm, cursor_wm;
2003         int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
2004         unsigned int enabled;
2005
2006         enabled = 0;
2007         if (g4x_compute_wm0(dev, PIPE_A,
2008                             &sandybridge_display_wm_info, latency,
2009                             &sandybridge_cursor_wm_info, latency,
2010                             &plane_wm, &cursor_wm)) {
2011                 val = I915_READ(WM0_PIPEA_ILK);
2012                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
2013                 I915_WRITE(WM0_PIPEA_ILK, val |
2014                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
2015                 DRM_DEBUG_KMS("FIFO watermarks for pipe A -"
2016                               " plane %d, cursor: %d\n",
2017                               plane_wm, cursor_wm);
2018                 enabled |= 1 << PIPE_A;
2019         }
2020
2021         if (g4x_compute_wm0(dev, PIPE_B,
2022                             &sandybridge_display_wm_info, latency,
2023                             &sandybridge_cursor_wm_info, latency,
2024                             &plane_wm, &cursor_wm)) {
2025                 val = I915_READ(WM0_PIPEB_ILK);
2026                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
2027                 I915_WRITE(WM0_PIPEB_ILK, val |
2028                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
2029                 DRM_DEBUG_KMS("FIFO watermarks for pipe B -"
2030                               " plane %d, cursor: %d\n",
2031                               plane_wm, cursor_wm);
2032                 enabled |= 1 << PIPE_B;
2033         }
2034
2035         if (g4x_compute_wm0(dev, PIPE_C,
2036                             &sandybridge_display_wm_info, latency,
2037                             &sandybridge_cursor_wm_info, latency,
2038                             &plane_wm, &cursor_wm)) {
2039                 val = I915_READ(WM0_PIPEC_IVB);
2040                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
2041                 I915_WRITE(WM0_PIPEC_IVB, val |
2042                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
2043                 DRM_DEBUG_KMS("FIFO watermarks for pipe C -"
2044                               " plane %d, cursor: %d\n",
2045                               plane_wm, cursor_wm);
2046                 enabled |= 1 << PIPE_C;
2047         }
2048
2049         /*
2050          * Calculate and update the self-refresh watermark only when one
2051          * display plane is used.
2052          *
2053          * IVB, like SNB, supports 3 levels of watermarks.
2054          *
2055          * WM1/WM2/WM3 watermarks have to be enabled in ascending order
2056          * and disabled in descending order.
2057          *
2058          */
2059         I915_WRITE(WM3_LP_ILK, 0);
2060         I915_WRITE(WM2_LP_ILK, 0);
2061         I915_WRITE(WM1_LP_ILK, 0);
2062
2063         if (!single_plane_enabled(enabled) ||
2064             dev_priv->sprite_scaling_enabled)
2065                 return;
2066         enabled = ffs(enabled) - 1;
2067
2068         /* WM1 */
2069         if (!ironlake_compute_srwm(dev, 1, enabled,
2070                                    dev_priv->wm.pri_latency[1] * 500,
2071                                    &sandybridge_display_srwm_info,
2072                                    &sandybridge_cursor_srwm_info,
2073                                    &fbc_wm, &plane_wm, &cursor_wm))
2074                 return;
2075
2076         I915_WRITE(WM1_LP_ILK,
2077                    WM1_LP_SR_EN |
2078                    (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
2079                    (fbc_wm << WM1_LP_FBC_SHIFT) |
2080                    (plane_wm << WM1_LP_SR_SHIFT) |
2081                    cursor_wm);
2082
2083         /* WM2 */
2084         if (!ironlake_compute_srwm(dev, 2, enabled,
2085                                    dev_priv->wm.pri_latency[2] * 500,
2086                                    &sandybridge_display_srwm_info,
2087                                    &sandybridge_cursor_srwm_info,
2088                                    &fbc_wm, &plane_wm, &cursor_wm))
2089                 return;
2090
2091         I915_WRITE(WM2_LP_ILK,
2092                    WM2_LP_EN |
2093                    (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
2094                    (fbc_wm << WM1_LP_FBC_SHIFT) |
2095                    (plane_wm << WM1_LP_SR_SHIFT) |
2096                    cursor_wm);
2097
2098         /* WM3, note we have to correct the cursor latency */
2099         if (!ironlake_compute_srwm(dev, 3, enabled,
2100                                    dev_priv->wm.pri_latency[3] * 500,
2101                                    &sandybridge_display_srwm_info,
2102                                    &sandybridge_cursor_srwm_info,
2103                                    &fbc_wm, &plane_wm, &ignore_cursor_wm) ||
2104             !ironlake_compute_srwm(dev, 3, enabled,
2105                                    dev_priv->wm.cur_latency[3] * 500,
2106                                    &sandybridge_display_srwm_info,
2107                                    &sandybridge_cursor_srwm_info,
2108                                    &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
2109                 return;
2110
2111         I915_WRITE(WM3_LP_ILK,
2112                    WM3_LP_EN |
2113                    (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) |
2114                    (fbc_wm << WM1_LP_FBC_SHIFT) |
2115                    (plane_wm << WM1_LP_SR_SHIFT) |
2116                    cursor_wm);
2117 }
2118
2119 static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
2120                                     struct drm_crtc *crtc)
2121 {
2122         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2123         uint32_t pixel_rate;
2124
2125         pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;
2126
2127         /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
2128          * adjust the pixel_rate here. */
2129
2130         if (intel_crtc->config.pch_pfit.enabled) {
2131                 uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
2132                 uint32_t pfit_size = intel_crtc->config.pch_pfit.size;
2133
2134                 pipe_w = intel_crtc->config.pipe_src_w;
2135                 pipe_h = intel_crtc->config.pipe_src_h;
2136                 pfit_w = (pfit_size >> 16) & 0xFFFF;
2137                 pfit_h = pfit_size & 0xFFFF;
2138                 if (pipe_w < pfit_w)
2139                         pipe_w = pfit_w;
2140                 if (pipe_h < pfit_h)
2141                         pipe_h = pfit_h;
2142
2143                 pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
2144                                      pfit_w * pfit_h);
2145         }
2146
2147         return pixel_rate;
2148 }
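/*
 * Example with invented numbers: a 1920x1200 pipe source panel-fitted down
 * to a 1600x1000 active area with a 100000 kHz pixel clock gives
 *
 *   pixel_rate = 100000 * (1920 * 1200) / (1600 * 1000) = 144000 kHz
 *
 * i.e. the effective rate the planes must be fetched at goes up when the
 * pfit downscales.
 */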
2149
2150 /* latency must be in 0.1us units. */
2151 static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
2152                                uint32_t latency)
2153 {
2154         uint64_t ret;
2155
2156         if (WARN(latency == 0, "Latency value missing\n"))
2157                 return UINT_MAX;
2158
2159         ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
2160         ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;
2161
2162         return ret;
2163 }
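/*
 * Sketch of ilk_wm_method1() with made-up numbers: a 150000 kHz pixel rate,
 * 4 bytes per pixel and 5 us of latency (50 in 0.1us units) give
 *
 *   ret = 150000 * 4 * 50                            = 30000000
 *   ret = DIV_ROUND_UP_ULL(30000000, 64 * 10000) + 2 = 47 + 2 = 49
 *
 * i.e. roughly how many 64 byte FIFO blocks drain during the latency window,
 * plus a two-block guard.
 */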
2164
2165 /* latency must be in 0.1us units. */
2166 static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
2167                                uint32_t horiz_pixels, uint8_t bytes_per_pixel,
2168                                uint32_t latency)
2169 {
2170         uint32_t ret;
2171
2172         if (WARN(latency == 0, "Latency value missing\n"))
2173                 return UINT_MAX;
2174
2175         ret = (latency * pixel_rate) / (pipe_htotal * 10000);
2176         ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
2177         ret = DIV_ROUND_UP(ret, 64) + 2;
2178         return ret;
2179 }
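/*
 * ilk_wm_method2() with the same hypothetical numbers plus a 2200 pixel
 * htotal and a 1920 pixel wide plane:
 *
 *   lines = (50 * 150000) / (2200 * 10000) = 0   -> charge (0 + 1) full lines
 *   bytes = 1 * 1920 * 4 = 7680
 *   ret   = DIV_ROUND_UP(7680, 64) + 2 = 120 + 2 = 122
 *
 * Method 2 always charges whole lines, so for short latencies it tends to be
 * the larger of the two and the min(method1, method2) taken by the callers
 * picks method 1.
 */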
2180
2181 static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
2182                            uint8_t bytes_per_pixel)
2183 {
2184         return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
2185 }
2186
2187 struct hsw_pipe_wm_parameters {
2188         bool active;
2189         uint32_t pipe_htotal;
2190         uint32_t pixel_rate;
2191         struct intel_plane_wm_parameters pri;
2192         struct intel_plane_wm_parameters spr;
2193         struct intel_plane_wm_parameters cur;
2194 };
2195
2196 struct hsw_wm_maximums {
2197         uint16_t pri;
2198         uint16_t spr;
2199         uint16_t cur;
2200         uint16_t fbc;
2201 };
2202
2203 /* used in computing the new watermarks state */
2204 struct intel_wm_config {
2205         unsigned int num_pipes_active;
2206         bool sprites_enabled;
2207         bool sprites_scaled;
2208 };
2209
2210 /*
2211  * For both WM_PIPE and WM_LP.
2212  * mem_value must be in 0.1us units.
2213  */
2214 static uint32_t ilk_compute_pri_wm(const struct hsw_pipe_wm_parameters *params,
2215                                    uint32_t mem_value,
2216                                    bool is_lp)
2217 {
2218         uint32_t method1, method2;
2219
2220         if (!params->active || !params->pri.enabled)
2221                 return 0;
2222
2223         method1 = ilk_wm_method1(params->pixel_rate,
2224                                  params->pri.bytes_per_pixel,
2225                                  mem_value);
2226
2227         if (!is_lp)
2228                 return method1;
2229
2230         method2 = ilk_wm_method2(params->pixel_rate,
2231                                  params->pipe_htotal,
2232                                  params->pri.horiz_pixels,
2233                                  params->pri.bytes_per_pixel,
2234                                  mem_value);
2235
2236         return min(method1, method2);
2237 }
2238
2239 /*
2240  * For both WM_PIPE and WM_LP.
2241  * mem_value must be in 0.1us units.
2242  */
2243 static uint32_t ilk_compute_spr_wm(const struct hsw_pipe_wm_parameters *params,
2244                                    uint32_t mem_value)
2245 {
2246         uint32_t method1, method2;
2247
2248         if (!params->active || !params->spr.enabled)
2249                 return 0;
2250
2251         method1 = ilk_wm_method1(params->pixel_rate,
2252                                  params->spr.bytes_per_pixel,
2253                                  mem_value);
2254         method2 = ilk_wm_method2(params->pixel_rate,
2255                                  params->pipe_htotal,
2256                                  params->spr.horiz_pixels,
2257                                  params->spr.bytes_per_pixel,
2258                                  mem_value);
2259         return min(method1, method2);
2260 }
2261
2262 /*
2263  * For both WM_PIPE and WM_LP.
2264  * mem_value must be in 0.1us units.
2265  */
2266 static uint32_t ilk_compute_cur_wm(const struct hsw_pipe_wm_parameters *params,
2267                                    uint32_t mem_value)
2268 {
2269         if (!params->active || !params->cur.enabled)
2270                 return 0;
2271
2272         return ilk_wm_method2(params->pixel_rate,
2273                               params->pipe_htotal,
2274                               params->cur.horiz_pixels,
2275                               params->cur.bytes_per_pixel,
2276                               mem_value);
2277 }
2278
2279 /* Only for WM_LP. */
2280 static uint32_t ilk_compute_fbc_wm(const struct hsw_pipe_wm_parameters *params,
2281                                    uint32_t pri_val)
2282 {
2283         if (!params->active || !params->pri.enabled)
2284                 return 0;
2285
2286         return ilk_wm_fbc(pri_val,
2287                           params->pri.horiz_pixels,
2288                           params->pri.bytes_per_pixel);
2289 }
2290
2291 static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
2292 {
2293         if (INTEL_INFO(dev)->gen >= 7)
2294                 return 768;
2295         else
2296                 return 512;
2297 }
2298
2299 /* Calculate the maximum primary/sprite plane watermark */
2300 static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
2301                                      int level,
2302                                      const struct intel_wm_config *config,
2303                                      enum intel_ddb_partitioning ddb_partitioning,
2304                                      bool is_sprite)
2305 {
2306         unsigned int fifo_size = ilk_display_fifo_size(dev);
2307         unsigned int max;
2308
2309         /* if sprites aren't enabled, sprites get nothing */
2310         if (is_sprite && !config->sprites_enabled)
2311                 return 0;
2312
2313         /* HSW allows LP1+ watermarks even with multiple pipes */
2314         if (level == 0 || config->num_pipes_active > 1) {
2315                 fifo_size /= INTEL_INFO(dev)->num_pipes;
2316
2317                 /*
2318                  * For some reason the non self refresh
2319                  * FIFO size is only half of the self
2320                  * refresh FIFO size on ILK/SNB.
2321                  */
2322                 if (INTEL_INFO(dev)->gen <= 6)
2323                         fifo_size /= 2;
2324         }
2325
2326         if (config->sprites_enabled) {
2327                 /* level 0 is always calculated with 1:1 split */
2328                 if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
2329                         if (is_sprite)
2330                                 fifo_size *= 5;
2331                         fifo_size /= 6;
2332                 } else {
2333                         fifo_size /= 2;
2334                 }
2335         }
2336
2337         /* clamp to max that the registers can hold */
2338         if (INTEL_INFO(dev)->gen >= 7)
2339                 /* IVB/HSW primary/sprite plane watermarks */
2340                 max = level == 0 ? 127 : 1023;
2341         else if (!is_sprite)
2342                 /* ILK/SNB primary plane watermarks */
2343                 max = level == 0 ? 127 : 511;
2344         else
2345                 /* ILK/SNB sprite plane watermarks */
2346                 max = level == 0 ? 63 : 255;
2347
2348         return min(fifo_size, max);
2349 }
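/*
 * Rough numbers for the 5/6 split above (hypothetical single active pipe on
 * gen7+, 768 entry DDB, sprites enabled, an LP1+ level):
 *
 *   primary: 768 * 1 / 6 = 128 entries
 *   sprite:  768 * 5 / 6 = 640 entries (well under the 1023 register max)
 *
 * With 1:1 (1/2) partitioning both planes would get 384 entries instead.
 */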
2350
2351 /* Calculate the maximum cursor plane watermark */
2352 static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
2353                                       int level,
2354                                       const struct intel_wm_config *config)
2355 {
2356         /* HSW LP1+ watermarks w/ multiple pipes */
2357         if (level > 0 && config->num_pipes_active > 1)
2358                 return 64;
2359
2360         /* otherwise just report max that registers can hold */
2361         if (INTEL_INFO(dev)->gen >= 7)
2362                 return level == 0 ? 63 : 255;
2363         else
2364                 return level == 0 ? 31 : 63;
2365 }
2366
2367 /* Calculate the maximum FBC watermark */
2368 static unsigned int ilk_fbc_wm_max(void)
2369 {
2370         /* max that registers can hold */
2371         return 15;
2372 }
2373
2374 static void ilk_wm_max(struct drm_device *dev,
2375                        int level,
2376                        const struct intel_wm_config *config,
2377                        enum intel_ddb_partitioning ddb_partitioning,
2378                        struct hsw_wm_maximums *max)
2379 {
2380         max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
2381         max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
2382         max->cur = ilk_cursor_wm_max(dev, level, config);
2383         max->fbc = ilk_fbc_wm_max();
2384 }
2385
2386 static bool ilk_check_wm(int level,
2387                          const struct hsw_wm_maximums *max,
2388                          struct intel_wm_level *result)
2389 {
2390         bool ret;
2391
2392         /* already determined to be invalid? */
2393         if (!result->enable)
2394                 return false;
2395
2396         result->enable = result->pri_val <= max->pri &&
2397                          result->spr_val <= max->spr &&
2398                          result->cur_val <= max->cur;
2399
2400         ret = result->enable;
2401
2402         /*
2403          * HACK until we can pre-compute everything,
2404          * and thus fail gracefully if LP0 watermarks
2405          * are exceeded...
2406          */
2407         if (level == 0 && !result->enable) {
2408                 if (result->pri_val > max->pri)
2409                         DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
2410                                       level, result->pri_val, max->pri);
2411                 if (result->spr_val > max->spr)
2412                         DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
2413                                       level, result->spr_val, max->spr);
2414                 if (result->cur_val > max->cur)
2415                         DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
2416                                       level, result->cur_val, max->cur);
2417
2418                 result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
2419                 result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
2420                 result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
2421                 result->enable = true;
2422         }
2423
2424         DRM_DEBUG_KMS("WM%d: %sabled\n", level, result->enable ? "en" : "dis");
2425
2426         return ret;
2427 }
2428
2429 static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
2430                                  int level,
2431                                  const struct hsw_pipe_wm_parameters *p,
2432                                  struct intel_wm_level *result)
2433 {
2434         uint16_t pri_latency = dev_priv->wm.pri_latency[level];
2435         uint16_t spr_latency = dev_priv->wm.spr_latency[level];
2436         uint16_t cur_latency = dev_priv->wm.cur_latency[level];
2437
2438         /* WM1+ latency values stored in 0.5us units */
2439         if (level > 0) {
2440                 pri_latency *= 5;
2441                 spr_latency *= 5;
2442                 cur_latency *= 5;
2443         }
2444
2445         result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
2446         result->spr_val = ilk_compute_spr_wm(p, spr_latency);
2447         result->cur_val = ilk_compute_cur_wm(p, cur_latency);
2448         result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
2449         result->enable = true;
2450 }
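/*
 * E.g. a raw WM1 latency of 2 from the registers (0.5us units) becomes
 * 2 * 5 = 10 in the 0.1us units that ilk_wm_method1/2() expect, i.e. 1.0 us.
 */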
2451
2452 static uint32_t
2453 hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
2454 {
2455         struct drm_i915_private *dev_priv = dev->dev_private;
2456         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2457         struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
2458         u32 linetime, ips_linetime;
2459
2460         if (!intel_crtc_active(crtc))
2461                 return 0;
2462
2463         /* The WMs are computed based on how long it takes to fill a single
2464          * row at the given clock rate, multiplied by 8.
2465          */
2466         linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8, mode->clock);
2467         ips_linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8,
2468                                          intel_ddi_get_cdclk_freq(dev_priv));
2469
2470         return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2471                PIPE_WM_LINETIME_TIME(linetime);
2472 }
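/*
 * Quick sanity check of the linetime maths with illustrative numbers: for
 * htotal = 2200 and a 148500 kHz pixel clock,
 *
 *   linetime = DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119
 *
 * i.e. the line time expressed in 1/8 us units. ips_linetime is the same
 * calculation done against the CD clock instead of the pixel clock.
 */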
2473
2474 static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
2475 {
2476         struct drm_i915_private *dev_priv = dev->dev_private;
2477
2478         if (IS_HASWELL(dev)) {
2479                 uint64_t sskpd = I915_READ64(MCH_SSKPD);
2480
2481                 wm[0] = (sskpd >> 56) & 0xFF;
2482                 if (wm[0] == 0)
2483                         wm[0] = sskpd & 0xF;
2484                 wm[1] = (sskpd >> 4) & 0xFF;
2485                 wm[2] = (sskpd >> 12) & 0xFF;
2486                 wm[3] = (sskpd >> 20) & 0x1FF;
2487                 wm[4] = (sskpd >> 32) & 0x1FF;
2488         } else if (INTEL_INFO(dev)->gen >= 6) {
2489                 uint32_t sskpd = I915_READ(MCH_SSKPD);
2490
2491                 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2492                 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2493                 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2494                 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
2495         } else if (INTEL_INFO(dev)->gen >= 5) {
2496                 uint32_t mltr = I915_READ(MLTR_ILK);
2497
2498                 /* ILK primary LP0 latency is 700 ns */
2499                 wm[0] = 7;
2500                 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2501                 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
2502         }
2503 }
2504
2505 static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
2506 {
2507         /* ILK sprite LP0 latency is 1300 ns */
2508         if (INTEL_INFO(dev)->gen == 5)
2509                 wm[0] = 13;
2510 }
2511
2512 static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
2513 {
2514         /* ILK cursor LP0 latency is 1300 ns */
2515         if (INTEL_INFO(dev)->gen == 5)
2516                 wm[0] = 13;
2517
2518         /* WaDoubleCursorLP3Latency:ivb */
2519         if (IS_IVYBRIDGE(dev))
2520                 wm[3] *= 2;
2521 }
2522
2523 static int ilk_wm_max_level(const struct drm_device *dev)
2524 {
2525         /* how many WM levels are we expecting */
2526         if (IS_HASWELL(dev))
2527                 return 4;
2528         else if (INTEL_INFO(dev)->gen >= 6)
2529                 return 3;
2530         else
2531                 return 2;
2532 }
2533
2534 static void intel_print_wm_latency(struct drm_device *dev,
2535                                    const char *name,
2536                                    const uint16_t wm[5])
2537 {
2538         int level, max_level = ilk_wm_max_level(dev);
2539
2540         for (level = 0; level <= max_level; level++) {
2541                 unsigned int latency = wm[level];
2542
2543                 if (latency == 0) {
2544                         DRM_ERROR("%s WM%d latency not provided\n",
2545                                   name, level);
2546                         continue;
2547                 }
2548
2549                 /* WM1+ latency values in 0.5us units */
2550                 if (level > 0)
2551                         latency *= 5;
2552
2553                 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
2554                               name, level, wm[level],
2555                               latency / 10, latency % 10);
2556         }
2557 }
2558
2559 static void intel_setup_wm_latency(struct drm_device *dev)
2560 {
2561         struct drm_i915_private *dev_priv = dev->dev_private;
2562
2563         intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
2564
2565         memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
2566                sizeof(dev_priv->wm.pri_latency));
2567         memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
2568                sizeof(dev_priv->wm.pri_latency));
2569
2570         intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
2571         intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
2572
2573         intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2574         intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2575         intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2576 }
2577
2578 static void hsw_compute_wm_parameters(struct drm_crtc *crtc,
2579                                       struct hsw_pipe_wm_parameters *p,
2580                                       struct intel_wm_config *config)
2581 {
2582         struct drm_device *dev = crtc->dev;
2583         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2584         enum pipe pipe = intel_crtc->pipe;
2585         struct drm_plane *plane;
2586
2587         p->active = intel_crtc_active(crtc);
2588         if (p->active) {
2589                 p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal;
2590                 p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
2591                 p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
2592                 p->cur.bytes_per_pixel = 4;
2593                 p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
2594                 p->cur.horiz_pixels = 64;
2595                 /* TODO: for now, assume primary and cursor planes are always enabled. */
2596                 p->pri.enabled = true;
2597                 p->cur.enabled = true;
2598         }
2599
2600         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
2601                 config->num_pipes_active += intel_crtc_active(crtc);
2602
2603         list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
2604                 struct intel_plane *intel_plane = to_intel_plane(plane);
2605
2606                 if (intel_plane->pipe == pipe)
2607                         p->spr = intel_plane->wm;
2608
2609                 config->sprites_enabled |= intel_plane->wm.enabled;
2610                 config->sprites_scaled |= intel_plane->wm.scaled;
2611         }
2612 }
2613
2614 /* Compute new watermarks for the pipe */
2615 static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
2616                                   const struct hsw_pipe_wm_parameters *params,
2617                                   struct intel_pipe_wm *pipe_wm)
2618 {
2619         struct drm_device *dev = crtc->dev;
2620         struct drm_i915_private *dev_priv = dev->dev_private;
2621         int level, max_level = ilk_wm_max_level(dev);
2622         /* LP0 watermark maximums depend on this pipe alone */
2623         struct intel_wm_config config = {
2624                 .num_pipes_active = 1,
2625                 .sprites_enabled = params->spr.enabled,
2626                 .sprites_scaled = params->spr.scaled,
2627         };
2628         struct hsw_wm_maximums max;
2629
2630         /* LP0 watermarks always use 1/2 DDB partitioning */
2631         ilk_wm_max(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2632
2633         for (level = 0; level <= max_level; level++)
2634                 ilk_compute_wm_level(dev_priv, level, params,
2635                                      &pipe_wm->wm[level]);
2636
2637         pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
2638
2639         /* At least LP0 must be valid */
2640         return ilk_check_wm(0, &max, &pipe_wm->wm[0]);
2641 }
2642
2643 /*
2644  * Merge the watermarks from all active pipes for a specific level.
2645  */
2646 static void ilk_merge_wm_level(struct drm_device *dev,
2647                                int level,
2648                                struct intel_wm_level *ret_wm)
2649 {
2650         const struct intel_crtc *intel_crtc;
2651
2652         list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
2653                 const struct intel_wm_level *wm =
2654                         &intel_crtc->wm.active.wm[level];
2655
2656                 if (!wm->enable)
2657                         return;
2658
2659                 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
2660                 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
2661                 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
2662                 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
2663         }
2664
2665         ret_wm->enable = true;
2666 }
2667
2668 /*
2669  * Merge all low power watermarks for all active pipes.
2670  */
2671 static void ilk_wm_merge(struct drm_device *dev,
2672                          const struct hsw_wm_maximums *max,
2673                          struct intel_pipe_wm *merged)
2674 {
2675         int level, max_level = ilk_wm_max_level(dev);
2676
2677         merged->fbc_wm_enabled = true;
2678
2679         /* merge each WM1+ level */
2680         for (level = 1; level <= max_level; level++) {
2681                 struct intel_wm_level *wm = &merged->wm[level];
2682
2683                 ilk_merge_wm_level(dev, level, wm);
2684
2685                 if (!ilk_check_wm(level, max, wm))
2686                         break;
2687
2688                 /*
2689                  * The spec says it is preferred to disable
2690                  * FBC WMs instead of disabling a WM level.
2691                  */
2692                 if (wm->fbc_val > max->fbc) {
2693                         merged->fbc_wm_enabled = false;
2694                         wm->fbc_val = 0;
2695                 }
2696         }
2697 }
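
/*
 * Editorial example (added commentary, hypothetical level values): if the
 * merged level 3 passes ilk_check_wm() but its fbc_val exceeds max->fbc,
 * the level itself stays usable while fbc_wm_enabled is cleared for the
 * whole merge and the offending fbc_val is zeroed, matching the spec's
 * preference for dropping FBC watermarks over dropping an entire WM level.
 */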
2698
2699 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
2700 {
2701         /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
2702         return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
2703 }
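
/*
 * Editorial example (added commentary only): with watermark levels 0..4,
 * the LP registers pick levels 1,2,3 when wm[4] is unusable and 1,3,4 when
 * it is usable.  E.g. wm_lp = 2 with pipe_wm->wm[4].enable set yields
 * 2 + 1 = 3, so WM2 is programmed from level 3.
 */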
2704
2705 static void hsw_compute_wm_results(struct drm_device *dev,
2706                                    const struct intel_pipe_wm *merged,
2707                                    enum intel_ddb_partitioning partitioning,
2708                                    struct hsw_wm_values *results)
2709 {
2710         struct intel_crtc *intel_crtc;
2711         int level, wm_lp;
2712
2713         results->enable_fbc_wm = merged->fbc_wm_enabled;
2714         results->partitioning = partitioning;
2715
2716         /* LP1+ register values */
2717         for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2718                 const struct intel_wm_level *r;
2719
2720                 level = ilk_wm_lp_to_level(wm_lp, merged);
2721
2722                 r = &merged->wm[level];
2723                 if (!r->enable)
2724                         break;
2725
2726                 results->wm_lp[wm_lp - 1] = HSW_WM_LP_VAL(level * 2,
2727                                                           r->fbc_val,
2728                                                           r->pri_val,
2729                                                           r->cur_val);
2730                 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
2731         }
2732
2733         /* LP0 register values */
2734         list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
2735                 enum pipe pipe = intel_crtc->pipe;
2736                 const struct intel_wm_level *r =
2737                         &intel_crtc->wm.active.wm[0];
2738
2739                 if (WARN_ON(!r->enable))
2740                         continue;
2741
2742                 results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;
2743
2744                 results->wm_pipe[pipe] =
2745                         (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
2746                         (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
2747                         r->cur_val;
2748         }
2749 }
2750
2751 /* Find the result with the highest level enabled. If both have the same
2752  * level, prefer the one with FBC watermarks enabled; prefer r1 on a tie. */
2753 static struct intel_pipe_wm *hsw_find_best_result(struct drm_device *dev,
2754                                                   struct intel_pipe_wm *r1,
2755                                                   struct intel_pipe_wm *r2)
2756 {
2757         int level, max_level = ilk_wm_max_level(dev);
2758         int level1 = 0, level2 = 0;
2759
2760         for (level = 1; level <= max_level; level++) {
2761                 if (r1->wm[level].enable)
2762                         level1 = level;
2763                 if (r2->wm[level].enable)
2764                         level2 = level;
2765         }
2766
2767         if (level1 == level2) {
2768                 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
2769                         return r2;
2770                 else
2771                         return r1;
2772         } else if (level1 > level2) {
2773                 return r1;
2774         } else {
2775                 return r2;
2776         }
2777 }
2778
2779 /* dirty bits used to track which watermarks need changes */
2780 #define WM_DIRTY_PIPE(pipe) (1 << (pipe))
2781 #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
2782 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
2783 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
2784 #define WM_DIRTY_FBC (1 << 24)
2785 #define WM_DIRTY_DDB (1 << 25)
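
/*
 * Editorial note on the resulting bit layout (added commentary only): pipe
 * bits occupy 0..2, linetime bits 8..10, WM_DIRTY_LP(1..3) sit at bits
 * 16..18, FBC at 24 and DDB at 25.  For example, a change to pipe B's WM0
 * alone sets bit 1 and, via ilk_compute_wm_dirty(), all of bits 16..18.
 */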
2786
2787 static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
2788                                          const struct hsw_wm_values *old,
2789                                          const struct hsw_wm_values *new)
2790 {
2791         unsigned int dirty = 0;
2792         enum pipe pipe;
2793         int wm_lp;
2794
2795         for_each_pipe(pipe) {
2796                 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
2797                         dirty |= WM_DIRTY_LINETIME(pipe);
2798                         /* Must disable LP1+ watermarks too */
2799                         dirty |= WM_DIRTY_LP_ALL;
2800                 }
2801
2802                 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
2803                         dirty |= WM_DIRTY_PIPE(pipe);
2804                         /* Must disable LP1+ watermarks too */
2805                         dirty |= WM_DIRTY_LP_ALL;
2806                 }
2807         }
2808
2809         if (old->enable_fbc_wm != new->enable_fbc_wm) {
2810                 dirty |= WM_DIRTY_FBC;
2811                 /* Must disable LP1+ watermarks too */
2812                 dirty |= WM_DIRTY_LP_ALL;
2813         }
2814
2815         if (old->partitioning != new->partitioning) {
2816                 dirty |= WM_DIRTY_DDB;
2817                 /* Must disable LP1+ watermarks too */
2818                 dirty |= WM_DIRTY_LP_ALL;
2819         }
2820
2821         /* LP1+ watermarks already deemed dirty, no need to continue */
2822         if (dirty & WM_DIRTY_LP_ALL)
2823                 return dirty;
2824
2825         /* Find the lowest numbered LP1+ watermark in need of an update... */
2826         for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2827                 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
2828                     old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
2829                         break;
2830         }
2831
2832         /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
2833         for (; wm_lp <= 3; wm_lp++)
2834                 dirty |= WM_DIRTY_LP(wm_lp);
2835
2836         return dirty;
2837 }
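
/*
 * Editorial example (added commentary only): if only the computed WM2 value
 * differs from what the hardware currently holds, the first loop above stops
 * at wm_lp = 2 and the second loop sets WM_DIRTY_LP(2) and WM_DIRTY_LP(3)
 * while leaving WM1 alone; hsw_write_wm_values() below then only touches
 * WM2_LP_ILK and WM3_LP_ILK.
 */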
2838
2839 /*
2840  * The spec says we shouldn't write when we don't need to, because every
2841  * write causes WMs to be re-evaluated, expending some power.
2842  */
2843 static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
2844                                 struct hsw_wm_values *results)
2845 {
2846         struct hsw_wm_values previous;
2847         unsigned int dirty;
2848         uint32_t val;
2849
2850         previous.wm_pipe[0] = I915_READ(WM0_PIPEA_ILK);
2851         previous.wm_pipe[1] = I915_READ(WM0_PIPEB_ILK);
2852         previous.wm_pipe[2] = I915_READ(WM0_PIPEC_IVB);
2853         previous.wm_lp[0] = I915_READ(WM1_LP_ILK);
2854         previous.wm_lp[1] = I915_READ(WM2_LP_ILK);
2855         previous.wm_lp[2] = I915_READ(WM3_LP_ILK);
2856         previous.wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
2857         previous.wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
2858         previous.wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
2859         previous.wm_linetime[0] = I915_READ(PIPE_WM_LINETIME(PIPE_A));
2860         previous.wm_linetime[1] = I915_READ(PIPE_WM_LINETIME(PIPE_B));
2861         previous.wm_linetime[2] = I915_READ(PIPE_WM_LINETIME(PIPE_C));
2862
2863         previous.partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
2864                                 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
2865
2866         previous.enable_fbc_wm = !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
2867
2868         dirty = ilk_compute_wm_dirty(dev_priv->dev, &previous, results);
2869         if (!dirty)
2870                 return;
2871
2872         if (dirty & WM_DIRTY_LP(3) && previous.wm_lp[2] != 0)
2873                 I915_WRITE(WM3_LP_ILK, 0);
2874         if (dirty & WM_DIRTY_LP(2) && previous.wm_lp[1] != 0)
2875                 I915_WRITE(WM2_LP_ILK, 0);
2876         if (dirty & WM_DIRTY_LP(1) && previous.wm_lp[0] != 0)
2877                 I915_WRITE(WM1_LP_ILK, 0);
2878
2879         if (dirty & WM_DIRTY_PIPE(PIPE_A))
2880                 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
2881         if (dirty & WM_DIRTY_PIPE(PIPE_B))
2882                 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
2883         if (dirty & WM_DIRTY_PIPE(PIPE_C))
2884                 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
2885
2886         if (dirty & WM_DIRTY_LINETIME(PIPE_A))
2887                 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
2888         if (dirty & WM_DIRTY_LINETIME(PIPE_B))
2889                 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
2890         if (dirty & WM_DIRTY_LINETIME(PIPE_C))
2891                 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2892
2893         if (dirty & WM_DIRTY_DDB) {
2894                 val = I915_READ(WM_MISC);
2895                 if (results->partitioning == INTEL_DDB_PART_1_2)
2896                         val &= ~WM_MISC_DATA_PARTITION_5_6;
2897                 else
2898                         val |= WM_MISC_DATA_PARTITION_5_6;
2899                 I915_WRITE(WM_MISC, val);
2900         }
2901
2902         if (dirty & WM_DIRTY_FBC) {
2903                 val = I915_READ(DISP_ARB_CTL);
2904                 if (results->enable_fbc_wm)
2905                         val &= ~DISP_FBC_WM_DIS;
2906                 else
2907                         val |= DISP_FBC_WM_DIS;
2908                 I915_WRITE(DISP_ARB_CTL, val);
2909         }
2910
2911         if (dirty & WM_DIRTY_LP(1) && previous.wm_lp_spr[0] != results->wm_lp_spr[0])
2912                 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2913         if (dirty & WM_DIRTY_LP(2) && previous.wm_lp_spr[1] != results->wm_lp_spr[1])
2914                 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2915         if (dirty & WM_DIRTY_LP(3) && previous.wm_lp_spr[2] != results->wm_lp_spr[2])
2916                 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2917
2918         if (dirty & WM_DIRTY_LP(1) && results->wm_lp[0] != 0)
2919                 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
2920         if (dirty & WM_DIRTY_LP(2) && results->wm_lp[1] != 0)
2921                 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
2922         if (dirty & WM_DIRTY_LP(3) && results->wm_lp[2] != 0)
2923                 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
2924
2925         dev_priv->wm.hw = *results;
2926 }
2927
2928 static void haswell_update_wm(struct drm_crtc *crtc)
2929 {
2930         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2931         struct drm_device *dev = crtc->dev;
2932         struct drm_i915_private *dev_priv = dev->dev_private;
2933         struct hsw_wm_maximums max;
2934         struct hsw_pipe_wm_parameters params = {};
2935         struct hsw_wm_values results = {};
2936         enum intel_ddb_partitioning partitioning;
2937         struct intel_pipe_wm pipe_wm = {};
2938         struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
2939         struct intel_wm_config config = {};
2940
2941         hsw_compute_wm_parameters(crtc, &params, &config);
2942
2943         intel_compute_pipe_wm(crtc, &params, &pipe_wm);
2944
2945         if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
2946                 return;
2947
2948         intel_crtc->wm.active = pipe_wm;
2949
2950         ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
2951         ilk_wm_merge(dev, &max, &lp_wm_1_2);
2952
2953         /* 5/6 split only in single pipe config on IVB+ */
2954         if (INTEL_INFO(dev)->gen >= 7 && config.num_pipes_active == 1) {
2955                 ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
2956                 ilk_wm_merge(dev, &max, &lp_wm_5_6);
2957
2958                 best_lp_wm = hsw_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
2959         } else {
2960                 best_lp_wm = &lp_wm_1_2;
2961         }
2962
2963         partitioning = (best_lp_wm == &lp_wm_1_2) ?
2964                        INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
2965
2966         hsw_compute_wm_results(dev, best_lp_wm, partitioning, &results);
2967
2968         hsw_write_wm_values(dev_priv, &results);
2969 }
2970
2971 static void haswell_update_sprite_wm(struct drm_plane *plane,
2972                                      struct drm_crtc *crtc,
2973                                      uint32_t sprite_width, int pixel_size,
2974                                      bool enabled, bool scaled)
2975 {
2976         struct intel_plane *intel_plane = to_intel_plane(plane);
2977
2978         intel_plane->wm.enabled = enabled;
2979         intel_plane->wm.scaled = scaled;
2980         intel_plane->wm.horiz_pixels = sprite_width;
2981         intel_plane->wm.bytes_per_pixel = pixel_size;
2982
2983         haswell_update_wm(crtc);
2984 }
2985
2986 static bool
2987 sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
2988                               uint32_t sprite_width, int pixel_size,
2989                               const struct intel_watermark_params *display,
2990                               int display_latency_ns, int *sprite_wm)
2991 {
2992         struct drm_crtc *crtc;
2993         int clock;
2994         int entries, tlb_miss;
2995
2996         crtc = intel_get_crtc_for_plane(dev, plane);
2997         if (!intel_crtc_active(crtc)) {
2998                 *sprite_wm = display->guard_size;
2999                 return false;
3000         }
3001
3002         clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
3003
3004         /* Use the small buffer method to calculate the sprite watermark */
3005         entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
3006         tlb_miss = display->fifo_size * display->cacheline_size -
3007                 sprite_width * 8;
3008         if (tlb_miss > 0)
3009                 entries += tlb_miss;
3010         entries = DIV_ROUND_UP(entries, display->cacheline_size);
3011         *sprite_wm = entries + display->guard_size;
3012         if (*sprite_wm > (int)display->max_wm)
3013                 *sprite_wm = display->max_wm;
3014
3015         return true;
3016 }
3017
3018 static bool
3019 sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
3020                                 uint32_t sprite_width, int pixel_size,
3021                                 const struct intel_watermark_params *display,
3022                                 int latency_ns, int *sprite_wm)
3023 {
3024         struct drm_crtc *crtc;
3025         unsigned long line_time_us;
3026         int clock;
3027         int line_count, line_size;
3028         int small, large;
3029         int entries;
3030
3031         if (!latency_ns) {
3032                 *sprite_wm = 0;
3033                 return false;
3034         }
3035
3036         crtc = intel_get_crtc_for_plane(dev, plane);
3037         clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
3038         if (!clock) {
3039                 *sprite_wm = 0;
3040                 return false;
3041         }
3042
3043         line_time_us = (sprite_width * 1000) / clock;
3044         if (!line_time_us) {
3045                 *sprite_wm = 0;
3046                 return false;
3047         }
3048
3049         line_count = (latency_ns / line_time_us + 1000) / 1000;
3050         line_size = sprite_width * pixel_size;
3051
3052         /* Use the minimum of the small and large buffer method for the sprite */
3053         small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
3054         large = line_count * line_size;
3055
3056         entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
3057         *sprite_wm = entries + display->guard_size;
3058
3059         return *sprite_wm <= 0x3ff;
3060 }
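
/*
 * Editorial worked example (hypothetical numbers, not from the original
 * source): with clock = 200000 kHz, sprite_width = 1000, pixel_size = 4,
 * latency_ns = 20000 and a 64-byte cacheline, line_time_us = 5,
 * line_count = (20000 / 5 + 1000) / 1000 = 5, large = 5 * 4000 = 20000,
 * small = (200000 * 4 / 1000) * 20000 / 1000 = 16000, so entries =
 * DIV_ROUND_UP(16000, 64) = 250 and *sprite_wm = 250 + guard_size.
 */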
3061
3062 static void sandybridge_update_sprite_wm(struct drm_plane *plane,
3063                                          struct drm_crtc *crtc,
3064                                          uint32_t sprite_width, int pixel_size,
3065                                          bool enabled, bool scaled)
3066 {
3067         struct drm_device *dev = plane->dev;
3068         struct drm_i915_private *dev_priv = dev->dev_private;
3069         int pipe = to_intel_plane(plane)->pipe;
3070         int latency = dev_priv->wm.spr_latency[0] * 100;        /* In unit 0.1us */
3071         u32 val;
3072         int sprite_wm, reg;
3073         int ret;
3074
3075         if (!enabled)
3076                 return;
3077
3078         switch (pipe) {
3079         case 0:
3080                 reg = WM0_PIPEA_ILK;
3081                 break;
3082         case 1:
3083                 reg = WM0_PIPEB_ILK;
3084                 break;
3085         case 2:
3086                 reg = WM0_PIPEC_IVB;
3087                 break;
3088         default:
3089                 return; /* bad pipe */
3090         }
3091
3092         ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
3093                                             &sandybridge_display_wm_info,
3094                                             latency, &sprite_wm);
3095         if (!ret) {
3096                 DRM_DEBUG_KMS("failed to compute sprite wm for pipe %c\n",
3097                               pipe_name(pipe));
3098                 return;
3099         }
3100
3101         val = I915_READ(reg);
3102         val &= ~WM0_PIPE_SPRITE_MASK;
3103         I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
3104         DRM_DEBUG_KMS("sprite watermarks for pipe %c - %d\n", pipe_name(pipe), sprite_wm);
3105
3106
3107         ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
3108                                               pixel_size,
3109                                               &sandybridge_display_srwm_info,
3110                                               dev_priv->wm.spr_latency[1] * 500,
3111                                               &sprite_wm);
3112         if (!ret) {
3113                 DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %c\n",
3114                               pipe_name(pipe));
3115                 return;
3116         }
3117         I915_WRITE(WM1S_LP_ILK, sprite_wm);
3118
3119         /* Only IVB has two more LP watermarks for sprite */
3120         if (!IS_IVYBRIDGE(dev))
3121                 return;
3122
3123         ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
3124                                               pixel_size,
3125                                               &sandybridge_display_srwm_info,
3126                                               dev_priv->wm.spr_latency[2] * 500,
3127                                               &sprite_wm);
3128         if (!ret) {
3129                 DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %c\n",
3130                               pipe_name(pipe));
3131                 return;
3132         }
3133         I915_WRITE(WM2S_LP_IVB, sprite_wm);
3134
3135         ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
3136                                               pixel_size,
3137                                               &sandybridge_display_srwm_info,
3138                                               dev_priv->wm.spr_latency[3] * 500,
3139                                               &sprite_wm);
3140         if (!ret) {
3141                 DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %c\n",
3142                               pipe_name(pipe));
3143                 return;
3144         }
3145         I915_WRITE(WM3S_LP_IVB, sprite_wm);
3146 }
3147
3148 /**
3149  * intel_update_watermarks - update FIFO watermark values based on current modes
3150  *
3151  * Calculate watermark values for the various WM regs based on current mode
3152  * and plane configuration.
3153  *
3154  * There are several cases to deal with here:
3155  *   - normal (i.e. non-self-refresh)
3156  *   - self-refresh (SR) mode
3157  *   - lines are large relative to FIFO size (buffer can hold up to 2)
3158  *   - lines are small relative to FIFO size (buffer can hold more than 2
3159  *     lines), so need to account for TLB latency
3160  *
3161  *   The normal calculation is:
3162  *     watermark = dotclock * bytes per pixel * latency
3163  *   where latency is platform & configuration dependent (we assume pessimal
3164  *   values here).
3165  *
3166  *   The SR calculation is:
3167  *     watermark = (trunc(latency/line time)+1) * surface width *
3168  *       bytes per pixel
3169  *   where
3170  *     line time = htotal / dotclock
3171  *     surface width = hdisplay for normal plane and 64 for cursor
3172  *   and latency is assumed to be high, as above.
3173  *
3174  * The final value programmed to the register should always be rounded up,
3175  * and include an extra 2 entries to account for clock crossings.
3176  *
3177  * We don't use the sprite, so we can ignore that.  And on Crestline we have
3178  * to set the non-SR watermarks to 8.
3179  */
3180 void intel_update_watermarks(struct drm_crtc *crtc)
3181 {
3182         struct drm_i915_private *dev_priv = crtc->dev->dev_private;
3183
3184         if (dev_priv->display.update_wm)
3185                 dev_priv->display.update_wm(crtc);
3186 }
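
/*
 * Editorial worked example for the formulas documented above (hypothetical
 * numbers): with a 150 MHz dot clock, 4 bytes per pixel and 2 us of latency,
 * the normal method needs about 150e6 * 4 * 2e-6 = 1200 bytes of FIFO.  For
 * self-refresh with a 12 us line time, trunc(2 / 12) + 1 = 1 line is
 * buffered, i.e. hdisplay * 4 bytes for the primary plane or 64 * 4 bytes
 * for the cursor, plus the extra entries noted above.
 */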
3187
3188 void intel_update_sprite_watermarks(struct drm_plane *plane,
3189                                     struct drm_crtc *crtc,
3190                                     uint32_t sprite_width, int pixel_size,
3191                                     bool enabled, bool scaled)
3192 {
3193         struct drm_i915_private *dev_priv = plane->dev->dev_private;
3194
3195         if (dev_priv->display.update_sprite_wm)
3196                 dev_priv->display.update_sprite_wm(plane, crtc, sprite_width,
3197                                                    pixel_size, enabled, scaled);
3198 }
3199
3200 static struct drm_i915_gem_object *
3201 intel_alloc_context_page(struct drm_device *dev)
3202 {
3203         struct drm_i915_gem_object *ctx;
3204         int ret;
3205
3206         WARN_ON(!mutex_is_locked(&dev->struct_mutex));
3207
3208         ctx = i915_gem_alloc_object(dev, 4096);
3209         if (!ctx) {
3210                 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
3211                 return NULL;
3212         }
3213
3214         ret = i915_gem_obj_ggtt_pin(ctx, 4096, true, false);
3215         if (ret) {
3216                 DRM_ERROR("failed to pin power context: %d\n", ret);
3217                 goto err_unref;
3218         }
3219
3220         ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
3221         if (ret) {
3222                 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
3223                 goto err_unpin;
3224         }
3225
3226         return ctx;
3227
3228 err_unpin:
3229         i915_gem_object_unpin(ctx);
3230 err_unref:
3231         drm_gem_object_unreference(&ctx->base);
3232         return NULL;
3233 }
3234
3235 /**
3236  * Lock protecting IPS related data structures
3237  */
3238 DEFINE_SPINLOCK(mchdev_lock);
3239
3240 /* Global for IPS driver to get at the current i915 device. Protected by
3241  * mchdev_lock. */
3242 static struct drm_i915_private *i915_mch_dev;
3243
3244 bool ironlake_set_drps(struct drm_device *dev, u8 val)
3245 {
3246         struct drm_i915_private *dev_priv = dev->dev_private;
3247         u16 rgvswctl;
3248
3249         assert_spin_locked(&mchdev_lock);
3250
3251         rgvswctl = I915_READ16(MEMSWCTL);
3252         if (rgvswctl & MEMCTL_CMD_STS) {
3253                 DRM_DEBUG("gpu busy, RCS change rejected\n");
3254                 return false; /* still busy with another command */
3255         }
3256
3257         rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
3258                 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
3259         I915_WRITE16(MEMSWCTL, rgvswctl);
3260         POSTING_READ16(MEMSWCTL);
3261
3262         rgvswctl |= MEMCTL_CMD_STS;
3263         I915_WRITE16(MEMSWCTL, rgvswctl);
3264
3265         return true;
3266 }
3267
3268 static void ironlake_enable_drps(struct drm_device *dev)
3269 {
3270         struct drm_i915_private *dev_priv = dev->dev_private;
3271         u32 rgvmodectl = I915_READ(MEMMODECTL);
3272         u8 fmax, fmin, fstart, vstart;
3273
3274         spin_lock_irq(&mchdev_lock);
3275
3276         /* Enable temp reporting */
3277         I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
3278         I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
3279
3280         /* 100ms RC evaluation intervals */
3281         I915_WRITE(RCUPEI, 100000);
3282         I915_WRITE(RCDNEI, 100000);
3283
3284         /* Set max/min thresholds to 90ms and 80ms respectively */
3285         I915_WRITE(RCBMAXAVG, 90000);
3286         I915_WRITE(RCBMINAVG, 80000);
3287
3288         I915_WRITE(MEMIHYST, 1);
3289
3290         /* Set up min, max, and cur for interrupt handling */
3291         fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
3292         fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
3293         fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
3294                 MEMMODE_FSTART_SHIFT;
3295
3296         vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
3297                 PXVFREQ_PX_SHIFT;
3298
3299         dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
3300         dev_priv->ips.fstart = fstart;
3301
3302         dev_priv->ips.max_delay = fstart;
3303         dev_priv->ips.min_delay = fmin;
3304         dev_priv->ips.cur_delay = fstart;
3305
3306         DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
3307                          fmax, fmin, fstart);
3308
3309         I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
3310
3311         /*
3312          * Interrupts will be enabled in ironlake_irq_postinstall
3313          */
3314
3315         I915_WRITE(VIDSTART, vstart);
3316         POSTING_READ(VIDSTART);
3317
3318         rgvmodectl |= MEMMODE_SWMODE_EN;
3319         I915_WRITE(MEMMODECTL, rgvmodectl);
3320
3321         if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
3322                 DRM_ERROR("stuck trying to change perf mode\n");
3323         mdelay(1);
3324
3325         ironlake_set_drps(dev, fstart);
3326
3327         dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
3328                 I915_READ(0x112e0);
3329         dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
3330         dev_priv->ips.last_count2 = I915_READ(0x112f4);
3331         getrawmonotonic(&dev_priv->ips.last_time2);
3332
3333         spin_unlock_irq(&mchdev_lock);
3334 }
3335
3336 static void ironlake_disable_drps(struct drm_device *dev)
3337 {
3338         struct drm_i915_private *dev_priv = dev->dev_private;
3339         u16 rgvswctl;
3340
3341         spin_lock_irq(&mchdev_lock);
3342
3343         rgvswctl = I915_READ16(MEMSWCTL);
3344
3345         /* Ack interrupts, disable EFC interrupt */
3346         I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
3347         I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
3348         I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
3349         I915_WRITE(DEIIR, DE_PCU_EVENT);
3350         I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
3351
3352         /* Go back to the starting frequency */
3353         ironlake_set_drps(dev, dev_priv->ips.fstart);
3354         mdelay(1);
3355         rgvswctl |= MEMCTL_CMD_STS;
3356         I915_WRITE(MEMSWCTL, rgvswctl);
3357         mdelay(1);
3358
3359         spin_unlock_irq(&mchdev_lock);
3360 }
3361
3362 /* There's a funny hw issue where the hw returns all 0 when reading from
3363  * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
3364  * ourselves, instead of doing an rmw cycle (which might result in us clearing
3365  * all limits and leaving the gpu stuck at whatever frequency it currently runs at).
3366  */
3367 static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
3368 {
3369         u32 limits;
3370
3371         limits = 0;
3372
3373         if (*val >= dev_priv->rps.max_delay)
3374                 *val = dev_priv->rps.max_delay;
3375         limits |= dev_priv->rps.max_delay << 24;
3376
3377         /* Only set the down limit when we've reached the lowest level to avoid
3378          * getting more interrupts, otherwise leave this clear. This prevents a
3379          * race in the hw when coming out of rc6: There's a tiny window where
3380          * the hw runs at the minimal clock before selecting the desired
3381          * frequency, if the down threshold expires in that window we will not
3382          * receive a down interrupt. */
3383         if (*val <= dev_priv->rps.min_delay) {
3384                 *val = dev_priv->rps.min_delay;
3385                 limits |= dev_priv->rps.min_delay << 16;
3386         }
3387
3388         return limits;
3389 }
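
/*
 * Editorial example (hypothetical values): with max_delay = 0x16 and
 * min_delay = 0x07, a request in between yields limits = 0x16 << 24 =
 * 0x16000000; a request at or below the minimum additionally sets the down
 * limit, giving 0x16000000 | (0x07 << 16) = 0x16070000.
 */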
3390
3391 static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
3392 {
3393         int new_power;
3394
3395         new_power = dev_priv->rps.power;
3396         switch (dev_priv->rps.power) {
3397         case LOW_POWER:
3398                 if (val > dev_priv->rps.rpe_delay + 1 && val > dev_priv->rps.cur_delay)
3399                         new_power = BETWEEN;
3400                 break;
3401
3402         case BETWEEN:
3403                 if (val <= dev_priv->rps.rpe_delay && val < dev_priv->rps.cur_delay)
3404                         new_power = LOW_POWER;
3405                 else if (val >= dev_priv->rps.rp0_delay && val > dev_priv->rps.cur_delay)
3406                         new_power = HIGH_POWER;
3407                 break;
3408
3409         case HIGH_POWER:
3410                 if (val < (dev_priv->rps.rp1_delay + dev_priv->rps.rp0_delay) >> 1 && val < dev_priv->rps.cur_delay)
3411                         new_power = BETWEEN;
3412                 break;
3413         }
3414         /* Max/min bins are special */
3415         if (val == dev_priv->rps.min_delay)
3416                 new_power = LOW_POWER;
3417         if (val == dev_priv->rps.max_delay)
3418                 new_power = HIGH_POWER;
3419         if (new_power == dev_priv->rps.power)
3420                 return;
3421
3422         /* Note the units here are not exactly 1us, but 1280ns. */
3423         switch (new_power) {
3424         case LOW_POWER:
3425                 /* Upclock if more than 95% busy over 16ms */
3426                 I915_WRITE(GEN6_RP_UP_EI, 12500);
3427                 I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);
3428
3429                 /* Downclock if less than 85% busy over 32ms */
3430                 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3431                 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);
3432
3433                 I915_WRITE(GEN6_RP_CONTROL,
3434                            GEN6_RP_MEDIA_TURBO |
3435                            GEN6_RP_MEDIA_HW_NORMAL_MODE |
3436                            GEN6_RP_MEDIA_IS_GFX |
3437                            GEN6_RP_ENABLE |
3438                            GEN6_RP_UP_BUSY_AVG |
3439                            GEN6_RP_DOWN_IDLE_AVG);
3440                 break;
3441
3442         case BETWEEN:
3443                 /* Upclock if more than 90% busy over 13ms */
3444                 I915_WRITE(GEN6_RP_UP_EI, 10250);
3445                 I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);
3446
3447                 /* Downclock if less than 75% busy over 32ms */
3448                 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3449                 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);
3450
3451                 I915_WRITE(GEN6_RP_CONTROL,
3452                            GEN6_RP_MEDIA_TURBO |
3453                            GEN6_RP_MEDIA_HW_NORMAL_MODE |
3454                            GEN6_RP_MEDIA_IS_GFX |
3455                            GEN6_RP_ENABLE |
3456                            GEN6_RP_UP_BUSY_AVG |
3457                            GEN6_RP_DOWN_IDLE_AVG);
3458                 break;
3459
3460         case HIGH_POWER:
3461                 /* Upclock if more than 85% busy over 10ms */
3462                 I915_WRITE(GEN6_RP_UP_EI, 8000);
3463                 I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);
3464
3465                 /* Downclock if less than 60% busy over 32ms */
3466                 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3467                 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);
3468
3469                 I915_WRITE(GEN6_RP_CONTROL,
3470                            GEN6_RP_MEDIA_TURBO |
3471                            GEN6_RP_MEDIA_HW_NORMAL_MODE |
3472                            GEN6_RP_MEDIA_IS_GFX |
3473                            GEN6_RP_ENABLE |
3474                            GEN6_RP_UP_BUSY_AVG |
3475                            GEN6_RP_DOWN_IDLE_AVG);
3476                 break;
3477         }
3478
3479         dev_priv->rps.power = new_power;
3480         dev_priv->rps.last_adj = 0;
3481 }
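
/*
 * Editorial sanity check on the thresholds above (added commentary only):
 * since the EI/threshold registers count in ~1280 ns units, GEN6_RP_UP_EI =
 * 12500 spans 12500 * 1280 ns = 16 ms, and an up threshold of 11800 is
 * 11800 / 12500 = 94.4% busy, roughly matching the "more than 95% busy over
 * 16ms" comment in the LOW_POWER case.
 */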
3482
3483 void gen6_set_rps(struct drm_device *dev, u8 val)
3484 {
3485         struct drm_i915_private *dev_priv = dev->dev_private;
3486         u32 limits = gen6_rps_limits(dev_priv, &val);
3487
3488         WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3489         WARN_ON(val > dev_priv->rps.max_delay);
3490         WARN_ON(val < dev_priv->rps.min_delay);
3491
3492         if (val == dev_priv->rps.cur_delay)
3493                 return;
3494
3495         gen6_set_rps_thresholds(dev_priv, val);
3496
3497         if (IS_HASWELL(dev))
3498                 I915_WRITE(GEN6_RPNSWREQ,
3499                            HSW_FREQUENCY(val));
3500         else
3501                 I915_WRITE(GEN6_RPNSWREQ,
3502                            GEN6_FREQUENCY(val) |
3503                            GEN6_OFFSET(0) |
3504                            GEN6_AGGRESSIVE_TURBO);
3505
3506         /* Make sure we continue to get interrupts
3507          * until we hit the minimum or maximum frequencies.
3508          */
3509         I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
3510
3511         POSTING_READ(GEN6_RPNSWREQ);
3512
3513         dev_priv->rps.cur_delay = val;
3514
3515         trace_intel_gpu_freq_change(val * 50);
3516 }
3517
3518 void gen6_rps_idle(struct drm_i915_private *dev_priv)
3519 {
3520         mutex_lock(&dev_priv->rps.hw_lock);
3521         if (dev_priv->rps.enabled) {
3522                 if (dev_priv->info->is_valleyview)
3523                         valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
3524                 else
3525                         gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
3526                 dev_priv->rps.last_adj = 0;
3527         }
3528         mutex_unlock(&dev_priv->rps.hw_lock);
3529 }
3530
3531 void gen6_rps_boost(struct drm_i915_private *dev_priv)
3532 {
3533         mutex_lock(&dev_priv->rps.hw_lock);
3534         if (dev_priv->rps.enabled) {
3535                 if (dev_priv->info->is_valleyview)
3536                         valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
3537                 else
3538                         gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
3539                 dev_priv->rps.last_adj = 0;
3540         }
3541         mutex_unlock(&dev_priv->rps.hw_lock);
3542 }
3543
3544 /*
3545  * Wait until the previous freq change has completed,
3546  * or the timeout elapsed, and then update our notion
3547  * of the current GPU frequency.
3548  */
3549 static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv)
3550 {
3551         u32 pval;
3552
3553         WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3554
3555         if (wait_for(((pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) & GENFREQSTATUS) == 0, 10))
3556                 DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
3557
3558         pval >>= 8;
3559
3560         if (pval != dev_priv->rps.cur_delay)
3561                 DRM_DEBUG_DRIVER("Punit overrode GPU freq: %d MHz (%u) requested, but got %d Mhz (%u)\n",
3562                                  vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.cur_delay),
3563                                  dev_priv->rps.cur_delay,
3564                                  vlv_gpu_freq(dev_priv->mem_freq, pval), pval);
3565
3566         dev_priv->rps.cur_delay = pval;
3567 }
3568
3569 void valleyview_set_rps(struct drm_device *dev, u8 val)
3570 {
3571         struct drm_i915_private *dev_priv = dev->dev_private;
3572
3573         gen6_rps_limits(dev_priv, &val);
3574
3575         WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3576         WARN_ON(val > dev_priv->rps.max_delay);
3577         WARN_ON(val < dev_priv->rps.min_delay);
3578
3579         vlv_update_rps_cur_delay(dev_priv);
3580
3581         DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
3582                          vlv_gpu_freq(dev_priv->mem_freq,
3583                                       dev_priv->rps.cur_delay),
3584                          dev_priv->rps.cur_delay,
3585                          vlv_gpu_freq(dev_priv->mem_freq, val), val);
3586
3587         if (val == dev_priv->rps.cur_delay)
3588                 return;
3589
3590         vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
3591
3592         dev_priv->rps.cur_delay = val;
3593
3594         trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv->mem_freq, val));
3595 }
3596
3597 static void gen6_disable_rps_interrupts(struct drm_device *dev)
3598 {
3599         struct drm_i915_private *dev_priv = dev->dev_private;
3600
3601         I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
3602         I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) & ~GEN6_PM_RPS_EVENTS);
3603         /* Completely masking the PM interrupts here doesn't race with the rps
3604          * work item unmasking them again, because that path uses a different
3605          * register (PMIMR) for the masking. The only risk is leaving stale bits
3606          * in PMIIR and PMIMR, which gen6_enable_rps will clean up. */
3607
3608         spin_lock_irq(&dev_priv->irq_lock);
3609         dev_priv->rps.pm_iir = 0;
3610         spin_unlock_irq(&dev_priv->irq_lock);
3611
3612         I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
3613 }
3614
3615 static void gen6_disable_rps(struct drm_device *dev)
3616 {
3617         struct drm_i915_private *dev_priv = dev->dev_private;
3618
3619         I915_WRITE(GEN6_RC_CONTROL, 0);
3620         I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
3621
3622         gen6_disable_rps_interrupts(dev);
3623 }
3624
3625 static void valleyview_disable_rps(struct drm_device *dev)
3626 {
3627         struct drm_i915_private *dev_priv = dev->dev_private;
3628
3629         I915_WRITE(GEN6_RC_CONTROL, 0);
3630
3631         gen6_disable_rps_interrupts(dev);
3632
3633         if (dev_priv->vlv_pctx) {
3634                 drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
3635                 dev_priv->vlv_pctx = NULL;
3636         }
3637 }
3638
3639 int intel_enable_rc6(const struct drm_device *dev)
3640 {
3641         /* No RC6 before Ironlake */
3642         if (INTEL_INFO(dev)->gen < 5)
3643                 return 0;
3644
3645         /* Respect the kernel parameter if it is set */
3646         if (i915_enable_rc6 >= 0)
3647                 return i915_enable_rc6;
3648
3649         /* Disable RC6 on Ironlake */
3650         if (INTEL_INFO(dev)->gen == 5)
3651                 return 0;
3652
3653         if (IS_HASWELL(dev)) {
3654                 DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
3655                 return INTEL_RC6_ENABLE;
3656         }
3657
3658         /* snb/ivb have more than one rc6 state. */
3659         if (INTEL_INFO(dev)->gen == 6) {
3660                 DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
3661                 return INTEL_RC6_ENABLE;
3662         }
3663
3664         DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
3665         return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
3666 }
3667
3668 static void gen6_enable_rps_interrupts(struct drm_device *dev)
3669 {
3670         struct drm_i915_private *dev_priv = dev->dev_private;
3671         u32 enabled_intrs;
3672
3673         spin_lock_irq(&dev_priv->irq_lock);
3674         WARN_ON(dev_priv->rps.pm_iir);
3675         snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
3676         I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
3677         spin_unlock_irq(&dev_priv->irq_lock);
3678
3679         /* only unmask PM interrupts we need. Mask all others. */
3680         enabled_intrs = GEN6_PM_RPS_EVENTS;
3681
3682         /* IVB and SNB hard-hang on a looping batchbuffer
3683          * if GEN6_PM_UP_EI_EXPIRED is masked.
3684          */
3685         if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
3686                 enabled_intrs |= GEN6_PM_RP_UP_EI_EXPIRED;
3687
3688         I915_WRITE(GEN6_PMINTRMSK, ~enabled_intrs);
3689 }
3690
3691 static void gen6_enable_rps(struct drm_device *dev)
3692 {
3693         struct drm_i915_private *dev_priv = dev->dev_private;
3694         struct intel_ring_buffer *ring;
3695         u32 rp_state_cap;
3696         u32 gt_perf_status;
3697         u32 rc6vids, pcu_mbox, rc6_mask = 0;
3698         u32 gtfifodbg;
3699         int rc6_mode;
3700         int i, ret;
3701
3702         WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3703
3704         /* Here begins a magic sequence of register writes to enable
3705          * auto-downclocking.
3706          *
3707          * There might be some value in exposing these to
3708          * userspace...
3709          */
3710         I915_WRITE(GEN6_RC_STATE, 0);
3711
3712         /* Clear the DBG now so we don't confuse earlier errors */
3713         if ((gtfifodbg = I915_READ(GTFIFODBG))) {
3714                 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
3715                 I915_WRITE(GTFIFODBG, gtfifodbg);
3716         }
3717
3718         gen6_gt_force_wake_get(dev_priv);
3719
3720         rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3721         gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
3722
3723         /* In units of 50MHz */
3724         dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff;
3725         dev_priv->rps.min_delay = (rp_state_cap >> 16) & 0xff;
3726         dev_priv->rps.rp1_delay = (rp_state_cap >>  8) & 0xff;
3727         dev_priv->rps.rp0_delay = (rp_state_cap >>  0) & 0xff;
3728         dev_priv->rps.rpe_delay = dev_priv->rps.rp1_delay;
3729         dev_priv->rps.cur_delay = 0;
3730
3731         /* disable the counters and set deterministic thresholds */
3732         I915_WRITE(GEN6_RC_CONTROL, 0);
3733
3734         I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
3735         I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
3736         I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
3737         I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
3738         I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
3739
3740         for_each_ring(ring, dev_priv, i)
3741                 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3742
3743         I915_WRITE(GEN6_RC_SLEEP, 0);
3744         I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
3745         if (INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev))
3746                 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
3747         else
3748                 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
3749         I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
3750         I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
3751
3752         /* Check if we are enabling RC6 */
3753         rc6_mode = intel_enable_rc6(dev_priv->dev);
3754         if (rc6_mode & INTEL_RC6_ENABLE)
3755                 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
3756
3757         /* We don't use those on Haswell */
3758         if (!IS_HASWELL(dev)) {
3759                 if (rc6_mode & INTEL_RC6p_ENABLE)
3760                         rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
3761
3762                 if (rc6_mode & INTEL_RC6pp_ENABLE)
3763                         rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
3764         }
3765
3766         DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
3767                         (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
3768                         (rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
3769                         (rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
3770
3771         I915_WRITE(GEN6_RC_CONTROL,
3772                    rc6_mask |
3773                    GEN6_RC_CTL_EI_MODE(1) |
3774                    GEN6_RC_CTL_HW_ENABLE);
3775
3776         /* Power down if completely idle for over 50ms */
3777         I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
3778         I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3779
3780         ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
3781         if (!ret) {
3782                 pcu_mbox = 0;
3783                 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
3784                 if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
3785                         DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
3786                                          (dev_priv->rps.max_delay & 0xff) * 50,
3787                                          (pcu_mbox & 0xff) * 50);
3788                         dev_priv->rps.hw_max = pcu_mbox & 0xff;
3789                 }
3790         } else {
3791                 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
3792         }
3793
3794         dev_priv->rps.power = HIGH_POWER; /* force a reset */
3795         gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
3796
3797         gen6_enable_rps_interrupts(dev);
3798
3799         rc6vids = 0;
3800         ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
3801         if (IS_GEN6(dev) && ret) {
3802                 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
3803         } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
3804                 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
3805                           GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
3806                 rc6vids &= 0xffff00;
3807                 rc6vids |= GEN6_ENCODE_RC6_VID(450);
3808                 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
3809                 if (ret)
3810                         DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
3811         }
3812
3813         gen6_gt_force_wake_put(dev_priv);
3814 }
3815
3816 void gen6_update_ring_freq(struct drm_device *dev)
3817 {
3818         struct drm_i915_private *dev_priv = dev->dev_private;
3819         int min_freq = 15;
3820         unsigned int gpu_freq;
3821         unsigned int max_ia_freq, min_ring_freq;
3822         int scaling_factor = 180;
3823         struct cpufreq_policy *policy;
3824
3825         WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3826
3827         policy = cpufreq_cpu_get(0);
3828         if (policy) {
3829                 max_ia_freq = policy->cpuinfo.max_freq;
3830                 cpufreq_cpu_put(policy);
3831         } else {
3832                 /*
3833                  * Default to measured freq if none found, PCU will ensure we
3834                  * don't go over
3835                  */
3836                 max_ia_freq = tsc_khz;
3837         }
3838
3839         /* Convert from kHz to MHz */
3840         max_ia_freq /= 1000;
3841
3842         min_ring_freq = I915_READ(MCHBAR_MIRROR_BASE_SNB + DCLK) & 0xf;
3843         /* convert DDR frequency from units of 266.6MHz to bandwidth */
3844         min_ring_freq = mult_frac(min_ring_freq, 8, 3);
3845
3846         /*
3847          * For each potential GPU frequency, load a ring frequency we'd like
3848          * to use for memory access.  We do this by specifying the IA frequency
3849          * the PCU should use as a reference to determine the ring frequency.
3850          */
3851         for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
3852              gpu_freq--) {
3853                 int diff = dev_priv->rps.max_delay - gpu_freq;
3854                 unsigned int ia_freq = 0, ring_freq = 0;
3855
3856                 if (IS_HASWELL(dev)) {
3857                         ring_freq = mult_frac(gpu_freq, 5, 4);
3858                         ring_freq = max(min_ring_freq, ring_freq);
3859                         /* leave ia_freq as the default, chosen by cpufreq */
3860                 } else {
3861                         /* On older processors, there is no separate ring
3862                          * clock domain, so in order to boost the bandwidth
3863                          * of the ring, we need to upclock the CPU (ia_freq).
3864                          *
3865                          * For GPU frequencies less than 750MHz,
3866                          * just use the lowest ring freq.
3867                          */
3868                         if (gpu_freq < min_freq)
3869                                 ia_freq = 800;
3870                         else
3871                                 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
3872                         ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
3873                 }
3874
3875                 sandybridge_pcode_write(dev_priv,
3876                                         GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
3877                                         ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
3878                                         ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
3879                                         gpu_freq);
3880         }
3881 }
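
/*
 * Editorial worked example (hypothetical numbers): on Haswell a GPU
 * frequency of 20 (50 MHz units, i.e. 1 GHz) requests a ring frequency of
 * mult_frac(20, 5, 4) = 25, clamped to at least min_ring_freq.  On SNB/IVB
 * with max_ia_freq = 3400 MHz, an entry 4 steps below the maximum requests
 * DIV_ROUND_CLOSEST(3400 - 4 * 180 / 2, 100) = 30, in 100 MHz units.
 */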
3882
3883 int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
3884 {
3885         u32 val, rp0;
3886
3887         val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
3888
3889         rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
3890         /* Clamp to max */
3891         rp0 = min_t(u32, rp0, 0xea);
3892
3893         return rp0;
3894 }
3895
3896 static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
3897 {
3898         u32 val, rpe;
3899
3900         val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
3901         rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
3902         val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
3903         rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
3904
3905         return rpe;
3906 }
3907
3908 int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
3909 {
3910         return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
3911 }
3912
3913 static void valleyview_setup_pctx(struct drm_device *dev)
3914 {
3915         struct drm_i915_private *dev_priv = dev->dev_private;
3916         struct drm_i915_gem_object *pctx;
3917         unsigned long pctx_paddr;
3918         u32 pcbr;
3919         int pctx_size = 24*1024;
3920
3921         pcbr = I915_READ(VLV_PCBR);
3922         if (pcbr) {
3923                 /* BIOS set it up already, grab the pre-alloc'd space */
3924                 int pcbr_offset;
3925
3926                 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
3927                 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
3928                                                                       pcbr_offset,
3929                                                                       I915_GTT_OFFSET_NONE,
3930                                                                       pctx_size);
3931                 goto out;
3932         }
3933
3934         /*
3935          * From the Gunit register HAS:
3936          * The Gfx driver is expected to program this register and ensure
3937          * proper allocation within Gfx stolen memory.  For example, this
3938          * register should be programmed such that the PCBR range does not
3939          * overlap with other ranges, such as the frame buffer, protected
3940          * memory, or any other relevant ranges.
3941          */
3942         pctx = i915_gem_object_create_stolen(dev, pctx_size);
3943         if (!pctx) {
3944                 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
3945                 return;
3946         }
3947
3948         pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
3949         I915_WRITE(VLV_PCBR, pctx_paddr);
3950
3951 out:
3952         dev_priv->vlv_pctx = pctx;
3953 }
3954
3955 static void valleyview_enable_rps(struct drm_device *dev)
3956 {
3957         struct drm_i915_private *dev_priv = dev->dev_private;
3958         struct intel_ring_buffer *ring;
3959         u32 gtfifodbg, val, rc6_mode = 0;
3960         int i;
3961
3962         WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3963
3964         if ((gtfifodbg = I915_READ(GTFIFODBG))) {
3965                 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
3966                                  gtfifodbg);
3967                 I915_WRITE(GTFIFODBG, gtfifodbg);
3968         }
3969
3970         valleyview_setup_pctx(dev);
3971
3972         gen6_gt_force_wake_get(dev_priv);
3973
3974         I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
3975         I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
3976         I915_WRITE(GEN6_RP_UP_EI, 66000);
3977         I915_WRITE(GEN6_RP_DOWN_EI, 350000);
3978
3979         I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3980
3981         I915_WRITE(GEN6_RP_CONTROL,
3982                    GEN6_RP_MEDIA_TURBO |
3983                    GEN6_RP_MEDIA_HW_NORMAL_MODE |
3984                    GEN6_RP_MEDIA_IS_GFX |
3985                    GEN6_RP_ENABLE |
3986                    GEN6_RP_UP_BUSY_AVG |
3987                    GEN6_RP_DOWN_IDLE_CONT);
3988
3989         I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
3990         I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
3991         I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
3992
3993         for_each_ring(ring, dev_priv, i)
3994                 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3995
3996         I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350);
3997
3998         /* allows RC6 residency counter to work */
3999         I915_WRITE(VLV_COUNTER_CONTROL,
4000                    _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
4001                                       VLV_MEDIA_RC6_COUNT_EN |
4002                                       VLV_RENDER_RC6_COUNT_EN));
4003         if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
4004                 rc6_mode = GEN7_RC_CTL_TO_MODE;
4005         I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
4006
4007         val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
4008         switch ((val >> 6) & 3) {
4009         case 0:
4010         case 1:
4011                 dev_priv->mem_freq = 800;
4012                 break;
4013         case 2:
4014                 dev_priv->mem_freq = 1066;
4015                 break;
4016         case 3:
4017                 dev_priv->mem_freq = 1333;
4018                 break;
4019         }
4020         DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
4021
4022         DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
4023         DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
4024
4025         dev_priv->rps.cur_delay = (val >> 8) & 0xff;
4026         DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
4027                          vlv_gpu_freq(dev_priv->mem_freq,
4028                                       dev_priv->rps.cur_delay),
4029                          dev_priv->rps.cur_delay);
4030
4031         dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv);
4032         dev_priv->rps.hw_max = dev_priv->rps.max_delay;
4033         DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
4034                          vlv_gpu_freq(dev_priv->mem_freq,
4035                                       dev_priv->rps.max_delay),
4036                          dev_priv->rps.max_delay);
4037
4038         dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv);
4039         DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
4040                          vlv_gpu_freq(dev_priv->mem_freq,
4041                                       dev_priv->rps.rpe_delay),
4042                          dev_priv->rps.rpe_delay);
4043
4044         dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv);
4045         DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
4046                          vlv_gpu_freq(dev_priv->mem_freq,
4047                                       dev_priv->rps.min_delay),
4048                          dev_priv->rps.min_delay);
4049
4050         DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
4051                          vlv_gpu_freq(dev_priv->mem_freq,
4052                                       dev_priv->rps.rpe_delay),
4053                          dev_priv->rps.rpe_delay);
4054
4055         valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
4056
4057         gen6_enable_rps_interrupts(dev);
4058
4059         gen6_gt_force_wake_put(dev_priv);
4060 }
4061
4062 void ironlake_teardown_rc6(struct drm_device *dev)
4063 {
4064         struct drm_i915_private *dev_priv = dev->dev_private;
4065
4066         if (dev_priv->ips.renderctx) {
4067                 i915_gem_object_unpin(dev_priv->ips.renderctx);
4068                 drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
4069                 dev_priv->ips.renderctx = NULL;
4070         }
4071
4072         if (dev_priv->ips.pwrctx) {
4073                 i915_gem_object_unpin(dev_priv->ips.pwrctx);
4074                 drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
4075                 dev_priv->ips.pwrctx = NULL;
4076         }
4077 }
4078
4079 static void ironlake_disable_rc6(struct drm_device *dev)
4080 {
4081         struct drm_i915_private *dev_priv = dev->dev_private;
4082
4083         if (I915_READ(PWRCTXA)) {
4084                 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
4085                 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
4086                 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
4087                          50);
4088
4089                 I915_WRITE(PWRCTXA, 0);
4090                 POSTING_READ(PWRCTXA);
4091
4092                 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
4093                 POSTING_READ(RSTDBYCTL);
4094         }
4095 }
4096
4097 static int ironlake_setup_rc6(struct drm_device *dev)
4098 {
4099         struct drm_i915_private *dev_priv = dev->dev_private;
4100
4101         if (dev_priv->ips.renderctx == NULL)
4102                 dev_priv->ips.renderctx = intel_alloc_context_page(dev);
4103         if (!dev_priv->ips.renderctx)
4104                 return -ENOMEM;
4105
4106         if (dev_priv->ips.pwrctx == NULL)
4107                 dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
4108         if (!dev_priv->ips.pwrctx) {
4109                 ironlake_teardown_rc6(dev);
4110                 return -ENOMEM;
4111         }
4112
4113         return 0;
4114 }
4115
4116 static void ironlake_enable_rc6(struct drm_device *dev)
4117 {
4118         struct drm_i915_private *dev_priv = dev->dev_private;
4119         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
4120         bool was_interruptible;
4121         int ret;
4122
4123         /* rc6 disabled by default due to repeated reports of hanging during
4124          * boot and resume.
4125          */
4126         if (!intel_enable_rc6(dev))
4127                 return;
4128
4129         WARN_ON(!mutex_is_locked(&dev->struct_mutex));
4130
4131         ret = ironlake_setup_rc6(dev);
4132         if (ret)
4133                 return;
4134
4135         was_interruptible = dev_priv->mm.interruptible;
4136         dev_priv->mm.interruptible = false;
4137
4138         /*
4139          * GPU can automatically power down the render unit if given a page
4140          * to save state.
4141          */
4142         ret = intel_ring_begin(ring, 6);
4143         if (ret) {
4144                 ironlake_teardown_rc6(dev);
4145                 dev_priv->mm.interruptible = was_interruptible;
4146                 return;
4147         }
4148
4149         intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
4150         intel_ring_emit(ring, MI_SET_CONTEXT);
4151         intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
4152                         MI_MM_SPACE_GTT |
4153                         MI_SAVE_EXT_STATE_EN |
4154                         MI_RESTORE_EXT_STATE_EN |
4155                         MI_RESTORE_INHIBIT);
4156         intel_ring_emit(ring, MI_SUSPEND_FLUSH);
4157         intel_ring_emit(ring, MI_NOOP);
4158         intel_ring_emit(ring, MI_FLUSH);
4159         intel_ring_advance(ring);
4160
4161         /*
4162          * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
4163          * does an implicit flush; combined with the MI_FLUSH above, it should
4164          * be safe to assume that renderctx is valid.
4165          */
4166         ret = intel_ring_idle(ring);
4167         dev_priv->mm.interruptible = was_interruptible;
4168         if (ret) {
4169                 DRM_ERROR("failed to enable ironlake power savings\n");
4170                 ironlake_teardown_rc6(dev);
4171                 return;
4172         }
4173
4174         I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
4175         I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
4176 }
4177
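/*
 * Decode a PXVFREQ register value: divider in bits 21:16, post-divider
 * exponent in bits 13:12, pre-divider in bits 2:0, applied to a 133.333 MHz
 * reference (the result is presumably in kHz).
 */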
4178 static unsigned long intel_pxfreq(u32 vidfreq)
4179 {
4180         unsigned long freq;
4181         int div = (vidfreq & 0x3f0000) >> 16;
4182         int post = (vidfreq & 0x3000) >> 12;
4183         int pre = (vidfreq & 0x7);
4184
4185         if (!pre)
4186                 return 0;
4187
4188         freq = ((div * 133333) / ((1<<post) * pre));
4189
4190         return freq;
4191 }
4192
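/*
 * Chipset power coefficients, matched against (c_m, r_t) below; chipset
 * power is then estimated as (m * energy-counter rate + c) / 10.
 */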
4193 static const struct cparams {
4194         u16 i;
4195         u16 t;
4196         u16 m;
4197         u16 c;
4198 } cparams[] = {
4199         { 1, 1333, 301, 28664 },
4200         { 1, 1066, 294, 24460 },
4201         { 1, 800, 294, 25192 },
4202         { 0, 1333, 276, 27605 },
4203         { 0, 1066, 276, 27605 },
4204         { 0, 800, 231, 23784 },
4205 };
4206
4207 static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
4208 {
4209         u64 total_count, diff, ret;
4210         u32 count1, count2, count3, m = 0, c = 0;
4211         unsigned long now = jiffies_to_msecs(jiffies), diff1;
4212         int i;
4213
4214         assert_spin_locked(&mchdev_lock);
4215
4216         diff1 = now - dev_priv->ips.last_time1;
4217
4218         /* Prevent division-by-zero if we are asking too fast.
4219          * Also, we don't get interesting results if we are polling
4220          * faster than once in 10ms, so just return the saved value
4221          * in such cases.
4222          */
4223         if (diff1 <= 10)
4224                 return dev_priv->ips.chipset_power;
4225
4226         count1 = I915_READ(DMIEC);
4227         count2 = I915_READ(DDREC);
4228         count3 = I915_READ(CSIEC);
4229
4230         total_count = count1 + count2 + count3;
4231
4232         /* FIXME: handle per-counter overflow */
4233         if (total_count < dev_priv->ips.last_count1) {
4234                 diff = ~0UL - dev_priv->ips.last_count1;
4235                 diff += total_count;
4236         } else {
4237                 diff = total_count - dev_priv->ips.last_count1;
4238         }
4239
4240         for (i = 0; i < ARRAY_SIZE(cparams); i++) {
4241                 if (cparams[i].i == dev_priv->ips.c_m &&
4242                     cparams[i].t == dev_priv->ips.r_t) {
4243                         m = cparams[i].m;
4244                         c = cparams[i].c;
4245                         break;
4246                 }
4247         }
4248
4249         diff = div_u64(diff, diff1);
4250         ret = ((m * diff) + c);
4251         ret = div_u64(ret, 10);
4252
4253         dev_priv->ips.last_count1 = total_count;
4254         dev_priv->ips.last_time1 = now;
4255
4256         dev_priv->ips.chipset_power = ret;
4257
4258         return ret;
4259 }
4260
4261 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
4262 {
4263         unsigned long val;
4264
4265         if (dev_priv->info->gen != 5)
4266                 return 0;
4267
4268         spin_lock_irq(&mchdev_lock);
4269
4270         val = __i915_chipset_val(dev_priv);
4271
4272         spin_unlock_irq(&mchdev_lock);
4273
4274         return val;
4275 }
4276
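/*
 * Derive a temperature-based value from the thermal sensor registers:
 * slope (m) and intercept (b) from TSFS, raw reading (x) from TR1,
 * returning m * x / 127 - b.
 */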
4277 unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
4278 {
4279         unsigned long m, x, b;
4280         u32 tsfs;
4281
4282         tsfs = I915_READ(TSFS);
4283
4284         m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
4285         x = I915_READ8(TR1);
4286
4287         b = tsfs & TSFS_INTR_MASK;
4288
4289         return ((m * x) / 127) - b;
4290 }
4291
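/*
 * Map a raw PXVID value to an external VID table entry; mobile parts use
 * the vm column, all others vd (both in .1 mil units, per the table).
 */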
4292 static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
4293 {
4294         static const struct v_table {
4295                 u16 vd; /* in .1 mil */
4296                 u16 vm; /* in .1 mil */
4297         } v_table[] = {
4298                 { 0, 0, },
4299                 { 375, 0, },
4300                 { 500, 0, },
4301                 { 625, 0, },
4302                 { 750, 0, },
4303                 { 875, 0, },
4304                 { 1000, 0, },
4305                 { 1125, 0, },
4306                 { 4125, 3000, },
4307                 { 4125, 3000, },
4308                 { 4125, 3000, },
4309                 { 4125, 3000, },
4310                 { 4125, 3000, },
4311                 { 4125, 3000, },
4312                 { 4125, 3000, },
4313                 { 4125, 3000, },
4314                 { 4125, 3000, },
4315                 { 4125, 3000, },
4316                 { 4125, 3000, },
4317                 { 4125, 3000, },
4318                 { 4125, 3000, },
4319                 { 4125, 3000, },
4320                 { 4125, 3000, },
4321                 { 4125, 3000, },
4322                 { 4125, 3000, },
4323                 { 4125, 3000, },
4324                 { 4125, 3000, },
4325                 { 4125, 3000, },
4326                 { 4125, 3000, },
4327                 { 4125, 3000, },
4328                 { 4125, 3000, },
4329                 { 4125, 3000, },
4330                 { 4250, 3125, },
4331                 { 4375, 3250, },
4332                 { 4500, 3375, },
4333                 { 4625, 3500, },
4334                 { 4750, 3625, },
4335                 { 4875, 3750, },
4336                 { 5000, 3875, },
4337                 { 5125, 4000, },
4338                 { 5250, 4125, },
4339                 { 5375, 4250, },
4340                 { 5500, 4375, },
4341                 { 5625, 4500, },
4342                 { 5750, 4625, },
4343                 { 5875, 4750, },
4344                 { 6000, 4875, },
4345                 { 6125, 5000, },
4346                 { 6250, 5125, },
4347                 { 6375, 5250, },
4348                 { 6500, 5375, },
4349                 { 6625, 5500, },
4350                 { 6750, 5625, },
4351                 { 6875, 5750, },
4352                 { 7000, 5875, },
4353                 { 7125, 6000, },
4354                 { 7250, 6125, },
4355                 { 7375, 6250, },
4356                 { 7500, 6375, },
4357                 { 7625, 6500, },
4358                 { 7750, 6625, },
4359                 { 7875, 6750, },
4360                 { 8000, 6875, },
4361                 { 8125, 7000, },
4362                 { 8250, 7125, },
4363                 { 8375, 7250, },
4364                 { 8500, 7375, },
4365                 { 8625, 7500, },
4366                 { 8750, 7625, },
4367                 { 8875, 7750, },
4368                 { 9000, 7875, },
4369                 { 9125, 8000, },
4370                 { 9250, 8125, },
4371                 { 9375, 8250, },
4372                 { 9500, 8375, },
4373                 { 9625, 8500, },
4374                 { 9750, 8625, },
4375                 { 9875, 8750, },
4376                 { 10000, 8875, },
4377                 { 10125, 9000, },
4378                 { 10250, 9125, },
4379                 { 10375, 9250, },
4380                 { 10500, 9375, },
4381                 { 10625, 9500, },
4382                 { 10750, 9625, },
4383                 { 10875, 9750, },
4384                 { 11000, 9875, },
4385                 { 11125, 10000, },
4386                 { 11250, 10125, },
4387                 { 11375, 10250, },
4388                 { 11500, 10375, },
4389                 { 11625, 10500, },
4390                 { 11750, 10625, },
4391                 { 11875, 10750, },
4392                 { 12000, 10875, },
4393                 { 12125, 11000, },
4394                 { 12250, 11125, },
4395                 { 12375, 11250, },
4396                 { 12500, 11375, },
4397                 { 12625, 11500, },
4398                 { 12750, 11625, },
4399                 { 12875, 11750, },
4400                 { 13000, 11875, },
4401                 { 13125, 12000, },
4402                 { 13250, 12125, },
4403                 { 13375, 12250, },
4404                 { 13500, 12375, },
4405                 { 13625, 12500, },
4406                 { 13750, 12625, },
4407                 { 13875, 12750, },
4408                 { 14000, 12875, },
4409                 { 14125, 13000, },
4410                 { 14250, 13125, },
4411                 { 14375, 13250, },
4412                 { 14500, 13375, },
4413                 { 14625, 13500, },
4414                 { 14750, 13625, },
4415                 { 14875, 13750, },
4416                 { 15000, 13875, },
4417                 { 15125, 14000, },
4418                 { 15250, 14125, },
4419                 { 15375, 14250, },
4420                 { 15500, 14375, },
4421                 { 15625, 14500, },
4422                 { 15750, 14625, },
4423                 { 15875, 14750, },
4424                 { 16000, 14875, },
4425                 { 16125, 15000, },
4426         };
4427         if (dev_priv->info->is_mobile)
4428                 return v_table[pxvid].vm;
4429         else
4430                 return v_table[pxvid].vd;
4431 }
4432
4433 static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
4434 {
4435         struct timespec now, diff1;
4436         u64 diff;
4437         unsigned long diffms;
4438         u32 count;
4439
4440         assert_spin_locked(&mchdev_lock);
4441
4442         getrawmonotonic(&now);
4443         diff1 = timespec_sub(now, dev_priv->ips.last_time2);
4444
4445         /* Don't divide by 0 */
4446         diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
4447         if (!diffms)
4448                 return;
4449
4450         count = I915_READ(GFXEC);
4451
4452         if (count < dev_priv->ips.last_count2) {
4453                 diff = ~0UL - dev_priv->ips.last_count2;
4454                 diff += count;
4455         } else {
4456                 diff = count - dev_priv->ips.last_count2;
4457         }
4458
4459         dev_priv->ips.last_count2 = count;
4460         dev_priv->ips.last_time2 = now;
4461
4462         /* More magic constants... */
4463         diff = diff * 1181;
4464         diff = div_u64(diff, diffms * 10);
4465         dev_priv->ips.gfx_power = diff;
4466 }
4467
4468 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
4469 {
4470         if (dev_priv->info->gen != 5)
4471                 return;
4472
4473         spin_lock_irq(&mchdev_lock);
4474
4475         __i915_update_gfx_val(dev_priv);
4476
4477         spin_unlock_irq(&mchdev_lock);
4478 }
4479
4480 static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
4481 {
4482         unsigned long t, corr, state1, corr2, state2;
4483         u32 pxvid, ext_v;
4484
4485         assert_spin_locked(&mchdev_lock);
4486
4487         pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
4488         pxvid = (pxvid >> 24) & 0x7f;
4489         ext_v = pvid_to_extvid(dev_priv, pxvid);
4490
4491         state1 = ext_v;
4492
4493         t = i915_mch_val(dev_priv);
4494
4495         /* Revel in the empirically derived constants */
4496
4497         /* Correction factor in 1/100000 units */
4498         if (t > 80)
4499                 corr = ((t * 2349) + 135940);
4500         else if (t >= 50)
4501                 corr = ((t * 964) + 29317);
4502         else /* < 50 */
4503                 corr = ((t * 301) + 1004);
4504
4505         corr = corr * ((150142 * state1) / 10000 - 78642);
4506         corr /= 100000;
4507         corr2 = (corr * dev_priv->ips.corr);
4508
4509         state2 = (corr2 * state1) / 10000;
4510         state2 /= 100; /* convert to mW */
4511
4512         __i915_update_gfx_val(dev_priv);
4513
4514         return dev_priv->ips.gfx_power + state2;
4515 }
4516
4517 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
4518 {
4519         unsigned long val;
4520
4521         if (dev_priv->info->gen != 5)
4522                 return 0;
4523
4524         spin_lock_irq(&mchdev_lock);
4525
4526         val = __i915_gfx_val(dev_priv);
4527
4528         spin_unlock_irq(&mchdev_lock);
4529
4530         return val;
4531 }
4532
4533 /**
4534  * i915_read_mch_val - return value for IPS use
4535  *
4536  * Calculate and return a value for the IPS driver to use when deciding whether
4537  * we have thermal and power headroom to increase CPU or GPU power budget.
4538  */
4539 unsigned long i915_read_mch_val(void)
4540 {
4541         struct drm_i915_private *dev_priv;
4542         unsigned long chipset_val, graphics_val, ret = 0;
4543
4544         spin_lock_irq(&mchdev_lock);
4545         if (!i915_mch_dev)
4546                 goto out_unlock;
4547         dev_priv = i915_mch_dev;
4548
4549         chipset_val = __i915_chipset_val(dev_priv);
4550         graphics_val = __i915_gfx_val(dev_priv);
4551
4552         ret = chipset_val + graphics_val;
4553
4554 out_unlock:
4555         spin_unlock_irq(&mchdev_lock);
4556
4557         return ret;
4558 }
4559 EXPORT_SYMBOL_GPL(i915_read_mch_val);
4560
4561 /**
4562  * i915_gpu_raise - raise GPU frequency limit
4563  *
4564  * Raise the limit; IPS indicates we have thermal headroom.
4565  */
4566 bool i915_gpu_raise(void)
4567 {
4568         struct drm_i915_private *dev_priv;
4569         bool ret = true;
4570
4571         spin_lock_irq(&mchdev_lock);
4572         if (!i915_mch_dev) {
4573                 ret = false;
4574                 goto out_unlock;
4575         }
4576         dev_priv = i915_mch_dev;
4577
4578         if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
4579                 dev_priv->ips.max_delay--;
4580
4581 out_unlock:
4582         spin_unlock_irq(&mchdev_lock);
4583
4584         return ret;
4585 }
4586 EXPORT_SYMBOL_GPL(i915_gpu_raise);
4587
4588 /**
4589  * i915_gpu_lower - lower GPU frequency limit
4590  *
4591  * IPS indicates we're close to a thermal limit, so throttle back the GPU
4592  * frequency maximum.
4593  */
4594 bool i915_gpu_lower(void)
4595 {
4596         struct drm_i915_private *dev_priv;
4597         bool ret = true;
4598
4599         spin_lock_irq(&mchdev_lock);
4600         if (!i915_mch_dev) {
4601                 ret = false;
4602                 goto out_unlock;
4603         }
4604         dev_priv = i915_mch_dev;
4605
4606         if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
4607                 dev_priv->ips.max_delay++;
4608
4609 out_unlock:
4610         spin_unlock_irq(&mchdev_lock);
4611
4612         return ret;
4613 }
4614 EXPORT_SYMBOL_GPL(i915_gpu_lower);
4615
4616 /**
4617  * i915_gpu_busy - indicate GPU business to IPS
4618  *
4619  * Tell the IPS driver whether or not the GPU is busy.
4620  */
4621 bool i915_gpu_busy(void)
4622 {
4623         struct drm_i915_private *dev_priv;
4624         struct intel_ring_buffer *ring;
4625         bool ret = false;
4626         int i;
4627
4628         spin_lock_irq(&mchdev_lock);
4629         if (!i915_mch_dev)
4630                 goto out_unlock;
4631         dev_priv = i915_mch_dev;
4632
4633         for_each_ring(ring, dev_priv, i)
4634                 ret |= !list_empty(&ring->request_list);
4635
4636 out_unlock:
4637         spin_unlock_irq(&mchdev_lock);
4638
4639         return ret;
4640 }
4641 EXPORT_SYMBOL_GPL(i915_gpu_busy);
4642
4643 /**
4644  * i915_gpu_turbo_disable - disable graphics turbo
4645  *
4646  * Disable graphics turbo by resetting the max frequency and setting the
4647  * current frequency to the default.
4648  */
4649 bool i915_gpu_turbo_disable(void)
4650 {
4651         struct drm_i915_private *dev_priv;
4652         bool ret = true;
4653
4654         spin_lock_irq(&mchdev_lock);
4655         if (!i915_mch_dev) {
4656                 ret = false;
4657                 goto out_unlock;
4658         }
4659         dev_priv = i915_mch_dev;
4660
4661         dev_priv->ips.max_delay = dev_priv->ips.fstart;
4662
4663         if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
4664                 ret = false;
4665
4666 out_unlock:
4667         spin_unlock_irq(&mchdev_lock);
4668
4669         return ret;
4670 }
4671 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
4672
4673 /**
4674  * Tells the intel_ips driver that the i915 driver is now loaded, if
4675  * IPS got loaded first.
4676  *
4677  * This awkward dance is so that neither module has to depend on the
4678  * other in order for IPS to do the appropriate communication of
4679  * GPU turbo limits to i915.
4680  */
4681 static void
4682 ips_ping_for_i915_load(void)
4683 {
4684         void (*link)(void);
4685
4686         link = symbol_get(ips_link_to_i915_driver);
4687         if (link) {
4688                 link();
4689                 symbol_put(ips_link_to_i915_driver);
4690         }
4691 }
4692
4693 void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
4694 {
4695         /* We only register the i915 ips part with intel-ips once everything is
4696          * set up, to avoid intel-ips sneaking in and reading bogus values. */
4697         spin_lock_irq(&mchdev_lock);
4698         i915_mch_dev = dev_priv;
4699         spin_unlock_irq(&mchdev_lock);
4700
4701         ips_ping_for_i915_load();
4702 }
4703
4704 void intel_gpu_ips_teardown(void)
4705 {
4706         spin_lock_irq(&mchdev_lock);
4707         i915_mch_dev = NULL;
4708         spin_unlock_irq(&mchdev_lock);
4709 }
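
/*
 * Program the energy monitor (EMON): set per-event energy weights, derive
 * per-P-state weights from the PXVFREQ table, then enable the PMON counters.
 */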
4710 static void intel_init_emon(struct drm_device *dev)
4711 {
4712         struct drm_i915_private *dev_priv = dev->dev_private;
4713         u32 lcfuse;
4714         u8 pxw[16];
4715         int i;
4716
4717         /* Disable PMON while programming the weights */
4718         I915_WRITE(ECR, 0);
4719         POSTING_READ(ECR);
4720
4721         /* Program energy weights for various events */
4722         I915_WRITE(SDEW, 0x15040d00);
4723         I915_WRITE(CSIEW0, 0x007f0000);
4724         I915_WRITE(CSIEW1, 0x1e220004);
4725         I915_WRITE(CSIEW2, 0x04000004);
4726
4727         for (i = 0; i < 5; i++)
4728                 I915_WRITE(PEW + (i * 4), 0);
4729         for (i = 0; i < 3; i++)
4730                 I915_WRITE(DEW + (i * 4), 0);
4731
4732         /* Program P-state weights to account for frequency power adjustment */
4733         for (i = 0; i < 16; i++) {
4734                 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
4735                 unsigned long freq = intel_pxfreq(pxvidfreq);
4736                 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
4737                         PXVFREQ_PX_SHIFT;
4738                 unsigned long val;
4739
4740                 val = vid * vid;
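                /* scale vid^2 * freq into a byte against an assumed 127^2 * 900 max */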
4741                 val *= (freq / 1000);
4742                 val *= 255;
4743                 val /= (127*127*900);
4744                 if (val > 0xff)
4745                         DRM_ERROR("bad pxval: %ld\n", val);
4746                 pxw[i] = val;
4747         }
4748         /* Render standby states get 0 weight */
4749         pxw[14] = 0;
4750         pxw[15] = 0;
4751
4752         for (i = 0; i < 4; i++) {
4753                 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
4754                         (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
4755                 I915_WRITE(PXW + (i * 4), val);
4756         }
4757
4758         /* Adjust magic regs to magic values (more experimental results) */
4759         I915_WRITE(OGW0, 0);
4760         I915_WRITE(OGW1, 0);
4761         I915_WRITE(EG0, 0x00007f00);
4762         I915_WRITE(EG1, 0x0000000e);
4763         I915_WRITE(EG2, 0x000e0000);
4764         I915_WRITE(EG3, 0x68000300);
4765         I915_WRITE(EG4, 0x42000000);
4766         I915_WRITE(EG5, 0x00140031);
4767         I915_WRITE(EG6, 0);
4768         I915_WRITE(EG7, 0);
4769
4770         for (i = 0; i < 8; i++)
4771                 I915_WRITE(PXWL + (i * 4), 0);
4772
4773         /* Enable PMON + select events */
4774         I915_WRITE(ECR, 0x80000019);
4775
4776         lcfuse = I915_READ(LCFUSE02);
4777
4778         dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
4779 }
4780
4781 void intel_disable_gt_powersave(struct drm_device *dev)
4782 {
4783         struct drm_i915_private *dev_priv = dev->dev_private;
4784
4785         /* Interrupts should be disabled already to avoid re-arming. */
4786         WARN_ON(dev->irq_enabled);
4787
4788         if (IS_IRONLAKE_M(dev)) {
4789                 ironlake_disable_drps(dev);
4790                 ironlake_disable_rc6(dev);
4791         } else if (INTEL_INFO(dev)->gen >= 6) {
4792                 cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
4793                 cancel_work_sync(&dev_priv->rps.work);
4794                 mutex_lock(&dev_priv->rps.hw_lock);
4795                 if (IS_VALLEYVIEW(dev))
4796                         valleyview_disable_rps(dev);
4797                 else
4798                         gen6_disable_rps(dev);
4799                 dev_priv->rps.enabled = false;
4800                 mutex_unlock(&dev_priv->rps.hw_lock);
4801         }
4802 }
4803
4804 static void intel_gen6_powersave_work(struct work_struct *work)
4805 {
4806         struct drm_i915_private *dev_priv =
4807                 container_of(work, struct drm_i915_private,
4808                              rps.delayed_resume_work.work);
4809         struct drm_device *dev = dev_priv->dev;
4810
4811         mutex_lock(&dev_priv->rps.hw_lock);
4812
4813         if (IS_VALLEYVIEW(dev)) {
4814                 valleyview_enable_rps(dev);
4815         } else {
4816                 gen6_enable_rps(dev);
4817                 gen6_update_ring_freq(dev);
4818         }
4819         dev_priv->rps.enabled = true;
4820         mutex_unlock(&dev_priv->rps.hw_lock);
4821 }
4822
4823 void intel_enable_gt_powersave(struct drm_device *dev)
4824 {
4825         struct drm_i915_private *dev_priv = dev->dev_private;
4826
4827         if (IS_IRONLAKE_M(dev)) {
4828                 ironlake_enable_drps(dev);
4829                 ironlake_enable_rc6(dev);
4830                 intel_init_emon(dev);
4831         } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
4832                 /*
4833                  * PCU communication is slow and this doesn't need to be
4834                  * done at any specific time, so do this out of our fast path
4835                  * to make resume and init faster.
4836                  */
4837                 schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
4838                                       round_jiffies_up_relative(HZ));
4839         }
4840 }
4841
4842 static void ibx_init_clock_gating(struct drm_device *dev)
4843 {
4844         struct drm_i915_private *dev_priv = dev->dev_private;
4845
4846         /*
4847          * On Ibex Peak and Cougar Point, we need to disable clock
4848          * gating for the panel power sequencer or it will fail to
4849          * start up when no ports are active.
4850          */
4851         I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
4852 }
4853
4854 static void g4x_disable_trickle_feed(struct drm_device *dev)
4855 {
4856         struct drm_i915_private *dev_priv = dev->dev_private;
4857         int pipe;
4858
4859         for_each_pipe(pipe) {
4860                 I915_WRITE(DSPCNTR(pipe),
4861                            I915_READ(DSPCNTR(pipe)) |
4862                            DISPPLANE_TRICKLE_FEED_DISABLE);
4863                 intel_flush_primary_plane(dev_priv, pipe);
4864         }
4865 }
4866
4867 static void ironlake_init_clock_gating(struct drm_device *dev)
4868 {
4869         struct drm_i915_private *dev_priv = dev->dev_private;
4870         uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
4871
4872         /*
4873          * Required for FBC
4874          * WaFbcDisableDpfcClockGating:ilk
4875          */
4876         dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
4877                    ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
4878                    ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
4879
4880         I915_WRITE(PCH_3DCGDIS0,
4881                    MARIUNIT_CLOCK_GATE_DISABLE |
4882                    SVSMUNIT_CLOCK_GATE_DISABLE);
4883         I915_WRITE(PCH_3DCGDIS1,
4884                    VFMUNIT_CLOCK_GATE_DISABLE);
4885
4886         /*
4887          * According to the spec the following bits should be set in
4888          * order to enable memory self-refresh
4889          * The bit 22/21 of 0x42004
4890          * The bit 5 of 0x42020
4891          * The bit 15 of 0x45000
4892          */
4893         I915_WRITE(ILK_DISPLAY_CHICKEN2,
4894                    (I915_READ(ILK_DISPLAY_CHICKEN2) |
4895                     ILK_DPARB_GATE | ILK_VSDPFD_FULL));
4896         dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
4897         I915_WRITE(DISP_ARB_CTL,
4898                    (I915_READ(DISP_ARB_CTL) |
4899                     DISP_FBC_WM_DIS));
4900         I915_WRITE(WM3_LP_ILK, 0);
4901         I915_WRITE(WM2_LP_ILK, 0);
4902         I915_WRITE(WM1_LP_ILK, 0);
4903
4904         /*
4905          * Based on the document from hardware guys the following bits
4906          * should be set unconditionally in order to enable FBC.
4907          * The bit 22 of 0x42000
4908          * The bit 22 of 0x42004
4909          * The bit 7,8,9 of 0x42020.
4910          */
4911         if (IS_IRONLAKE_M(dev)) {
4912                 /* WaFbcAsynchFlipDisableFbcQueue:ilk */
4913                 I915_WRITE(ILK_DISPLAY_CHICKEN1,
4914                            I915_READ(ILK_DISPLAY_CHICKEN1) |
4915                            ILK_FBCQ_DIS);
4916                 I915_WRITE(ILK_DISPLAY_CHICKEN2,
4917                            I915_READ(ILK_DISPLAY_CHICKEN2) |
4918                            ILK_DPARB_GATE);
4919         }
4920
4921         I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
4922
4923         I915_WRITE(ILK_DISPLAY_CHICKEN2,
4924                    I915_READ(ILK_DISPLAY_CHICKEN2) |
4925                    ILK_ELPIN_409_SELECT);
4926         I915_WRITE(_3D_CHICKEN2,
4927                    _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
4928                    _3D_CHICKEN2_WM_READ_PIPELINED);
4929
4930         /* WaDisableRenderCachePipelinedFlush:ilk */
4931         I915_WRITE(CACHE_MODE_0,
4932                    _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
4933
4934         g4x_disable_trickle_feed(dev);
4935
4936         ibx_init_clock_gating(dev);
4937 }
4938
4939 static void cpt_init_clock_gating(struct drm_device *dev)
4940 {
4941         struct drm_i915_private *dev_priv = dev->dev_private;
4942         int pipe;
4943         uint32_t val;
4944
4945         /*
4946          * On Ibex Peak and Cougar Point, we need to disable clock
4947          * gating for the panel power sequencer or it will fail to
4948          * start up when no ports are active.
4949          */
4950         I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
4951         I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
4952                    DPLS_EDP_PPS_FIX_DIS);
4953         /* The below fixes the weird display corruption (a few pixels shifted
4954          * downward) seen only on the LVDS panels of some HP laptops with IVY.
4955          */
4956         for_each_pipe(pipe) {
4957                 val = I915_READ(TRANS_CHICKEN2(pipe));
4958                 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
4959                 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
4960                 if (dev_priv->vbt.fdi_rx_polarity_inverted)
4961                         val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
4962                 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
4963                 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
4964                 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
4965                 I915_WRITE(TRANS_CHICKEN2(pipe), val);
4966         }
4967         /* WADP0ClockGatingDisable */
4968         for_each_pipe(pipe) {
4969                 I915_WRITE(TRANS_CHICKEN1(pipe),
4970                            TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
4971         }
4972 }
4973
4974 static void gen6_check_mch_setup(struct drm_device *dev)
4975 {
4976         struct drm_i915_private *dev_priv = dev->dev_private;
4977         uint32_t tmp;
4978
4979         tmp = I915_READ(MCH_SSKPD);
4980         if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) {
4981                 DRM_INFO("Wrong MCH_SSKPD value: 0x%08x\n", tmp);
4982                 DRM_INFO("This can cause pipe underruns and display issues.\n");
4983                 DRM_INFO("Please upgrade your BIOS to fix this.\n");
4984         }
4985 }
4986
4987 static void gen6_init_clock_gating(struct drm_device *dev)
4988 {
4989         struct drm_i915_private *dev_priv = dev->dev_private;
4990         uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
4991
4992         I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
4993
4994         I915_WRITE(ILK_DISPLAY_CHICKEN2,
4995                    I915_READ(ILK_DISPLAY_CHICKEN2) |
4996                    ILK_ELPIN_409_SELECT);
4997
4998         /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
4999         I915_WRITE(_3D_CHICKEN,
5000                    _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
5001
5002         /* WaSetupGtModeTdRowDispatch:snb */
5003         if (IS_SNB_GT1(dev))
5004                 I915_WRITE(GEN6_GT_MODE,
5005                            _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
5006
5007         I915_WRITE(WM3_LP_ILK, 0);
5008         I915_WRITE(WM2_LP_ILK, 0);
5009         I915_WRITE(WM1_LP_ILK, 0);
5010
5011         I915_WRITE(CACHE_MODE_0,
5012                    _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
5013
5014         I915_WRITE(GEN6_UCGCTL1,
5015                    I915_READ(GEN6_UCGCTL1) |
5016                    GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
5017                    GEN6_CSUNIT_CLOCK_GATE_DISABLE);
5018
5019         /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
5020          * gating disable must be set.  Failure to set it results in
5021          * flickering pixels due to Z write ordering failures after
5022          * some amount of runtime in the Mesa "fire" demo, and Unigine
5023          * Sanctuary and Tropics, and apparently anything else with
5024          * alpha test or pixel discard.
5025          *
5026          * According to the spec, bit 11 (RCCUNIT) must also be set,
5027          * but we didn't debug actual testcases to find it out.
5028          *
5029          * Also apply WaDisableVDSUnitClockGating:snb and
5030          * WaDisableRCPBUnitClockGating:snb.
5031          */
5032         I915_WRITE(GEN6_UCGCTL2,
5033                    GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
5034                    GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
5035                    GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
5036
5037         /* Bspec says we need to always set all mask bits. */
5038         I915_WRITE(_3D_CHICKEN3, (0xFFFF << 16) |
5039                    _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL);
5040
5041         /*
5042          * According to the spec the following bits should be
5043          * set in order to enable memory self-refresh and fbc:
5044          * The bit21 and bit22 of 0x42000
5045          * The bit21 and bit22 of 0x42004
5046          * The bit5 and bit7 of 0x42020
5047          * The bit14 of 0x70180
5048          * The bit14 of 0x71180
5049          *
5050          * WaFbcAsynchFlipDisableFbcQueue:snb
5051          */
5052         I915_WRITE(ILK_DISPLAY_CHICKEN1,
5053                    I915_READ(ILK_DISPLAY_CHICKEN1) |
5054                    ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
5055         I915_WRITE(ILK_DISPLAY_CHICKEN2,
5056                    I915_READ(ILK_DISPLAY_CHICKEN2) |
5057                    ILK_DPARB_GATE | ILK_VSDPFD_FULL);
5058         I915_WRITE(ILK_DSPCLK_GATE_D,
5059                    I915_READ(ILK_DSPCLK_GATE_D) |
5060                    ILK_DPARBUNIT_CLOCK_GATE_ENABLE  |
5061                    ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
5062
5063         g4x_disable_trickle_feed(dev);
5064
5065         /* The default value should be 0x200 according to docs, but the two
5066          * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
5067         I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff));
5068         I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
5069
5070         cpt_init_clock_gating(dev);
5071
5072         gen6_check_mch_setup(dev);
5073 }
5074
5075 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
5076 {
5077         uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
5078
5079         reg &= ~GEN7_FF_SCHED_MASK;
5080         reg |= GEN7_FF_TS_SCHED_HW;
5081         reg |= GEN7_FF_VS_SCHED_HW;
5082         reg |= GEN7_FF_DS_SCHED_HW;
5083
5084         if (IS_HASWELL(dev_priv->dev))
5085                 reg &= ~GEN7_FF_VS_REF_CNT_FFME;
5086
5087         I915_WRITE(GEN7_FF_THREAD_MODE, reg);
5088 }
5089
5090 static void lpt_init_clock_gating(struct drm_device *dev)
5091 {
5092         struct drm_i915_private *dev_priv = dev->dev_private;
5093
5094         /*
5095          * TODO: this bit should only be enabled when really needed, then
5096          * disabled when not needed anymore in order to save power.
5097          */
5098         if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
5099                 I915_WRITE(SOUTH_DSPCLK_GATE_D,
5100                            I915_READ(SOUTH_DSPCLK_GATE_D) |
5101                            PCH_LP_PARTITION_LEVEL_DISABLE);
5102
5103         /* WADPOClockGatingDisable:hsw */
5104         I915_WRITE(_TRANSA_CHICKEN1,
5105                    I915_READ(_TRANSA_CHICKEN1) |
5106                    TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
5107 }
5108
5109 static void lpt_suspend_hw(struct drm_device *dev)
5110 {
5111         struct drm_i915_private *dev_priv = dev->dev_private;
5112
5113         if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
5114                 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
5115
5116                 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
5117                 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
5118         }
5119 }
5120
5121 static void haswell_init_clock_gating(struct drm_device *dev)
5122 {
5123         struct drm_i915_private *dev_priv = dev->dev_private;
5124
5125         I915_WRITE(WM3_LP_ILK, 0);
5126         I915_WRITE(WM2_LP_ILK, 0);
5127         I915_WRITE(WM1_LP_ILK, 0);
5128
5129         /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
5130          * This implements the WaDisableRCZUnitClockGating:hsw workaround.
5131          */
5132         I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
5133
5134         /* Apply the WaDisableRHWOOptimizationForRenderHang:hsw workaround. */
5135         I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
5136                    GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
5137
5138         /* WaApplyL3ControlAndL3ChickenMode:hsw */
5139         I915_WRITE(GEN7_L3CNTLREG1,
5140                         GEN7_WA_FOR_GEN7_L3_CONTROL);
5141         I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
5142                         GEN7_WA_L3_CHICKEN_MODE);
5143
5144         /* This is required by WaCatErrorRejectionIssue:hsw */
5145         I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
5146                         I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
5147                         GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
5148
5149         /* WaVSRefCountFullforceMissDisable:hsw */
5150         gen7_setup_fixed_func_scheduler(dev_priv);
5151
5152         /* WaDisable4x2SubspanOptimization:hsw */
5153         I915_WRITE(CACHE_MODE_1,
5154                    _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
5155
5156         /* WaSwitchSolVfFArbitrationPriority:hsw */
5157         I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
5158
5159         /* WaRsPkgCStateDisplayPMReq:hsw */
5160         I915_WRITE(CHICKEN_PAR1_1,
5161                    I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
5162
5163         lpt_init_clock_gating(dev);
5164 }
5165
5166 static void ivybridge_init_clock_gating(struct drm_device *dev)
5167 {
5168         struct drm_i915_private *dev_priv = dev->dev_private;
5169         uint32_t snpcr;
5170
5171         I915_WRITE(WM3_LP_ILK, 0);
5172         I915_WRITE(WM2_LP_ILK, 0);
5173         I915_WRITE(WM1_LP_ILK, 0);
5174
5175         I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
5176
5177         /* WaDisableEarlyCull:ivb */
5178         I915_WRITE(_3D_CHICKEN3,
5179                    _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
5180
5181         /* WaDisableBackToBackFlipFix:ivb */
5182         I915_WRITE(IVB_CHICKEN3,
5183                    CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
5184                    CHICKEN3_DGMG_DONE_FIX_DISABLE);
5185
5186         /* WaDisablePSDDualDispatchEnable:ivb */
5187         if (IS_IVB_GT1(dev))
5188                 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
5189                            _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
5190         else
5191                 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2,
5192                            _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
5193
5194         /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
5195         I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
5196                    GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
5197
5198         /* WaApplyL3ControlAndL3ChickenMode:ivb */
5199         I915_WRITE(GEN7_L3CNTLREG1,
5200                         GEN7_WA_FOR_GEN7_L3_CONTROL);
5201         I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
5202                    GEN7_WA_L3_CHICKEN_MODE);
5203         if (IS_IVB_GT1(dev))
5204                 I915_WRITE(GEN7_ROW_CHICKEN2,
5205                            _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5206         else
5207                 I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
5208                            _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5209
5211         /* WaForceL3Serialization:ivb */
5212         I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
5213                    ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
5214
5215         /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
5216          * gating disable must be set.  Failure to set it results in
5217          * flickering pixels due to Z write ordering failures after
5218          * some amount of runtime in the Mesa "fire" demo, and Unigine
5219          * Sanctuary and Tropics, and apparently anything else with
5220          * alpha test or pixel discard.
5221          *
5222          * According to the spec, bit 11 (RCCUNIT) must also be set,
5223          * but we didn't debug actual testcases to find it out.
5224          *
5225          * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
5226          * This implements the WaDisableRCZUnitClockGating:ivb workaround.
5227          */
5228         I915_WRITE(GEN6_UCGCTL2,
5229                    GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
5230                    GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
5231
5232         /* This is required by WaCatErrorRejectionIssue:ivb */
5233         I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
5234                         I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
5235                         GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
5236
5237         g4x_disable_trickle_feed(dev);
5238
5239         /* WaVSRefCountFullforceMissDisable:ivb */
5240         gen7_setup_fixed_func_scheduler(dev_priv);
5241
5242         /* WaDisable4x2SubspanOptimization:ivb */
5243         I915_WRITE(CACHE_MODE_1,
5244                    _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
5245
5246         snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
5247         snpcr &= ~GEN6_MBC_SNPCR_MASK;
5248         snpcr |= GEN6_MBC_SNPCR_MED;
5249         I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
5250
5251         if (!HAS_PCH_NOP(dev))
5252                 cpt_init_clock_gating(dev);
5253
5254         gen6_check_mch_setup(dev);
5255 }
5256
5257 static void valleyview_init_clock_gating(struct drm_device *dev)
5258 {
5259         struct drm_i915_private *dev_priv = dev->dev_private;
5260
5261         I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
5262
5263         /* WaDisableEarlyCull:vlv */
5264         I915_WRITE(_3D_CHICKEN3,
5265                    _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
5266
5267         /* WaDisableBackToBackFlipFix:vlv */
5268         I915_WRITE(IVB_CHICKEN3,
5269                    CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
5270                    CHICKEN3_DGMG_DONE_FIX_DISABLE);
5271
5272         /* WaDisablePSDDualDispatchEnable:vlv */
5273         I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
5274                    _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
5275                                       GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
5276
5277         /* Apply the WaDisableRHWOOptimizationForRenderHang:vlv workaround. */
5278         I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
5279                    GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
5280
5281         /* WaApplyL3ControlAndL3ChickenMode:vlv */
5282         I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);
5283         I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
5284
5285         /* WaForceL3Serialization:vlv */
5286         I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
5287                    ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
5288
5289         /* WaDisableDopClockGating:vlv */
5290         I915_WRITE(GEN7_ROW_CHICKEN2,
5291                    _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5292
5293         /* This is required by WaCatErrorRejectionIssue:vlv */
5294         I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
5295                    I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
5296                    GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
5297
5298         /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
5299          * gating disable must be set.  Failure to set it results in
5300          * flickering pixels due to Z write ordering failures after
5301          * some amount of runtime in the Mesa "fire" demo, and Unigine
5302          * Sanctuary and Tropics, and apparently anything else with
5303          * alpha test or pixel discard.
5304          *
5305          * According to the spec, bit 11 (RCCUNIT) must also be set,
5306          * but we didn't debug actual testcases to find it out.
5307          *
5308          * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
5309          * This implements the WaDisableRCZUnitClockGating:vlv workaround.
5310          *
5311          * Also apply WaDisableVDSUnitClockGating:vlv and
5312          * WaDisableRCPBUnitClockGating:vlv.
5313          */
5314         I915_WRITE(GEN6_UCGCTL2,
5315                    GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
5316                    GEN7_TDLUNIT_CLOCK_GATE_DISABLE |
5317                    GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
5318                    GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
5319                    GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
5320
5321         I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
5322
5323         I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
5324
5325         I915_WRITE(CACHE_MODE_1,
5326                    _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
5327
5328         /*
5329          * WaDisableVLVClockGating_VBIIssue:vlv
5330          * Disable clock gating on the GCFG unit to prevent a delay
5331          * in the reporting of vblank events.
5332          */
5333         I915_WRITE(VLV_GUNIT_CLOCK_GATE, 0xffffffff);
5334
5335         /* Conservative clock gating settings for now */
5336         I915_WRITE(0x9400, 0xffffffff);
5337         I915_WRITE(0x9404, 0xffffffff);
5338         I915_WRITE(0x9408, 0xffffffff);
5339         I915_WRITE(0x940c, 0xffffffff);
5340         I915_WRITE(0x9410, 0xffffffff);
5341         I915_WRITE(0x9414, 0xffffffff);
5342         I915_WRITE(0x9418, 0xffffffff);
5343 }
5344
5345 static void g4x_init_clock_gating(struct drm_device *dev)
5346 {
5347         struct drm_i915_private *dev_priv = dev->dev_private;
5348         uint32_t dspclk_gate;
5349
5350         I915_WRITE(RENCLK_GATE_D1, 0);
5351         I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
5352                    GS_UNIT_CLOCK_GATE_DISABLE |
5353                    CL_UNIT_CLOCK_GATE_DISABLE);
5354         I915_WRITE(RAMCLK_GATE_D, 0);
5355         dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
5356                 OVRUNIT_CLOCK_GATE_DISABLE |
5357                 OVCUNIT_CLOCK_GATE_DISABLE;
5358         if (IS_GM45(dev))
5359                 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
5360         I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
5361
5362         /* WaDisableRenderCachePipelinedFlush */
5363         I915_WRITE(CACHE_MODE_0,
5364                    _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
5365
5366         g4x_disable_trickle_feed(dev);
5367 }
5368
5369 static void crestline_init_clock_gating(struct drm_device *dev)
5370 {
5371         struct drm_i915_private *dev_priv = dev->dev_private;
5372
5373         I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
5374         I915_WRITE(RENCLK_GATE_D2, 0);
5375         I915_WRITE(DSPCLK_GATE_D, 0);
5376         I915_WRITE(RAMCLK_GATE_D, 0);
5377         I915_WRITE16(DEUC, 0);
5378         I915_WRITE(MI_ARB_STATE,
5379                    _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
5380 }
5381
5382 static void broadwater_init_clock_gating(struct drm_device *dev)
5383 {
5384         struct drm_i915_private *dev_priv = dev->dev_private;
5385
5386         I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
5387                    I965_RCC_CLOCK_GATE_DISABLE |
5388                    I965_RCPB_CLOCK_GATE_DISABLE |
5389                    I965_ISC_CLOCK_GATE_DISABLE |
5390                    I965_FBC_CLOCK_GATE_DISABLE);
5391         I915_WRITE(RENCLK_GATE_D2, 0);
5392         I915_WRITE(MI_ARB_STATE,
5393                    _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
5394 }
5395
5396 static void gen3_init_clock_gating(struct drm_device *dev)
5397 {
5398         struct drm_i915_private *dev_priv = dev->dev_private;
5399         u32 dstate = I915_READ(D_STATE);
5400
5401         dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
5402                 DSTATE_DOT_CLOCK_GATING;
5403         I915_WRITE(D_STATE, dstate);
5404
5405         if (IS_PINEVIEW(dev))
5406                 I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
5407
5408         /* IIR "flip pending" means done if this bit is set */
5409         I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
5410 }
5411
5412 static void i85x_init_clock_gating(struct drm_device *dev)
5413 {
5414         struct drm_i915_private *dev_priv = dev->dev_private;
5415
5416         I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
5417 }
5418
5419 static void i830_init_clock_gating(struct drm_device *dev)
5420 {
5421         struct drm_i915_private *dev_priv = dev->dev_private;
5422
5423         I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
5424 }
5425
5426 void intel_init_clock_gating(struct drm_device *dev)
5427 {
5428         struct drm_i915_private *dev_priv = dev->dev_private;
5429
5430         dev_priv->display.init_clock_gating(dev);
5431 }
5432
5433 void intel_suspend_hw(struct drm_device *dev)
5434 {
5435         if (HAS_PCH_LPT(dev))
5436                 lpt_suspend_hw(dev);
5437 }
5438
5439 /**
5440  * We should only use the power well if we explicitly asked the hardware to
5441  * enable it, so check if it's enabled and also check if we've requested it to
5442  * be enabled.
5443  */
5444 bool intel_display_power_enabled(struct drm_device *dev,
5445                                  enum intel_display_power_domain domain)
5446 {
5447         struct drm_i915_private *dev_priv = dev->dev_private;
5448
5449         if (!HAS_POWER_WELL(dev))
5450                 return true;
5451
5452         switch (domain) {
5453         case POWER_DOMAIN_PIPE_A:
5454         case POWER_DOMAIN_TRANSCODER_EDP:
5455                 return true;
5456         case POWER_DOMAIN_VGA:
5457         case POWER_DOMAIN_PIPE_B:
5458         case POWER_DOMAIN_PIPE_C:
5459         case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
5460         case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
5461         case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
5462         case POWER_DOMAIN_TRANSCODER_A:
5463         case POWER_DOMAIN_TRANSCODER_B:
5464         case POWER_DOMAIN_TRANSCODER_C:
5465                 return I915_READ(HSW_PWR_WELL_DRIVER) ==
5466                      (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
5467         default:
5468                 BUG();
5469         }
5470 }
5471
5472 static void __intel_set_power_well(struct drm_device *dev, bool enable)
5473 {
5474         struct drm_i915_private *dev_priv = dev->dev_private;
5475         bool is_enabled, enable_requested;
5476         uint32_t tmp;
5477
5478         tmp = I915_READ(HSW_PWR_WELL_DRIVER);
5479         is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
5480         enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
5481
5482         if (enable) {
5483                 if (!enable_requested)
5484                         I915_WRITE(HSW_PWR_WELL_DRIVER,
5485                                    HSW_PWR_WELL_ENABLE_REQUEST);
5486
5487                 if (!is_enabled) {
5488                         DRM_DEBUG_KMS("Enabling power well\n");
5489                         if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
5490                                       HSW_PWR_WELL_STATE_ENABLED), 20))
5491                                 DRM_ERROR("Timeout enabling power well\n");
5492                 }
5493         } else {
5494                 if (enable_requested) {
5495                         unsigned long irqflags;
5496                         enum pipe p;
5497
5498                         I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
5499                         POSTING_READ(HSW_PWR_WELL_DRIVER);
5500                         DRM_DEBUG_KMS("Requesting to disable the power well\n");
5501
5502                         /*
5503                          * After this, the registers on the pipes that are part
5504                          * of the power well will become zero, so we have to
5505                          * adjust our counters according to that.
5506                          *
5507                          * FIXME: Should we do this in general in
5508                          * drm_vblank_post_modeset?
5509                          */
5510                         spin_lock_irqsave(&dev->vbl_lock, irqflags);
5511                         for_each_pipe(p)
5512                                 if (p != PIPE_A)
5513                                         dev->vblank[p].last = 0;
5514                         spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
5515                 }
5516         }
5517 }
5518
5519 static void __intel_power_well_get(struct i915_power_well *power_well)
5520 {
5521         if (!power_well->count++)
5522                 __intel_set_power_well(power_well->device, true);
5523 }
5524
5525 static void __intel_power_well_put(struct i915_power_well *power_well)
5526 {
5527         WARN_ON(!power_well->count);
5528         if (!--power_well->count)
5529                 __intel_set_power_well(power_well->device, false);
5530 }
5531
5532 void intel_display_power_get(struct drm_device *dev,
5533                              enum intel_display_power_domain domain)
5534 {
5535         struct drm_i915_private *dev_priv = dev->dev_private;
5536         struct i915_power_well *power_well = &dev_priv->power_well;
5537
5538         if (!HAS_POWER_WELL(dev))
5539                 return;
5540
5541         switch (domain) {
5542         case POWER_DOMAIN_PIPE_A:
5543         case POWER_DOMAIN_TRANSCODER_EDP:
5544                 return;
5545         case POWER_DOMAIN_VGA:
5546         case POWER_DOMAIN_PIPE_B:
5547         case POWER_DOMAIN_PIPE_C:
5548         case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
5549         case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
5550         case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
5551         case POWER_DOMAIN_TRANSCODER_A:
5552         case POWER_DOMAIN_TRANSCODER_B:
5553         case POWER_DOMAIN_TRANSCODER_C:
5554                 spin_lock_irq(&power_well->lock);
5555                 __intel_power_well_get(power_well);
5556                 spin_unlock_irq(&power_well->lock);
5557                 return;
5558         default:
5559                 BUG();
5560         }
5561 }
5562
5563 void intel_display_power_put(struct drm_device *dev,
5564                              enum intel_display_power_domain domain)
5565 {
5566         struct drm_i915_private *dev_priv = dev->dev_private;
5567         struct i915_power_well *power_well = &dev_priv->power_well;
5568
5569         if (!HAS_POWER_WELL(dev))
5570                 return;
5571
5572         switch (domain) {
5573         case POWER_DOMAIN_PIPE_A:
5574         case POWER_DOMAIN_TRANSCODER_EDP:
5575                 return;
5576         case POWER_DOMAIN_VGA:
5577         case POWER_DOMAIN_PIPE_B:
5578         case POWER_DOMAIN_PIPE_C:
5579         case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
5580         case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
5581         case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
5582         case POWER_DOMAIN_TRANSCODER_A:
5583         case POWER_DOMAIN_TRANSCODER_B:
5584         case POWER_DOMAIN_TRANSCODER_C:
5585                 spin_lock_irq(&power_well->lock);
5586                 __intel_power_well_put(power_well);
5587                 spin_unlock_irq(&power_well->lock);
5588                 return;
5589         default:
5590                 BUG();
5591         }
5592 }
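
/*
 * Illustrative sketch (not part of this file): callers that touch a resource
 * behind the power well are expected to bracket the access with a get/put on
 * the relevant domain.  The wrapper name, register and domain below are only
 * examples.
 */
#if 0
static u32 example_read_power_well_reg(struct drm_device *dev, u32 reg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;

	/* hold a reference so the well cannot be powered down under us */
	intel_display_power_get(dev, POWER_DOMAIN_TRANSCODER_B);
	val = I915_READ(reg);
	intel_display_power_put(dev, POWER_DOMAIN_TRANSCODER_B);

	return val;
}
#endif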
5593
5594 static struct i915_power_well *hsw_pwr;
5595
5596 /* Display audio driver power well request */
5597 void i915_request_power_well(void)
5598 {
5599         if (WARN_ON(!hsw_pwr))
5600                 return;
5601
5602         spin_lock_irq(&hsw_pwr->lock);
5603         __intel_power_well_get(hsw_pwr);
5604         spin_unlock_irq(&hsw_pwr->lock);
5605 }
5606 EXPORT_SYMBOL_GPL(i915_request_power_well);
5607
5608 /* Display audio driver power well release */
5609 void i915_release_power_well(void)
5610 {
5611         if (WARN_ON(!hsw_pwr))
5612                 return;
5613
5614         spin_lock_irq(&hsw_pwr->lock);
5615         __intel_power_well_put(hsw_pwr);
5616         spin_unlock_irq(&hsw_pwr->lock);
5617 }
5618 EXPORT_SYMBOL_GPL(i915_release_power_well);
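
/*
 * Illustrative sketch (not part of this file): the exported request/release
 * hooks above are meant to be paired by the external display audio driver
 * around codec programming.  The function below is hypothetical and only
 * demonstrates the expected pairing.
 */
#if 0
static void example_audio_codec_access(void)
{
	i915_request_power_well();	/* keep the well up during the access */
	/* ... program the HDMI/DP audio codec registers here ... */
	i915_release_power_well();
}
#endif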
5619
5620 int i915_init_power_well(struct drm_device *dev)
5621 {
5622         struct drm_i915_private *dev_priv = dev->dev_private;
5623
5624         hsw_pwr = &dev_priv->power_well;
5625
5626         hsw_pwr->device = dev;
5627         spin_lock_init(&hsw_pwr->lock);
5628         hsw_pwr->count = 0;
5629
5630         return 0;
5631 }
5632
5633 void i915_remove_power_well(struct drm_device *dev)
5634 {
5635         hsw_pwr = NULL;
5636 }
5637
5638 void intel_set_power_well(struct drm_device *dev, bool enable)
5639 {
5640         struct drm_i915_private *dev_priv = dev->dev_private;
5641         struct i915_power_well *power_well = &dev_priv->power_well;
5642
5643         if (!HAS_POWER_WELL(dev))
5644                 return;
5645
5646         if (!i915_disable_power_well && !enable)
5647                 return;
5648
5649         spin_lock_irq(&power_well->lock);
5650
5651         /*
5652          * This function will only ever contribute a single reference
5653          * to the power well count. i915_request tracks whether that
5654          * reference has already been taken, so repeated calls with
5655          * the same value are no-ops.
5656          */
5657         if (power_well->i915_request == enable)
5658                 goto out;
5659
5660         power_well->i915_request = enable;
5661
5662         if (enable)
5663                 __intel_power_well_get(power_well);
5664         else
5665                 __intel_power_well_put(power_well);
5666
5667  out:
5668         spin_unlock_irq(&power_well->lock);
5669 }
5670
5671 static void intel_resume_power_well(struct drm_device *dev)
5672 {
5673         struct drm_i915_private *dev_priv = dev->dev_private;
5674         struct i915_power_well *power_well = &dev_priv->power_well;
5675
5676         if (!HAS_POWER_WELL(dev))
5677                 return;
5678
5679         spin_lock_irq(&power_well->lock);
5680         __intel_set_power_well(dev, power_well->count > 0);
5681         spin_unlock_irq(&power_well->lock);
5682 }
5683
5684 /*
5685  * Starting with Haswell, we have a "Power Down Well" that can be turned off
5686  * when it is no longer needed. Four request registers (BIOS, driver, KVMR
5687  * and debug) can ask for the power well to be enabled; it is only powered
5688  * down once none of them is requesting it.
5689  */
5690 void intel_init_power_well(struct drm_device *dev)
5691 {
5692         struct drm_i915_private *dev_priv = dev->dev_private;
5693
5694         if (!HAS_POWER_WELL(dev))
5695                 return;
5696
5697         /* For now, we need the power well to be always enabled. */
5698         intel_set_power_well(dev, true);
5699         intel_resume_power_well(dev);
5700
5701         /* We're taking over the BIOS, so clear any requests made by it since
5702          * the driver is in charge now. */
5703         if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
5704                 I915_WRITE(HSW_PWR_WELL_BIOS, 0);
5705 }
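
/*
 * Debugging sketch (not part of this file): the well stays powered as long
 * as any of the four request registers has the enable-request bit set.  The
 * KVMR/DEBUG register names are assumed from i915_reg.h of this era.
 */
#if 0
static void example_dump_power_well_requests(struct drm_i915_private *dev_priv)
{
	DRM_DEBUG_KMS("power well requests: bios=%d drv=%d kvmr=%d dbg=%d\n",
		      !!(I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST),
		      !!(I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_ENABLE_REQUEST),
		      !!(I915_READ(HSW_PWR_WELL_KVMR) & HSW_PWR_WELL_ENABLE_REQUEST),
		      !!(I915_READ(HSW_PWR_WELL_DEBUG) & HSW_PWR_WELL_ENABLE_REQUEST));
}
#endif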
5706
5707 /* Disables PC8 so we can use the GMBUS and DP AUX interrupts. */
5708 void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
5709 {
5710         hsw_disable_package_c8(dev_priv);
5711 }
5712
5713 void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
5714 {
5715         hsw_enable_package_c8(dev_priv);
5716 }
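
/*
 * Illustrative sketch (not part of this file): GMBUS and DP AUX users are
 * expected to bracket their transfers with the runtime get/put above so
 * that package C8 stays disabled while those interrupts are needed.
 */
#if 0
static void example_aux_transfer(struct drm_i915_private *dev_priv)
{
	intel_aux_display_runtime_get(dev_priv);
	/* ... perform the GMBUS or DP AUX transaction here ... */
	intel_aux_display_runtime_put(dev_priv);
}
#endif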
5717
5718 /* Set up chip specific power management-related functions */
5719 void intel_init_pm(struct drm_device *dev)
5720 {
5721         struct drm_i915_private *dev_priv = dev->dev_private;
5722
5723         if (I915_HAS_FBC(dev)) {
5724                 if (HAS_PCH_SPLIT(dev)) {
5725                         dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
5726                         if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
5727                                 dev_priv->display.enable_fbc =
5728                                         gen7_enable_fbc;
5729                         else
5730                                 dev_priv->display.enable_fbc =
5731                                         ironlake_enable_fbc;
5732                         dev_priv->display.disable_fbc = ironlake_disable_fbc;
5733                 } else if (IS_GM45(dev)) {
5734                         dev_priv->display.fbc_enabled = g4x_fbc_enabled;
5735                         dev_priv->display.enable_fbc = g4x_enable_fbc;
5736                         dev_priv->display.disable_fbc = g4x_disable_fbc;
5737                 } else if (IS_CRESTLINE(dev)) {
5738                         dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
5739                         dev_priv->display.enable_fbc = i8xx_enable_fbc;
5740                         dev_priv->display.disable_fbc = i8xx_disable_fbc;
5741                 }
5742                 /* 855GM needs testing */
5743         }
5744
5745         /* For CxSR */
5746         if (IS_PINEVIEW(dev))
5747                 i915_pineview_get_mem_freq(dev);
5748         else if (IS_GEN5(dev))
5749                 i915_ironlake_get_mem_freq(dev);
5750
5751         /* For FIFO watermark updates */
5752         if (HAS_PCH_SPLIT(dev)) {
5753                 intel_setup_wm_latency(dev);
5754
5755                 if (IS_GEN5(dev)) {
5756                         if (dev_priv->wm.pri_latency[1] &&
5757                             dev_priv->wm.spr_latency[1] &&
5758                             dev_priv->wm.cur_latency[1])
5759                                 dev_priv->display.update_wm = ironlake_update_wm;
5760                         else {
5761                                 DRM_DEBUG_KMS("Failed to get proper latency. "
5762                                               "Disabling CxSR\n");
5763                                 dev_priv->display.update_wm = NULL;
5764                         }
5765                         dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
5766                 } else if (IS_GEN6(dev)) {
5767                         if (dev_priv->wm.pri_latency[0] &&
5768                             dev_priv->wm.spr_latency[0] &&
5769                             dev_priv->wm.cur_latency[0]) {
5770                                 dev_priv->display.update_wm = sandybridge_update_wm;
5771                                 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
5772                         } else {
5773                                 DRM_DEBUG_KMS("Failed to read display plane latency. "
5774                                               "Disabling CxSR\n");
5775                                 dev_priv->display.update_wm = NULL;
5776                         }
5777                         dev_priv->display.init_clock_gating = gen6_init_clock_gating;
5778                 } else if (IS_IVYBRIDGE(dev)) {
5779                         if (dev_priv->wm.pri_latency[0] &&
5780                             dev_priv->wm.spr_latency[0] &&
5781                             dev_priv->wm.cur_latency[0]) {
5782                                 dev_priv->display.update_wm = ivybridge_update_wm;
5783                                 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
5784                         } else {
5785                                 DRM_DEBUG_KMS("Failed to read display plane latency. "
5786                                               "Disabling CxSR\n");
5787                                 dev_priv->display.update_wm = NULL;
5788                         }
5789                         dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
5790                 } else if (IS_HASWELL(dev)) {
5791                         if (dev_priv->wm.pri_latency[0] &&
5792                             dev_priv->wm.spr_latency[0] &&
5793                             dev_priv->wm.cur_latency[0]) {
5794                                 dev_priv->display.update_wm = haswell_update_wm;
5795                                 dev_priv->display.update_sprite_wm =
5796                                         haswell_update_sprite_wm;
5797                         } else {
5798                                 DRM_DEBUG_KMS("Failed to read display plane latency. "
5799                                               "Disabling CxSR\n");
5800                                 dev_priv->display.update_wm = NULL;
5801                         }
5802                         dev_priv->display.init_clock_gating = haswell_init_clock_gating;
5803                 } else
5804                         dev_priv->display.update_wm = NULL;
5805         } else if (IS_VALLEYVIEW(dev)) {
5806                 dev_priv->display.update_wm = valleyview_update_wm;
5807                 dev_priv->display.init_clock_gating =
5808                         valleyview_init_clock_gating;
5809         } else if (IS_PINEVIEW(dev)) {
5810                 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
5811                                             dev_priv->is_ddr3,
5812                                             dev_priv->fsb_freq,
5813                                             dev_priv->mem_freq)) {
5814                         DRM_INFO("failed to find known CxSR latency "
5815                                  "(found ddr%s fsb freq %d, mem freq %d), "
5816                                  "disabling CxSR\n",
5817                                  (dev_priv->is_ddr3 == 1) ? "3" : "2",
5818                                  dev_priv->fsb_freq, dev_priv->mem_freq);
5819                         /* Disable CxSR and never update its watermark again */
5820                         pineview_disable_cxsr(dev);
5821                         dev_priv->display.update_wm = NULL;
5822                 } else
5823                         dev_priv->display.update_wm = pineview_update_wm;
5824                 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
5825         } else if (IS_G4X(dev)) {
5826                 dev_priv->display.update_wm = g4x_update_wm;
5827                 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
5828         } else if (IS_GEN4(dev)) {
5829                 dev_priv->display.update_wm = i965_update_wm;
5830                 if (IS_CRESTLINE(dev))
5831                         dev_priv->display.init_clock_gating = crestline_init_clock_gating;
5832                 else if (IS_BROADWATER(dev))
5833                         dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
5834         } else if (IS_GEN3(dev)) {
5835                 dev_priv->display.update_wm = i9xx_update_wm;
5836                 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
5837                 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
5838         } else if (IS_I865G(dev)) {
5839                 dev_priv->display.update_wm = i830_update_wm;
5840                 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
5841                 dev_priv->display.get_fifo_size = i830_get_fifo_size;
5842         } else if (IS_I85X(dev)) {
5843                 dev_priv->display.update_wm = i9xx_update_wm;
5844                 dev_priv->display.get_fifo_size = i85x_get_fifo_size;
5845                 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
5846         } else {
5847                 dev_priv->display.update_wm = i830_update_wm;
5848                 dev_priv->display.init_clock_gating = i830_init_clock_gating;
5849                 if (IS_845G(dev))
5850                         dev_priv->display.get_fifo_size = i845_get_fifo_size;
5851                 else
5852                         dev_priv->display.get_fifo_size = i830_get_fifo_size;
5853         }
5854 }
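
/*
 * Sketch (assumption, not part of this file): the per-platform hooks filled
 * in above are dispatched through thin wrappers elsewhere in the driver,
 * along the lines of the following example for the clock gating hook.
 */
#if 0
static void example_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.init_clock_gating)
		dev_priv->display.init_clock_gating(dev);
}
#endif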
5855
5856 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
5857 {
5858         WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5859
5860         if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
5861                 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
5862                 return -EAGAIN;
5863         }
5864
5865         I915_WRITE(GEN6_PCODE_DATA, *val);
5866         I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
5867
5868         if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
5869                      500)) {
5870                 DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
5871                 return -ETIMEDOUT;
5872         }
5873
5874         *val = I915_READ(GEN6_PCODE_DATA);
5875         I915_WRITE(GEN6_PCODE_DATA, 0);
5876
5877         return 0;
5878 }
5879
5880 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
5881 {
5882         WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5883
5884         if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
5885                 DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
5886                 return -EAGAIN;
5887         }
5888
5889         I915_WRITE(GEN6_PCODE_DATA, val);
5890         I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
5891
5892         if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
5893                      500)) {
5894                 DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
5895                 return -ETIMEDOUT;
5896         }
5897
5898         I915_WRITE(GEN6_PCODE_DATA, 0);
5899
5900         return 0;
5901 }
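
/*
 * Usage sketch (not part of this file): pcode mailbox accesses must be made
 * with rps.hw_lock held, as the WARN_ONs above enforce.  EXAMPLE_PCODE_MBOX
 * is a placeholder mailbox id, not a real define.
 */
#if 0
static int example_pcode_roundtrip(struct drm_i915_private *dev_priv)
{
	u32 val = 0;
	int ret;

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_read(dev_priv, EXAMPLE_PCODE_MBOX, &val);
	if (ret == 0)
		ret = sandybridge_pcode_write(dev_priv, EXAMPLE_PCODE_MBOX, val);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return ret;
}
#endif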
5902
5903 int vlv_gpu_freq(int ddr_freq, int val)
5904 {
5905         int mult, base;
5906
5907         switch (ddr_freq) {
5908         case 800:
5909                 mult = 20;
5910                 base = 120;
5911                 break;
5912         case 1066:
5913                 mult = 22;
5914                 base = 133;
5915                 break;
5916         case 1333:
5917                 mult = 21;
5918                 base = 125;
5919                 break;
5920         default:
5921                 return -1;
5922         }
5923
5924         return ((val - 0xbd) * mult) + base;
5925 }
5926
5927 int vlv_freq_opcode(int ddr_freq, int val)
5928 {
5929         int mult, base;
5930
5931         switch (ddr_freq) {
5932         case 800:
5933                 mult = 20;
5934                 base = 120;
5935                 break;
5936         case 1066:
5937                 mult = 22;
5938                 base = 133;
5939                 break;
5940         case 1333:
5941                 mult = 21;
5942                 base = 125;
5943                 break;
5944         default:
5945                 return -1;
5946         }
5947
5948         val /= mult;
5949         val -= base / mult;
5950         val += 0xbd;
5951
5952         if (val > 0xea)
5953                 val = 0xea;
5954
5955         return val;
5956 }
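
/*
 * Worked example for the two conversions above (assuming 1066 MHz DDR, so
 * mult = 22 and base = 133): opcode 0xd0 maps to (0xd0 - 0xbd) * 22 + 133 =
 * 551 MHz, and 551 maps back to 551/22 - 133/22 + 0xbd = 25 - 6 + 0xbd =
 * 0xd0.  Because of the integer divisions the helpers are only approximate
 * inverses of each other.
 */
#if 0
static void example_vlv_freq_roundtrip(void)
{
	int mhz = vlv_gpu_freq(1066, 0xd0);		/* 19 * 22 + 133 = 551 */
	int opcode = vlv_freq_opcode(1066, mhz);	/* back to 0xd0 */

	WARN_ON(mhz != 551 || opcode != 0xd0);
}
#endif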
5957
5958 void intel_pm_init(struct drm_device *dev)
5959 {
5960         struct drm_i915_private *dev_priv = dev->dev_private;
5961
5962         INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
5963                           intel_gen6_powersave_work);
5964 }
5965
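
/*
 * Sketch (assumption, not part of this file): the delayed work initialized
 * above is typically kicked off once GT powersave is (re)enabled, deferring
 * the RC6/RPS setup until the system has settled, e.g.:
 */
#if 0
static void example_schedule_powersave(struct drm_i915_private *dev_priv)
{
	schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
			      round_jiffies_up_relative(HZ));
}
#endif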