/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 2
/* FBC, or Frame Buffer Compression, is a technique employed to compress the
 * framebuffer contents in-memory, aiming at reducing the required bandwidth
 * during in-memory transfers and, therefore, the power consumption.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns.
 *
 * FBC-related functionality can be enabled by means of the
 * i915.i915_enable_fbc parameter.
 */
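/* Usage sketch (illustrative, not part of this file): with the standard
 * module-parameter plumbing assumed here, FBC can be forced from the kernel
 * command line or at module load time, e.g.
 *
 *   i915.i915_enable_fbc=1     force FBC on
 *   i915.i915_enable_fbc=0     force FBC off
 *   i915.i915_enable_fbc=-1    per-chip default (see intel_update_fbc())
 */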
static void i8xx_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}

	DRM_DEBUG_KMS("disabled FBC\n");
}

static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int cfb_pitch;
	int plane, i;
	u32 fbc_ctl, fbc_ctl2;

	cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

	/* FBC_CTL wants 64B units */
	cfb_pitch = (cfb_pitch / 64) - 1;
	plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	/* Set it up... */
	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
	fbc_ctl2 |= plane;
	I915_WRITE(FBC_CONTROL2, fbc_ctl2);
	I915_WRITE(FBC_FENCE_OFF, crtc->y);

	/* enable it... */
	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d\n",
		      cfb_pitch, crtc->y, intel_crtc->plane);
}

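/* A note on the 64B conversion above: FBC_CTL takes the compressed-buffer
 * pitch in 64-byte units, minus one. For example (illustrative numbers), a
 * pitch of 4096 bytes would be programmed as (4096 / 64) - 1 = 63.
 */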
static bool i8xx_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}

static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
	I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}

static void g4x_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

static bool g4x_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}

static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes */
	gen6_gt_force_wake_get(dev_priv);
	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);
	gen6_gt_force_wake_put(dev_priv);
}

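/* A note on the three-step sequence above (assuming the usual masked-bit
 * convention for this register): the first write sets the bit in the high,
 * write-enable half to unlock the low FBC_NOTIFY bit, the second write
 * actually sets FBC_NOTIFY, and the third clears the write-enable bit to
 * lock the register again.
 */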
static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	dpfc_ctl &= DPFC_RESERVED;
	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
	/* Set persistent mode for front-buffer rendering, a la X. */
	dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
	dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
	I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}

static void ironlake_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

static bool ironlake_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}

bool intel_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.fbc_enabled)
		return false;

	return dev_priv->display.fbc_enabled(dev);
}

static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct intel_fbc_work *work =
		container_of(to_delayed_work(__work),
			     struct intel_fbc_work, work);
	struct drm_device *dev = work->crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	if (work == dev_priv->fbc_work) {
		/* Double check that we haven't switched fb without cancelling
		 * the prior work.
		 */
		if (work->crtc->fb == work->fb) {
			dev_priv->display.enable_fbc(work->crtc,
						     work->interval);

			dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->cfb_fb = work->crtc->fb->base.id;
			dev_priv->cfb_y = work->crtc->y;
		}

		dev_priv->fbc_work = NULL;
	}
	mutex_unlock(&dev->struct_mutex);

	kfree(work);
}

static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
	if (dev_priv->fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* Synchronisation is provided by struct_mutex and checking of
	 * dev_priv->fbc_work, so we can perform the cancellation
	 * entirely asynchronously.
	 */
	if (cancel_delayed_work(&dev_priv->fbc_work->work))
		/* tasklet was killed before being run, clean up */
		kfree(dev_priv->fbc_work);

	/* Mark the work as no longer wanted so that if it does
	 * wake-up (because the work was already running and waiting
	 * for our mutex), it will discover that it is no longer
	 * necessary to run.
	 */
	dev_priv->fbc_work = NULL;
}

void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.enable_fbc)
		return;

	intel_cancel_fbc_work(dev_priv);

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (work == NULL) {
		dev_priv->display.enable_fbc(crtc, interval);
		return;
	}

	work->crtc = crtc;
	work->fb = crtc->fb;
	work->interval = interval;
	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

	dev_priv->fbc_work = work;

	DRM_DEBUG_KMS("scheduling delayed FBC enable\n");

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 */
	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}

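/* A note on the 50 ms figure above: at a typical 60 Hz refresh a vblank
 * occurs roughly every 16.7 ms, so waiting 50 ms comfortably covers at
 * least one vblank between disabling FBC and rewriting its control
 * registers, which is the assumption the comment above relies on.
 */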
void intel_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_cancel_fbc_work(dev_priv);

	if (!dev_priv->display.disable_fbc)
		return;

	dev_priv->display.disable_fbc(dev);
	dev_priv->cfb_plane = -1;
}

/**
 * intel_update_fbc - enable/disable FBC as needed
 * @dev: the drm_device
 *
 * Set up the framebuffer compression hardware at mode set time.  We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - no dual wide
 *   - framebuffer <= 2048 in width, 1536 in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one.  It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
void intel_update_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int enable_fbc;

	if (!i915_powersave)
		return;

	if (!I915_HAS_FBC(dev))
		return;

	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 *   - more than one pipe is active
	 *   - changing FBC params (stride, fence, mode)
	 *   - new fb is too large to fit in compressed buffer
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
		if (tmp_crtc->enabled &&
		    !to_intel_crtc(tmp_crtc)->primary_disabled &&
		    tmp_crtc->fb) {
			if (crtc) {
				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
				dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
				goto out_disable;
			}
			crtc = tmp_crtc;
		}
	}

	if (!crtc || crtc->fb == NULL) {
		DRM_DEBUG_KMS("no output, disabling\n");
		dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
		goto out_disable;
	}

	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->fb;
	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	enable_fbc = i915_enable_fbc;
	if (enable_fbc < 0) {
		DRM_DEBUG_KMS("fbc set to per-chip default\n");
		enable_fbc = 1;
		if (INTEL_INFO(dev)->gen <= 6)
			enable_fbc = 0;
	}
	if (!enable_fbc) {
		DRM_DEBUG_KMS("fbc disabled per module param\n");
		dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
		goto out_disable;
	}
	if (intel_fb->obj->base.size > dev_priv->cfb_size) {
		DRM_DEBUG_KMS("framebuffer too large, disabling "
			      "compression\n");
		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
		goto out_disable;
	}
	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
		DRM_DEBUG_KMS("mode incompatible with compression, "
			      "disabling\n");
		dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
		goto out_disable;
	}
	if ((crtc->mode.hdisplay > 2048) ||
	    (crtc->mode.vdisplay > 1536)) {
		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
		goto out_disable;
	}
	if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
		goto out_disable;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 */
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_NOT_TILED;
		goto out_disable;
	}

	/* If the kernel debugger is active, always disable compression */
	if (in_dbg_master())
		goto out_disable;

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (dev_priv->cfb_plane == intel_crtc->plane &&
	    dev_priv->cfb_fb == fb->base.id &&
	    dev_priv->cfb_y == crtc->y)
		return;

	if (intel_fbc_enabled(dev)) {
		/* We update FBC along two paths, after changing fb/crtc
		 * configuration (modeswitching) and after page-flipping
		 * finishes. For the latter, we know that not only did
		 * we disable the FBC at the start of the page-flip
		 * sequence, but also more than one vblank has passed.
		 *
		 * For the former case of modeswitching, it is possible
		 * to switch between two FBC valid configurations
		 * instantaneously so we do need to disable the FBC
		 * before we can modify its control registers. We also
		 * have to wait for the next vblank for that to take
		 * effect. However, since we delay enabling FBC we can
		 * assume that a vblank has passed since disabling and
		 * that we can safely alter the registers in the deferred
		 * callback.
		 *
		 * In the scenario that we go from a valid to invalid
		 * and then back to valid FBC configuration we have
		 * no strict enforcement that a vblank occurred since
		 * disabling the FBC. However, along all current pipe
		 * disabling paths we do need to wait for a vblank at
		 * some point. And we wait before enabling FBC anyway.
		 */
		DRM_DEBUG_KMS("disabling active FBC for update\n");
		intel_disable_fbc(dev);
	}

	intel_enable_fbc(crtc, 500);
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		intel_disable_fbc(dev);
	}
}

static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}

static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};

static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}

static void pineview_disable_cxsr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* deactivate cxsr */
	I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}

/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int latency_ns = 5000;

static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i85x_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	PINEVIEW_DISPLAY_FIFO,
	PINEVIEW_MAX_WM,
	PINEVIEW_DFT_WM,
	PINEVIEW_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	PINEVIEW_DISPLAY_FIFO,
	PINEVIEW_MAX_WM,
	PINEVIEW_DFT_HPLLOFF_WM,
	PINEVIEW_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_cursor_wm = {
	PINEVIEW_CURSOR_FIFO,
	PINEVIEW_CURSOR_MAX_WM,
	PINEVIEW_CURSOR_DFT_WM,
	PINEVIEW_CURSOR_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	PINEVIEW_CURSOR_FIFO,
	PINEVIEW_CURSOR_MAX_WM,
	PINEVIEW_CURSOR_DFT_WM,
	PINEVIEW_CURSOR_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params g4x_wm_info = {
	G4X_FIFO_SIZE,
	G4X_MAX_WM,
	G4X_MAX_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	I965_CURSOR_FIFO,
	I965_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
	VALLEYVIEW_FIFO_SIZE,
	VALLEYVIEW_MAX_WM,
	VALLEYVIEW_DFT_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
	I965_CURSOR_FIFO,
	VALLEYVIEW_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	I965_CURSOR_FIFO,
	I965_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	I945_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i915_wm_info = {
	I915_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i855_wm_info = {
	I855GM_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I830_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i830_wm_info = {
	I830_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I830_FIFO_LINE_SIZE
};

static const struct intel_watermark_params ironlake_display_wm_info = {
	ILK_DISPLAY_FIFO,
	ILK_DISPLAY_MAXWM,
	ILK_DISPLAY_DFTWM,
	2,
	ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_wm_info = {
	ILK_CURSOR_FIFO,
	ILK_CURSOR_MAXWM,
	ILK_CURSOR_DFTWM,
	2,
	ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_display_srwm_info = {
	ILK_DISPLAY_SR_FIFO,
	ILK_DISPLAY_MAX_SRWM,
	ILK_DISPLAY_DFT_SRWM,
	2,
	ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_srwm_info = {
	ILK_CURSOR_SR_FIFO,
	ILK_CURSOR_MAX_SRWM,
	ILK_CURSOR_DFT_SRWM,
	2,
	ILK_FIFO_LINE_SIZE
};

static const struct intel_watermark_params sandybridge_display_wm_info = {
	SNB_DISPLAY_FIFO,
	SNB_DISPLAY_MAXWM,
	SNB_DISPLAY_DFTWM,
	2,
	SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_wm_info = {
	SNB_CURSOR_FIFO,
	SNB_CURSOR_MAXWM,
	SNB_CURSOR_DFTWM,
	2,
	SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_display_srwm_info = {
	SNB_DISPLAY_SR_FIFO,
	SNB_DISPLAY_MAX_SRWM,
	SNB_DISPLAY_DFT_SRWM,
	2,
	SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
	SNB_CURSOR_SR_FIFO,
	SNB_CURSOR_MAX_SRWM,
	SNB_CURSOR_DFT_SRWM,
	2,
	SNB_FIFO_LINE_SIZE
};

/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the display FIFO, in cachelines
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again).  Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size.  When it reaches the watermark level, it'll start
 * fetching FIFO-line-sized chunks from memory until the FIFO fills
 * past the watermark point.  If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;
	return wm_size;
}

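/* Worked example for the calculation above (illustrative numbers, not from
 * any platform table): a 100000 kHz (100 MHz) dot clock at 4 bytes/pixel
 * with latency_ns = 5000 drains (100000/1000) * 4 * 5000 / 1000 = 2000
 * bytes during the latency window; with a 64-byte cacheline that is
 * DIV_ROUND_UP(2000, 64) = 32 entries, so a 96-entry FIFO with guard_size 2
 * yields wm_size = 96 - (32 + 2) = 62 before clamping against max_wm.
 */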
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		if (crtc->enabled && crtc->fb) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}

static void pineview_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		pineview_disable_cxsr(dev);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		int clock = crtc->mode.clock;
		int pixel_size = crtc->fb->bits_per_pixel / 8;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		/* activate cxsr */
		I915_WRITE(DSPFW3,
			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
		DRM_DEBUG_KMS("Self-refresh is enabled\n");
	} else {
		pineview_disable_cxsr(dev);
		DRM_DEBUG_KMS("Self-refresh is disabled\n");
	}
}

static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (crtc->fb == NULL || !crtc->enabled) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	htotal = crtc->mode.htotal;
	hdisplay = crtc->mode.hdisplay;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = ((htotal * 1000) / clock);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * 64 * pixel_size;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}

/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}

static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	hdisplay = crtc->mode.hdisplay;
	htotal = crtc->mode.htotal;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	line_time_us = (htotal * 1000) / clock;
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * 64;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}

static bool vlv_compute_drain_latency(struct drm_device *dev,
				      int plane,
				      int *plane_prec_mult,
				      int *plane_dl,
				      int *cursor_prec_mult,
				      int *cursor_dl)
{
	struct drm_crtc *crtc;
	int clock, pixel_size;
	int entries;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (crtc->fb == NULL || !crtc->enabled)
		return false;

	clock = crtc->mode.clock;	/* VESA DOT Clock */
	pixel_size = crtc->fb->bits_per_pixel / 8;	/* BPP */

	entries = (clock / 1000) * pixel_size;
	*plane_prec_mult = (entries > 256) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
	*plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
						     pixel_size);

	entries = (clock / 1000) * 4;	/* BPP is always 4 for cursor */
	*cursor_prec_mult = (entries > 256) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
	*cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);

	return true;
}

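/* Worked example for the arithmetic above (illustrative): at a 100000 kHz
 * dot clock and 4 bytes per pixel, entries = 100 * 4 = 400 > 256, so the
 * 32x precision multiplier is chosen and plane_dl = (64 * 32 * 4) / 400 =
 * 20. The cursor fields follow the same arithmetic with its fixed 4-byte
 * BPP.
 */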
/*
 * Update drain latency registers of memory arbiter
 *
 * Valleyview SoC has a new memory arbiter and needs drain latency registers
 * to be programmed. Each plane has a drain latency multiplier and a drain
 * latency value.
 */

static void vlv_update_drain_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_prec, planea_dl, planeb_prec, planeb_dl;
	int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
	int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
						  either 16 or 32 */

	/* For plane A, Cursor A */
	if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
				      &cursor_prec_mult, &cursora_dl)) {
		cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
		planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;

		I915_WRITE(VLV_DDL1, cursora_prec |
				(cursora_dl << DDL_CURSORA_SHIFT) |
				planea_prec | planea_dl);
	}

	/* For plane B, Cursor B */
	if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
				      &cursor_prec_mult, &cursorb_dl)) {
		cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
		planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;

		I915_WRITE(VLV_DDL2, cursorb_prec |
				(cursorb_dl << DDL_CURSORB_SHIFT) |
				planeb_prec | planeb_dl);
	}
}

#define single_plane_enabled(mask) is_power_of_2(mask)
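/* An example of the macro above: an enabled mask of 0x1 (pipe A only) or
 * 0x2 (pipe B only) is a power of two and counts as a single enabled plane;
 * 0x3 (both pipes) does not.
 */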
static void valleyview_update_wm(struct drm_device *dev)
{
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;

	vlv_update_drain_latency(dev);

	if (g4x_compute_wm0(dev, 0,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1;

	if (g4x_compute_wm0(dev, 1,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 2;

	plane_sr = cursor_sr = 0;
	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &plane_sr, &cursor_sr))
		I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
	else
		I915_WRITE(FW_BLC_SELF_VLV,
			   I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)));
}

static void g4x_update_wm(struct drm_device *dev)
{
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;

	if (g4x_compute_wm0(dev, 0,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1;

	if (g4x_compute_wm0(dev, 1,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 2;

	plane_sr = cursor_sr = 0;
	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	else
		I915_WRITE(FW_BLC_SELF,
			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}

static void i965_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		int clock = crtc->mode.clock;
		int htotal = crtc->mode.htotal;
		int hdisplay = crtc->mode.hdisplay;
		int pixel_size = crtc->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = ((htotal * 1000) / clock);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * 64;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		/* Turn off self refresh if both pipes are enabled */
		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
				   & ~FW_BLC_SELF_EN);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << 16) | (8 << 8) | (8 << 0));
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}

static void i9xx_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i855_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (crtc->enabled && crtc->fb) {
		int cpp = crtc->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		planea_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size, cpp,
					       latency_ns);
		enabled = crtc;
	} else
		planea_wm = fifo_size - wm_info->guard_size;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (crtc->enabled && crtc->fb) {
		int cpp = crtc->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		planeb_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size, cpp,
					       latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else
		planeb_wm = fifo_size - wm_info->guard_size;

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	if (IS_I945G(dev) || IS_I945GM(dev))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
	else if (IS_I915GM(dev))
		I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		int clock = enabled->mode.clock;
		int htotal = enabled->mode.htotal;
		int hdisplay = enabled->mode.hdisplay;
		int pixel_size = enabled->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = (htotal * 1000) / clock;

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (HAS_FW_BLC(dev)) {
		if (enabled) {
			if (IS_I945G(dev) || IS_I945GM(dev))
				I915_WRITE(FW_BLC_SELF,
					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
			else if (IS_I915GM(dev))
				I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
			DRM_DEBUG_KMS("memory self refresh enabled\n");
		} else
			DRM_DEBUG_KMS("memory self refresh disabled\n");
	}
}

static void i830_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       4, latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}

#define ILK_LP0_PLANE_LATENCY	700
#define ILK_LP0_CURSOR_LATENCY	1300

/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool ironlake_check_srwm(struct drm_device *dev, int level,
				int fbc_wm, int display_wm, int cursor_wm,
				const struct intel_watermark_params *display,
				const struct intel_watermark_params *cursor)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
		      " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);

	if (fbc_wm > SNB_FBC_MAX_SRWM) {
		DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
			      fbc_wm, SNB_FBC_MAX_SRWM, level);

		/* FBC has its own way to disable the FBC WM */
		I915_WRITE(DISP_ARB_CTL,
			   I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
		return false;
	}

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
			      display_wm, SNB_DISPLAY_MAX_SRWM, level);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
			      cursor_wm, SNB_CURSOR_MAX_SRWM, level);
		return false;
	}

	if (!(fbc_wm || display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
		return false;
	}

	return true;
}

/*
 * Compute watermark values for WM[1-3].
 */
static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
				  int latency_ns,
				  const struct intel_watermark_params *display,
				  const struct intel_watermark_params *cursor,
				  int *fbc_wm, int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	unsigned long line_time_us;
	int hdisplay, htotal, pixel_size, clock;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*fbc_wm = *display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	hdisplay = crtc->mode.hdisplay;
	htotal = crtc->mode.htotal;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	line_time_us = (htotal * 1000) / clock;
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/*
	 * Spec says:
	 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
	 */
	*fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * 64;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return ironlake_check_srwm(dev, level,
				   *fbc_wm, *display_wm, *cursor_wm,
				   display, cursor);
}

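/* Worked example of the spec formula above (illustrative numbers): with a
 * 1920-pixel-wide scanout at 4 bytes/pixel, line_size = 7680 bytes; a
 * display_wm of 100 gives fbc_wm = DIV_ROUND_UP(100 * 64, 7680) + 2 =
 * 1 + 2 = 3 compressed lines.
 */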
static void ironlake_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEA_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEB_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled))
		return;
	enabled = ffs(enabled) - 1;

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   ILK_READ_WM1_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   ILK_READ_WM2_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/*
	 * WM3 is unsupported on ILK, probably because we don't have latency
	 * data for that power state.
	 */
}

static void sandybridge_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
	u32 val;
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		val = I915_READ(WM0_PIPEA_ILK);
		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
		I915_WRITE(WM0_PIPEA_ILK, val |
			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		val = I915_READ(WM0_PIPEB_ILK);
		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
		I915_WRITE(WM0_PIPEB_ILK, val |
			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	if ((dev_priv->num_pipe == 3) &&
	    g4x_compute_wm0(dev, 2,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		val = I915_READ(WM0_PIPEC_IVB);
		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
		I915_WRITE(WM0_PIPEC_IVB, val |
			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
		DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 3;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 *
	 * SNB supports 3 levels of watermarks.
	 *
	 * WM1/WM2/WM3 watermarks have to be enabled in the ascending order,
	 * and disabled in the descending order
	 *
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled) ||
	    dev_priv->sprite_scaling_enabled)
		return;
	enabled = ffs(enabled) - 1;

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   SNB_READ_WM1_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   SNB_READ_WM2_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM3 */
	if (!ironlake_compute_srwm(dev, 3, enabled,
				   SNB_READ_WM3_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM3_LP_ILK,
		   WM3_LP_EN |
		   (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);
}

static void
haswell_update_linetime_wm(struct drm_device *dev, int pipe,
			   struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 temp;

	temp = I915_READ(PIPE_WM_LINETIME(pipe));
	temp &= ~PIPE_WM_LINETIME_MASK;

	/* The watermark is computed based on how long it takes to fill a
	 * single row at the given clock rate, multiplied by 8.
	 */
	temp |= PIPE_WM_LINETIME_TIME(
		((mode->crtc_hdisplay * 1000) / mode->clock) * 8);

	/* IPS watermarks are only used by pipe A, and are ignored by
	 * pipes B and C.  They are calculated similarly to the common
	 * linetime values, except that we are using CD clock frequency
	 * in MHz instead of pixel rate for the division.
	 *
	 * This is a placeholder for the IPS watermark calculation code.
	 */

	I915_WRITE(PIPE_WM_LINETIME(pipe), temp);
}

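/* Worked example for the linetime computation above (illustrative): a
 * 1920-wide mode at a 148500 kHz clock gives (1920 * 1000) / 148500 = 12
 * after the integer division, so the field is programmed with 12 * 8 = 96;
 * the multiply-by-8 suggests the field is kept in 1/8 us units, and the
 * truncation before the multiply costs some precision (the exact row time
 * is closer to 12.9 us).
 */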
static bool
sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
			      uint32_t sprite_width, int pixel_size,
			      const struct intel_watermark_params *display,
			      int display_latency_ns, int *sprite_wm)
{
	struct drm_crtc *crtc;
	int clock;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (crtc->fb == NULL || !crtc->enabled) {
		*sprite_wm = display->guard_size;
		return false;
	}

	clock = crtc->mode.clock;

	/* Use the small buffer method to calculate the sprite watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size -
		sprite_width * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*sprite_wm = entries + display->guard_size;
	if (*sprite_wm > (int)display->max_wm)
		*sprite_wm = display->max_wm;

	return true;
}

static bool
sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
				uint32_t sprite_width, int pixel_size,
				const struct intel_watermark_params *display,
				int latency_ns, int *sprite_wm)
{
	struct drm_crtc *crtc;
	unsigned long line_time_us;
	int clock;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*sprite_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	clock = crtc->mode.clock;
	if (!clock) {
		*sprite_wm = 0;
		return false;
	}

	line_time_us = (sprite_width * 1000) / clock;
	if (!line_time_us) {
		*sprite_wm = 0;
		return false;
	}

	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = sprite_width * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*sprite_wm = entries + display->guard_size;

	return *sprite_wm <= 0x3ff;
}

static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
					 uint32_t sprite_width, int pixel_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
	u32 val;
	int sprite_wm, reg;
	int ret;

	switch (pipe) {
	case 0:
		reg = WM0_PIPEA_ILK;
		break;
	case 1:
		reg = WM0_PIPEB_ILK;
		break;
	case 2:
		reg = WM0_PIPEC_IVB;
		break;
	default:
		return; /* bad pipe */
	}

	ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
					    &sandybridge_display_wm_info,
					    latency, &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
			      pipe);
		return;
	}

	val = I915_READ(reg);
	val &= ~WM0_PIPE_SPRITE_MASK;
	I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
	DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);

	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM1_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM1S_LP_ILK, sprite_wm);

	/* Only IVB has two more LP watermarks for sprite */
	if (!IS_IVYBRIDGE(dev))
		return;

	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM2_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM2S_LP_IVB, sprite_wm);

	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM3_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM3S_LP_IVB, sprite_wm);
}

/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 *   The normal calculation is:
 *     watermark = dotclock * bytes per pixel * latency
 *   where latency is platform & configuration dependent (we assume pessimal
 *   values here).
 *
 *   The SR calculation is:
 *     watermark = (trunc(latency/line time)+1) * surface width *
 *       bytes per pixel
 *   where
 *     line time = htotal / dotclock
 *     surface width = hdisplay for normal plane and 64 for cursor
 *   and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that.  And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
void intel_update_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.update_wm)
		dev_priv->display.update_wm(dev);
}

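/* Worked example of the SR formula documented above (illustrative numbers):
 * a 1920x1080 mode with htotal 2200 at a 148500 kHz dot clock has a line
 * time of 2200 / 148.5 MHz = 14.8 us; with a 12000 ns SR latency and 4
 * bytes/pixel the plane watermark is (trunc(12.0/14.8) + 1) * 1920 * 4 =
 * 7680 bytes, i.e. 120 entries at a 64-byte FIFO line, before the rounding
 * up and the extra 2 guard entries described above.
 */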
void intel_update_linetime_watermarks(struct drm_device *dev,
		int pipe, struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.update_linetime_wm)
		dev_priv->display.update_linetime_wm(dev, pipe, mode);
}

void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
				    uint32_t sprite_width, int pixel_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.update_sprite_wm)
		dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
						   pixel_size);
}

2134 static struct drm_i915_gem_object *
2135 intel_alloc_context_page(struct drm_device *dev)
2137 struct drm_i915_gem_object *ctx;
2140 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2142 ctx = i915_gem_alloc_object(dev, 4096);
2144 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
2148 ret = i915_gem_object_pin(ctx, 4096, true, false);
2150 DRM_ERROR("failed to pin power context: %d\n", ret);
2154 ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
2156 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
2163 i915_gem_object_unpin(ctx);
2165 drm_gem_object_unreference(&ctx->base);
2166 mutex_unlock(&dev->struct_mutex);
2171 * Lock protecting IPS related data structures
2173 DEFINE_SPINLOCK(mchdev_lock);
2175 /* Global for IPS driver to get at the current i915 device. Protected by mchdev_lock. */
2177 static struct drm_i915_private *i915_mch_dev;
2179 bool ironlake_set_drps(struct drm_device *dev, u8 val)
2181 struct drm_i915_private *dev_priv = dev->dev_private;
2184 assert_spin_locked(&mchdev_lock);
2186 rgvswctl = I915_READ16(MEMSWCTL);
2187 if (rgvswctl & MEMCTL_CMD_STS) {
2188 DRM_DEBUG("gpu busy, RCS change rejected\n");
2189 return false; /* still busy with another command */
2192 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
2193 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
2194 I915_WRITE16(MEMSWCTL, rgvswctl);
2195 POSTING_READ16(MEMSWCTL);
2197 rgvswctl |= MEMCTL_CMD_STS;
2198 I915_WRITE16(MEMSWCTL, rgvswctl);
2203 static void ironlake_enable_drps(struct drm_device *dev)
2205 struct drm_i915_private *dev_priv = dev->dev_private;
2206 u32 rgvmodectl = I915_READ(MEMMODECTL);
2207 u8 fmax, fmin, fstart, vstart;
2209 spin_lock_irq(&mchdev_lock);
2211 /* Enable temp reporting */
2212 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
2213 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
2215 /* 100ms RC evaluation intervals */
2216 I915_WRITE(RCUPEI, 100000);
2217 I915_WRITE(RCDNEI, 100000);
2219 /* Set max/min thresholds to 90ms and 80ms respectively */
2220 I915_WRITE(RCBMAXAVG, 90000);
2221 I915_WRITE(RCBMINAVG, 80000);
2223 I915_WRITE(MEMIHYST, 1);
2225 /* Set up min, max, and cur for interrupt handling */
2226 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
2227 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
2228 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
2229 MEMMODE_FSTART_SHIFT;
2231 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
2234 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
2235 dev_priv->ips.fstart = fstart;
2237 dev_priv->ips.max_delay = fstart;
2238 dev_priv->ips.min_delay = fmin;
2239 dev_priv->ips.cur_delay = fstart;
2241 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
2242 fmax, fmin, fstart);
2244 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
2247 * Interrupts will be enabled in ironlake_irq_postinstall
2250 I915_WRITE(VIDSTART, vstart);
2251 POSTING_READ(VIDSTART);
2253 rgvmodectl |= MEMMODE_SWMODE_EN;
2254 I915_WRITE(MEMMODECTL, rgvmodectl);
2256 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
2257 DRM_ERROR("stuck trying to change perf mode\n");
2260 ironlake_set_drps(dev, fstart);
2262 dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) + I915_READ(0x112e0);
2264 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
2265 dev_priv->ips.last_count2 = I915_READ(0x112f4);
2266 getrawmonotonic(&dev_priv->ips.last_time2);
2268 spin_unlock_irq(&mchdev_lock);
2271 static void ironlake_disable_drps(struct drm_device *dev)
2273 struct drm_i915_private *dev_priv = dev->dev_private;
2276 spin_lock_irq(&mchdev_lock);
2278 rgvswctl = I915_READ16(MEMSWCTL);
2280 /* Ack interrupts, disable EFC interrupt */
2281 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
2282 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
2283 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
2284 I915_WRITE(DEIIR, DE_PCU_EVENT);
2285 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
2287 /* Go back to the starting frequency */
2288 ironlake_set_drps(dev, dev_priv->ips.fstart);
2290 rgvswctl |= MEMCTL_CMD_STS;
2291 I915_WRITE(MEMSWCTL, rgvswctl);
2294 spin_unlock_irq(&mchdev_lock);
2297 /* There's a funny hw issue where the hw returns all 0 when reading from
2298 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
2299 * ourselves, instead of doing a rmw cycle (which might result in us clearing
2300 * all limits and the gpu stuck at whatever frequency it is at atm).
2302 static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
2308 if (*val >= dev_priv->rps.max_delay)
2309 *val = dev_priv->rps.max_delay;
2310 limits |= dev_priv->rps.max_delay << 24;
2312 /* Only set the down limit when we've reached the lowest level to avoid
2313 * getting more interrupts, otherwise leave this clear. This prevents a
2314 * race in the hw when coming out of rc6: There's a tiny window where
2315 * the hw runs at the minimal clock before selecting the desired
2316 * frequency, if the down threshold expires in that window we will not
2317 * receive a down interrupt. */
2318 if (*val <= dev_priv->rps.min_delay) {
2319 *val = dev_priv->rps.min_delay;
2320 limits |= dev_priv->rps.min_delay << 16;
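/*
 * Resulting layout (derived from the shifts above, example values
 * assumed): with max_delay 0x16 and min_delay 0x0b at the floor, the
 * limits word is (0x16 << 24) | (0x0b << 16), i.e. soft max in bits
 * 31:24 and soft min in bits 23:16.
 */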
2326 void gen6_set_rps(struct drm_device *dev, u8 val)
2328 struct drm_i915_private *dev_priv = dev->dev_private;
2329 u32 limits = gen6_rps_limits(dev_priv, &val);
2331 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
2332 WARN_ON(val > dev_priv->rps.max_delay);
2333 WARN_ON(val < dev_priv->rps.min_delay);
2335 if (val == dev_priv->rps.cur_delay)
2338 I915_WRITE(GEN6_RPNSWREQ,
2339 GEN6_FREQUENCY(val) |
2341 GEN6_AGGRESSIVE_TURBO);
2343 /* Make sure we continue to get interrupts
2344 * until we hit the minimum or maximum frequencies.
2346 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
2348 POSTING_READ(GEN6_RPNSWREQ);
2350 dev_priv->rps.cur_delay = val;
2352 trace_intel_gpu_freq_change(val * 50);
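/*
 * Note on units: the trace above multiplies by 50 because RPS delay
 * values are in 50MHz steps, so e.g. val = 20 requests a 1000MHz GT
 * clock.
 */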
2355 static void gen6_disable_rps(struct drm_device *dev)
2357 struct drm_i915_private *dev_priv = dev->dev_private;
2359 I915_WRITE(GEN6_RC_CONTROL, 0);
2360 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
2361 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
2362 I915_WRITE(GEN6_PMIER, 0);
2363 /* Complete PM interrupt masking here doesn't race with the rps work
2364 * item again unmasking PM interrupts because that is using a different
2365 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
2366 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
2368 spin_lock_irq(&dev_priv->rps.lock);
2369 dev_priv->rps.pm_iir = 0;
2370 spin_unlock_irq(&dev_priv->rps.lock);
2372 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
2375 int intel_enable_rc6(const struct drm_device *dev)
2377 /* Respect the kernel parameter if it is set */
2378 if (i915_enable_rc6 >= 0)
2379 return i915_enable_rc6;
2381 if (INTEL_INFO(dev)->gen == 5) {
2382 #ifdef CONFIG_INTEL_IOMMU
2383 /* Disable rc6 on ilk if VT-d is on. */
2384 if (intel_iommu_gfx_mapped)
2387 DRM_DEBUG_DRIVER("Ironlake: only RC6 available\n");
2388 return INTEL_RC6_ENABLE;
2391 if (IS_HASWELL(dev)) {
2392 DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
2393 return INTEL_RC6_ENABLE;
2396 /* snb/ivb have more than one rc6 state. */
2397 if (INTEL_INFO(dev)->gen == 6) {
2398 DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
2399 return INTEL_RC6_ENABLE;
2402 DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
2403 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
2406 static void gen6_enable_rps(struct drm_device *dev)
2408 struct drm_i915_private *dev_priv = dev->dev_private;
2409 struct intel_ring_buffer *ring;
2412 u32 rc6vids, pcu_mbox, rc6_mask = 0;
2417 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
2419 /* Here begins a magic sequence of register writes to enable
2420 * auto-downclocking.
2422 * Perhaps there might be some value in exposing these to userspace. */
2425 I915_WRITE(GEN6_RC_STATE, 0);
2427 /* Clear the DBG now so we don't confuse earlier errors */
2428 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
2429 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
2430 I915_WRITE(GTFIFODBG, gtfifodbg);
2433 gen6_gt_force_wake_get(dev_priv);
2435 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
2436 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
2438 /* In units of 50MHz (see the val * 50 scaling in gen6_set_rps()) */
2439 dev_priv->rps.max_delay = rp_state_cap & 0xff;
2440 dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
2441 dev_priv->rps.cur_delay = 0;
2443 /* disable the counters and set deterministic thresholds */
2444 I915_WRITE(GEN6_RC_CONTROL, 0);
2446 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
2447 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
2448 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
2449 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
2450 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
2452 for_each_ring(ring, dev_priv, i)
2453 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
2455 I915_WRITE(GEN6_RC_SLEEP, 0);
2456 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
2457 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
2458 I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
2459 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
2461 /* Check if we are enabling RC6 */
2462 rc6_mode = intel_enable_rc6(dev_priv->dev);
2463 if (rc6_mode & INTEL_RC6_ENABLE)
2464 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
2466 /* We don't use those on Haswell */
2467 if (!IS_HASWELL(dev)) {
2468 if (rc6_mode & INTEL_RC6p_ENABLE)
2469 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
2471 if (rc6_mode & INTEL_RC6pp_ENABLE)
2472 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
2475 DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
2476 (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
2477 (rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
2478 (rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
2480 I915_WRITE(GEN6_RC_CONTROL,
2482 GEN6_RC_CTL_EI_MODE(1) |
2483 GEN6_RC_CTL_HW_ENABLE);
2485 I915_WRITE(GEN6_RPNSWREQ,
2486 GEN6_FREQUENCY(10) |
2488 GEN6_AGGRESSIVE_TURBO);
2489 I915_WRITE(GEN6_RC_VIDEO_FREQ,
2490 GEN6_FREQUENCY(12));
2492 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
2493 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
2494 dev_priv->rps.max_delay << 24 |
2495 dev_priv->rps.min_delay << 16);
2497 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
2498 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
2499 I915_WRITE(GEN6_RP_UP_EI, 66000);
2500 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
2502 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
2503 I915_WRITE(GEN6_RP_CONTROL,
2504 GEN6_RP_MEDIA_TURBO |
2505 GEN6_RP_MEDIA_HW_NORMAL_MODE |
2506 GEN6_RP_MEDIA_IS_GFX |
2508 GEN6_RP_UP_BUSY_AVG |
2509 (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
2511 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
2514 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
2515 if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
2516 dev_priv->rps.max_delay = pcu_mbox & 0xff;
2517 DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
2520 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
2523 gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
2525 /* requires MSI enabled */
2526 I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS);
2527 spin_lock_irq(&dev_priv->rps.lock);
2528 WARN_ON(dev_priv->rps.pm_iir != 0);
2529 I915_WRITE(GEN6_PMIMR, 0);
2530 spin_unlock_irq(&dev_priv->rps.lock);
2531 /* enable all PM interrupts */
2532 I915_WRITE(GEN6_PMINTRMSK, 0);
2535 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
2536 if (IS_GEN6(dev) && ret) {
2537 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
2538 } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
2539 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
2540 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
2541 rc6vids &= 0xffff00;
2542 rc6vids |= GEN6_ENCODE_RC6_VID(450);
2543 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
2545 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
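/*
 * For reference, the VID encoding used here maps millivolts as
 * vid = (mV - 245) / 5, so the 450mV floor enforced above corresponds
 * to GEN6_ENCODE_RC6_VID(450) = 41.
 */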
2548 gen6_gt_force_wake_put(dev_priv);
2551 static void gen6_update_ring_freq(struct drm_device *dev)
2553 struct drm_i915_private *dev_priv = dev->dev_private;
2555 int gpu_freq, ia_freq, max_ia_freq;
2556 int scaling_factor = 180;
2558 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
2560 max_ia_freq = cpufreq_quick_get_max(0);
2562 * Default to measured freq if none found, PCU will ensure we don't go over. */
2566 max_ia_freq = tsc_khz;
2568 /* Convert from kHz to MHz */
2569 max_ia_freq /= 1000;
2572 * For each potential GPU frequency, load a ring frequency we'd like
2573 * to use for memory access. We do this by specifying the IA frequency
2574 * the PCU should use as a reference to determine the ring frequency.
2576 for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay; gpu_freq--) {
2578 int diff = dev_priv->rps.max_delay - gpu_freq;
2581 * For GPU frequencies less than 750MHz, just use the lowest ring freq. */
2584 if (gpu_freq < min_freq)
2587 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
2588 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
2589 ia_freq <<= GEN6_PCODE_FREQ_IA_RATIO_SHIFT;
2591 sandybridge_pcode_write(dev_priv,
2592 GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
2593 ia_freq | gpu_freq);
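/*
 * Worked example (numbers assumed for illustration): with
 * max_ia_freq = 3400MHz, max_delay = 22 and gpu_freq = 16, diff is 6,
 * so ia_freq = 3400 - (6 * 180) / 2 = 2860MHz, which rounds to 29 in
 * the 100MHz units the PCU mailbox expects.
 */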
2597 void ironlake_teardown_rc6(struct drm_device *dev)
2599 struct drm_i915_private *dev_priv = dev->dev_private;
2601 if (dev_priv->ips.renderctx) {
2602 i915_gem_object_unpin(dev_priv->ips.renderctx);
2603 drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
2604 dev_priv->ips.renderctx = NULL;
2607 if (dev_priv->ips.pwrctx) {
2608 i915_gem_object_unpin(dev_priv->ips.pwrctx);
2609 drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
2610 dev_priv->ips.pwrctx = NULL;
2614 static void ironlake_disable_rc6(struct drm_device *dev)
2616 struct drm_i915_private *dev_priv = dev->dev_private;
2618 if (I915_READ(PWRCTXA)) {
2619 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
2620 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
2621 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
2624 I915_WRITE(PWRCTXA, 0);
2625 POSTING_READ(PWRCTXA);
2627 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
2628 POSTING_READ(RSTDBYCTL);
2632 static int ironlake_setup_rc6(struct drm_device *dev)
2634 struct drm_i915_private *dev_priv = dev->dev_private;
2636 if (dev_priv->ips.renderctx == NULL)
2637 dev_priv->ips.renderctx = intel_alloc_context_page(dev);
2638 if (!dev_priv->ips.renderctx)
2641 if (dev_priv->ips.pwrctx == NULL)
2642 dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
2643 if (!dev_priv->ips.pwrctx) {
2644 ironlake_teardown_rc6(dev);
2651 static void ironlake_enable_rc6(struct drm_device *dev)
2653 struct drm_i915_private *dev_priv = dev->dev_private;
2654 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
2657 /* rc6 disabled by default due to repeated reports of hanging during boot and resume. */
2660 if (!intel_enable_rc6(dev))
2663 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2665 ret = ironlake_setup_rc6(dev);
2670 * GPU can automatically power down the render unit if given a page to save state. */
2673 ret = intel_ring_begin(ring, 6);
2675 ironlake_teardown_rc6(dev);
2679 intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
2680 intel_ring_emit(ring, MI_SET_CONTEXT);
2681 intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset |
2683 MI_SAVE_EXT_STATE_EN |
2684 MI_RESTORE_EXT_STATE_EN |
2685 MI_RESTORE_INHIBIT);
2686 intel_ring_emit(ring, MI_SUSPEND_FLUSH);
2687 intel_ring_emit(ring, MI_NOOP);
2688 intel_ring_emit(ring, MI_FLUSH);
2689 intel_ring_advance(ring);
2692 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
2693 * does an implicit flush there; combined with the MI_FLUSH above, it
2694 * should be safe to assume that renderctx is valid.
2696 ret = intel_wait_ring_idle(ring);
2698 DRM_ERROR("failed to enable ironlake power power savings\n");
2699 ironlake_teardown_rc6(dev);
2703 I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN);
2704 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
2707 static unsigned long intel_pxfreq(u32 vidfreq)
2710 int div = (vidfreq & 0x3f0000) >> 16;
2711 int post = (vidfreq & 0x3000) >> 12;
2712 int pre = (vidfreq & 0x7);
2717 freq = ((div * 133333) / ((1<<post) * pre));
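/*
 * Example decode (field values assumed): div = 16, post = 1, pre = 1
 * gives (16 * 133333) / (2 * 1) ~= 1066664, i.e. roughly 1066MHz
 * expressed against the 133MHz reference clock.
 */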
2722 static const struct cparams {
2728 { 1, 1333, 301, 28664 },
2729 { 1, 1066, 294, 24460 },
2730 { 1, 800, 294, 25192 },
2731 { 0, 1333, 276, 27605 },
2732 { 0, 1066, 276, 27605 },
2733 { 0, 800, 231, 23784 },
2736 static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
2738 u64 total_count, diff, ret;
2739 u32 count1, count2, count3, m = 0, c = 0;
2740 unsigned long now = jiffies_to_msecs(jiffies), diff1;
2743 assert_spin_locked(&mchdev_lock);
2745 diff1 = now - dev_priv->ips.last_time1;
2747 /* Prevent division-by-zero if we are asking too fast.
2748 * Also, we don't get interesting results if we are polling
2749 * faster than once in 10ms, so just return the saved value in such cases. */
2753 return dev_priv->ips.chipset_power;
2755 count1 = I915_READ(DMIEC);
2756 count2 = I915_READ(DDREC);
2757 count3 = I915_READ(CSIEC);
2759 total_count = count1 + count2 + count3;
2761 /* FIXME: handle per-counter overflow */
2762 if (total_count < dev_priv->ips.last_count1) {
2763 diff = ~0UL - dev_priv->ips.last_count1;
2764 diff += total_count;
2766 diff = total_count - dev_priv->ips.last_count1;
2769 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
2770 if (cparams[i].i == dev_priv->ips.c_m &&
2771 cparams[i].t == dev_priv->ips.r_t) {
2778 diff = div_u64(diff, diff1);
2779 ret = ((m * diff) + c);
2780 ret = div_u64(ret, 10);
2782 dev_priv->ips.last_count1 = total_count;
2783 dev_priv->ips.last_time1 = now;
2785 dev_priv->ips.chipset_power = ret;
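/*
 * Sketch with assumed numbers: using the non-mobile 1333MHz row of
 * cparams above (m = 276, c = 27605), a summed counter delta of 1000
 * over diff1 = 100ms gives diff = 10 and
 * ret = (276 * 10 + 27605) / 10 = 3036.
 */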
2790 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
2794 if (dev_priv->info->gen != 5)
2797 spin_lock_irq(&mchdev_lock);
2799 val = __i915_chipset_val(dev_priv);
2801 spin_unlock_irq(&mchdev_lock);
2806 unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
2808 unsigned long m, x, b;
2811 tsfs = I915_READ(TSFS);
2813 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
2814 x = I915_READ8(TR1);
2816 b = tsfs & TSFS_INTR_MASK;
2818 return ((m * x) / 127) - b;
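/*
 * Illustrative reading (register values assumed): a slope m of 64, a
 * TR1 value x of 100 and an intercept b of 10 reports
 * ((64 * 100) / 127) - 10 = 40.
 */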
2821 static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
2823 static const struct v_table {
2824 u16 vd; /* in .1 mil */
2825 u16 vm; /* in .1 mil */
2956 if (dev_priv->info->is_mobile)
2957 return v_table[pxvid].vm;
2959 return v_table[pxvid].vd;
2962 static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
2964 struct timespec now, diff1;
2966 unsigned long diffms;
2969 assert_spin_locked(&mchdev_lock);
2971 getrawmonotonic(&now);
2972 diff1 = timespec_sub(now, dev_priv->ips.last_time2);
2974 /* Don't divide by 0 */
2975 diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
2979 count = I915_READ(GFXEC);
2981 if (count < dev_priv->ips.last_count2) {
2982 diff = ~0UL - dev_priv->ips.last_count2;
2985 diff = count - dev_priv->ips.last_count2;
2988 dev_priv->ips.last_count2 = count;
2989 dev_priv->ips.last_time2 = now;
2991 /* More magic constants... */
2993 diff = div_u64(diff, diffms * 10);
2994 dev_priv->ips.gfx_power = diff;
2997 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
2999 if (dev_priv->info->gen != 5)
3002 spin_lock_irq(&mchdev_lock);
3004 __i915_update_gfx_val(dev_priv);
3006 spin_unlock_irq(&mchdev_lock);
3009 static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
3011 unsigned long t, corr, state1, corr2, state2;
3014 assert_spin_locked(&mchdev_lock);
3016 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
3017 pxvid = (pxvid >> 24) & 0x7f;
3018 ext_v = pvid_to_extvid(dev_priv, pxvid);
3022 t = i915_mch_val(dev_priv);
3024 /* Revel in the empirically derived constants */
3026 /* Correction factor in 1/100000 units */
3028 if (t > 80) corr = ((t * 2349) + 135940);
3030 else if (t >= 50) corr = ((t * 964) + 29317);
3032 else /* t < 50 */ corr = ((t * 301) + 1004);
3034 corr = corr * ((150142 * state1) / 10000 - 78642);
3036 corr2 = (corr * dev_priv->ips.corr);
3038 state2 = (corr2 * state1) / 10000;
3039 state2 /= 100; /* convert to mW */
3041 __i915_update_gfx_val(dev_priv);
3043 return dev_priv->ips.gfx_power + state2;
3046 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
3050 if (dev_priv->info->gen != 5)
3053 spin_lock_irq(&mchdev_lock);
3055 val = __i915_gfx_val(dev_priv);
3057 spin_unlock_irq(&mchdev_lock);
3063 * i915_read_mch_val - return value for IPS use
3065 * Calculate and return a value for the IPS driver to use when deciding whether
3066 * we have thermal and power headroom to increase CPU or GPU power budget.
3068 unsigned long i915_read_mch_val(void)
3070 struct drm_i915_private *dev_priv;
3071 unsigned long chipset_val, graphics_val, ret = 0;
3073 spin_lock_irq(&mchdev_lock);
3076 dev_priv = i915_mch_dev;
3078 chipset_val = __i915_chipset_val(dev_priv);
3079 graphics_val = __i915_gfx_val(dev_priv);
3081 ret = chipset_val + graphics_val;
3084 spin_unlock_irq(&mchdev_lock);
3088 EXPORT_SYMBOL_GPL(i915_read_mch_val);
3091 * i915_gpu_raise - raise GPU frequency limit
3093 * Raise the limit; IPS indicates we have thermal headroom.
3095 bool i915_gpu_raise(void)
3097 struct drm_i915_private *dev_priv;
3100 spin_lock_irq(&mchdev_lock);
3101 if (!i915_mch_dev) {
3105 dev_priv = i915_mch_dev;
3107 if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
3108 dev_priv->ips.max_delay--;
3111 spin_unlock_irq(&mchdev_lock);
3115 EXPORT_SYMBOL_GPL(i915_gpu_raise);
3118 * i915_gpu_lower - lower GPU frequency limit
3120 * IPS indicates we're close to a thermal limit, so throttle back the GPU
3121 * frequency maximum.
3123 bool i915_gpu_lower(void)
3125 struct drm_i915_private *dev_priv;
3128 spin_lock_irq(&mchdev_lock);
3129 if (!i915_mch_dev) {
3133 dev_priv = i915_mch_dev;
3135 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
3136 dev_priv->ips.max_delay++;
3139 spin_unlock_irq(&mchdev_lock);
3143 EXPORT_SYMBOL_GPL(i915_gpu_lower);
3146 * i915_gpu_busy - indicate GPU business to IPS
3148 * Tell the IPS driver whether or not the GPU is busy.
3150 bool i915_gpu_busy(void)
3152 struct drm_i915_private *dev_priv;
3153 struct intel_ring_buffer *ring;
3157 spin_lock_irq(&mchdev_lock);
3160 dev_priv = i915_mch_dev;
3162 for_each_ring(ring, dev_priv, i)
3163 ret |= !list_empty(&ring->request_list);
3166 spin_unlock_irq(&mchdev_lock);
3170 EXPORT_SYMBOL_GPL(i915_gpu_busy);
3173 * i915_gpu_turbo_disable - disable graphics turbo
3175 * Disable graphics turbo by resetting the max frequency and setting the
3176 * current frequency to the default.
3178 bool i915_gpu_turbo_disable(void)
3180 struct drm_i915_private *dev_priv;
3183 spin_lock_irq(&mchdev_lock);
3184 if (!i915_mch_dev) {
3188 dev_priv = i915_mch_dev;
3190 dev_priv->ips.max_delay = dev_priv->ips.fstart;
3192 if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
3196 spin_unlock_irq(&mchdev_lock);
3200 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
3203 * Tells the intel_ips driver that the i915 driver is now loaded, if
3204 * IPS got loaded first.
3206 * This awkward dance is so that neither module has to depend on the
3207 * other in order for IPS to do the appropriate communication of
3208 * GPU turbo limits to i915.
3211 ips_ping_for_i915_load(void)
3215 link = symbol_get(ips_link_to_i915_driver);
3218 symbol_put(ips_link_to_i915_driver);
3222 void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
3224 /* We only register the i915 ips part with intel-ips once everything is
3225 * set up, to avoid intel-ips sneaking in and reading bogus values. */
3226 spin_lock_irq(&mchdev_lock);
3227 i915_mch_dev = dev_priv;
3228 spin_unlock_irq(&mchdev_lock);
3230 ips_ping_for_i915_load();
3233 void intel_gpu_ips_teardown(void)
3235 spin_lock_irq(&mchdev_lock);
3236 i915_mch_dev = NULL;
3237 spin_unlock_irq(&mchdev_lock);
3239 static void intel_init_emon(struct drm_device *dev)
3241 struct drm_i915_private *dev_priv = dev->dev_private;
3246 /* Disable PMON while programming the event weights */
3250 /* Program energy weights for various events */
3251 I915_WRITE(SDEW, 0x15040d00);
3252 I915_WRITE(CSIEW0, 0x007f0000);
3253 I915_WRITE(CSIEW1, 0x1e220004);
3254 I915_WRITE(CSIEW2, 0x04000004);
3256 for (i = 0; i < 5; i++)
3257 I915_WRITE(PEW + (i * 4), 0);
3258 for (i = 0; i < 3; i++)
3259 I915_WRITE(DEW + (i * 4), 0);
3261 /* Program P-state weights to account for frequency power adjustment */
3262 for (i = 0; i < 16; i++) {
3263 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
3264 unsigned long freq = intel_pxfreq(pxvidfreq);
3265 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
3270 val *= (freq / 1000);
3272 val /= (127*127*900);
3274 DRM_ERROR("bad pxval: %ld\n", val);
3277 /* Render standby states get 0 weight */
3281 for (i = 0; i < 4; i++) {
3282 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
3283 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
3284 I915_WRITE(PXW + (i * 4), val);
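/*
 * Packing example (weights assumed): pxw[0..3] of 0x12, 0x34, 0x56 and
 * 0x78 are merged MSB-first into the single register word 0x12345678,
 * matching the shifts above.
 */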
3287 /* Adjust magic regs to magic values (more experimental results) */
3288 I915_WRITE(OGW0, 0);
3289 I915_WRITE(OGW1, 0);
3290 I915_WRITE(EG0, 0x00007f00);
3291 I915_WRITE(EG1, 0x0000000e);
3292 I915_WRITE(EG2, 0x000e0000);
3293 I915_WRITE(EG3, 0x68000300);
3294 I915_WRITE(EG4, 0x42000000);
3295 I915_WRITE(EG5, 0x00140031);
3299 for (i = 0; i < 8; i++)
3300 I915_WRITE(PXWL + (i * 4), 0);
3302 /* Enable PMON + select events */
3303 I915_WRITE(ECR, 0x80000019);
3305 lcfuse = I915_READ(LCFUSE02);
3307 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
3310 void intel_disable_gt_powersave(struct drm_device *dev)
3312 struct drm_i915_private *dev_priv = dev->dev_private;
3314 if (IS_IRONLAKE_M(dev)) {
3315 ironlake_disable_drps(dev);
3316 ironlake_disable_rc6(dev);
3317 } else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) {
3318 cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
3319 mutex_lock(&dev_priv->rps.hw_lock);
3320 gen6_disable_rps(dev);
3321 mutex_unlock(&dev_priv->rps.hw_lock);
3325 static void intel_gen6_powersave_work(struct work_struct *work)
3327 struct drm_i915_private *dev_priv =
3328 container_of(work, struct drm_i915_private,
3329 rps.delayed_resume_work.work);
3330 struct drm_device *dev = dev_priv->dev;
3332 mutex_lock(&dev_priv->rps.hw_lock);
3333 gen6_enable_rps(dev);
3334 gen6_update_ring_freq(dev);
3335 mutex_unlock(&dev_priv->rps.hw_lock);
3338 void intel_enable_gt_powersave(struct drm_device *dev)
3340 struct drm_i915_private *dev_priv = dev->dev_private;
3342 if (IS_IRONLAKE_M(dev)) {
3343 ironlake_enable_drps(dev);
3344 ironlake_enable_rc6(dev);
3345 intel_init_emon(dev);
3346 } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
3348 * PCU communication is slow and this doesn't need to be
3349 * done at any specific time, so do this out of our fast path
3350 * to make resume and init faster.
3352 schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
3353 round_jiffies_up_relative(HZ));
3357 static void ibx_init_clock_gating(struct drm_device *dev)
3359 struct drm_i915_private *dev_priv = dev->dev_private;
3362 * On Ibex Peak and Cougar Point, we need to disable clock
3363 * gating for the panel power sequencer or it will fail to
3364 * start up when no ports are active.
3366 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
3369 static void ironlake_init_clock_gating(struct drm_device *dev)
3371 struct drm_i915_private *dev_priv = dev->dev_private;
3372 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
3374 /* Required for FBC */
3375 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
3376 ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
3377 ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
3379 I915_WRITE(PCH_3DCGDIS0,
3380 MARIUNIT_CLOCK_GATE_DISABLE |
3381 SVSMUNIT_CLOCK_GATE_DISABLE);
3382 I915_WRITE(PCH_3DCGDIS1,
3383 VFMUNIT_CLOCK_GATE_DISABLE);
3386 * According to the spec the following bits should be set in
3387 * order to enable memory self-refresh
3388 * The bit 22/21 of 0x42004
3389 * The bit 5 of 0x42020
3390 * The bit 15 of 0x45000
3392 I915_WRITE(ILK_DISPLAY_CHICKEN2,
3393 (I915_READ(ILK_DISPLAY_CHICKEN2) |
3394 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
3395 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
3396 I915_WRITE(DISP_ARB_CTL,
3397 (I915_READ(DISP_ARB_CTL) |
3399 I915_WRITE(WM3_LP_ILK, 0);
3400 I915_WRITE(WM2_LP_ILK, 0);
3401 I915_WRITE(WM1_LP_ILK, 0);
3404 * Based on the document from hardware guys the following bits
3405 * should be set unconditionally in order to enable FBC.
3406 * The bit 22 of 0x42000
3407 * The bit 22 of 0x42004
3408 * The bit 7,8,9 of 0x42020.
3410 if (IS_IRONLAKE_M(dev)) {
3411 I915_WRITE(ILK_DISPLAY_CHICKEN1,
3412 I915_READ(ILK_DISPLAY_CHICKEN1) |
3414 I915_WRITE(ILK_DISPLAY_CHICKEN2,
3415 I915_READ(ILK_DISPLAY_CHICKEN2) |
3419 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
3421 I915_WRITE(ILK_DISPLAY_CHICKEN2,
3422 I915_READ(ILK_DISPLAY_CHICKEN2) |
3423 ILK_ELPIN_409_SELECT);
3424 I915_WRITE(_3D_CHICKEN2,
3425 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
3426 _3D_CHICKEN2_WM_READ_PIPELINED);
3428 /* WaDisableRenderCachePipelinedFlush */
3429 I915_WRITE(CACHE_MODE_0,
3430 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
3432 ibx_init_clock_gating(dev);
3435 static void cpt_init_clock_gating(struct drm_device *dev)
3437 struct drm_i915_private *dev_priv = dev->dev_private;
3441 * On Ibex Peak and Cougar Point, we need to disable clock
3442 * gating for the panel power sequencer or it will fail to
3443 * start up when no ports are active.
3445 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
3446 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
3447 DPLS_EDP_PPS_FIX_DIS);
3448 /* WADP0ClockGatingDisable */
3449 for_each_pipe(pipe) {
3450 I915_WRITE(TRANS_CHICKEN1(pipe),
3451 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
3455 static void gen6_init_clock_gating(struct drm_device *dev)
3457 struct drm_i915_private *dev_priv = dev->dev_private;
3459 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
3461 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
3463 I915_WRITE(ILK_DISPLAY_CHICKEN2,
3464 I915_READ(ILK_DISPLAY_CHICKEN2) |
3465 ILK_ELPIN_409_SELECT);
3467 I915_WRITE(WM3_LP_ILK, 0);
3468 I915_WRITE(WM2_LP_ILK, 0);
3469 I915_WRITE(WM1_LP_ILK, 0);
3471 I915_WRITE(CACHE_MODE_0,
3472 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
3474 I915_WRITE(GEN6_UCGCTL1,
3475 I915_READ(GEN6_UCGCTL1) |
3476 GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
3477 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
3479 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
3480 * gating disable must be set. Failure to set it results in
3481 * flickering pixels due to Z write ordering failures after
3482 * some amount of runtime in the Mesa "fire" demo, and Unigine
3483 * Sanctuary and Tropics, and apparently anything else with
3484 * alpha test or pixel discard.
3486 * According to the spec, bit 11 (RCCUNIT) must also be set,
3487 * but we didn't debug actual testcases to find it out.
3489 * Also apply WaDisableVDSUnitClockGating and
3490 * WaDisableRCPBUnitClockGating.
3492 I915_WRITE(GEN6_UCGCTL2,
3493 GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
3494 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
3495 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
3497 /* Bspec says we need to always set all mask bits. */
3498 I915_WRITE(_3D_CHICKEN3, (0xFFFF << 16) |
3499 _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL);
3502 * According to the spec the following bits should be
3503 * set in order to enable memory self-refresh and fbc:
3504 * The bit21 and bit22 of 0x42000
3505 * The bit21 and bit22 of 0x42004
3506 * The bit5 and bit7 of 0x42020
3507 * The bit14 of 0x70180
3508 * The bit14 of 0x71180
3510 I915_WRITE(ILK_DISPLAY_CHICKEN1,
3511 I915_READ(ILK_DISPLAY_CHICKEN1) |
3512 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
3513 I915_WRITE(ILK_DISPLAY_CHICKEN2,
3514 I915_READ(ILK_DISPLAY_CHICKEN2) |
3515 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
3516 I915_WRITE(ILK_DSPCLK_GATE_D,
3517 I915_READ(ILK_DSPCLK_GATE_D) |
3518 ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
3519 ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
3521 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
3522 GEN6_MBCTL_ENABLE_BOOT_FETCH);
3524 for_each_pipe(pipe) {
3525 I915_WRITE(DSPCNTR(pipe),
3526 I915_READ(DSPCNTR(pipe)) |
3527 DISPPLANE_TRICKLE_FEED_DISABLE);
3528 intel_flush_display_plane(dev_priv, pipe);
3531 /* The default value should be 0x200 according to docs, but the two
3532 * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
3533 I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff));
3534 I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
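/*
 * Aside on the masked-bit idiom used here: these registers take a
 * write-enable mask in the high 16 bits and the value in the low 16,
 * so _MASKED_BIT_ENABLE(x) expands to (x << 16) | x and
 * _MASKED_BIT_DISABLE(x) to x << 16, leaving other bits untouched.
 */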
3536 cpt_init_clock_gating(dev);
3539 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
3541 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
3543 reg &= ~GEN7_FF_SCHED_MASK;
3544 reg |= GEN7_FF_TS_SCHED_HW;
3545 reg |= GEN7_FF_VS_SCHED_HW;
3546 reg |= GEN7_FF_DS_SCHED_HW;
3548 I915_WRITE(GEN7_FF_THREAD_MODE, reg);
3551 static void haswell_init_clock_gating(struct drm_device *dev)
3553 struct drm_i915_private *dev_priv = dev->dev_private;
3556 I915_WRITE(WM3_LP_ILK, 0);
3557 I915_WRITE(WM2_LP_ILK, 0);
3558 I915_WRITE(WM1_LP_ILK, 0);
3560 /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
3561 * This implements the WaDisableRCZUnitClockGating workaround.
3563 I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
3565 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
3566 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
3567 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
3569 /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
3570 I915_WRITE(GEN7_L3CNTLREG1,
3571 GEN7_WA_FOR_GEN7_L3_CONTROL);
3572 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
3573 GEN7_WA_L3_CHICKEN_MODE);
3575 /* This is required by WaCatErrorRejectionIssue */
3576 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
3577 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
3578 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
3580 for_each_pipe(pipe) {
3581 I915_WRITE(DSPCNTR(pipe),
3582 I915_READ(DSPCNTR(pipe)) |
3583 DISPPLANE_TRICKLE_FEED_DISABLE);
3584 intel_flush_display_plane(dev_priv, pipe);
3587 gen7_setup_fixed_func_scheduler(dev_priv);
3589 /* WaDisable4x2SubspanOptimization */
3590 I915_WRITE(CACHE_MODE_1,
3591 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
3593 /* XXX: This is a workaround for early silicon revisions and should be removed later. */
3598 WM_DBG_DISALLOW_MULTIPLE_LP |
3599 WM_DBG_DISALLOW_SPRITE |
3600 WM_DBG_DISALLOW_MAXFIFO);
3604 static void ivybridge_init_clock_gating(struct drm_device *dev)
3606 struct drm_i915_private *dev_priv = dev->dev_private;
3610 I915_WRITE(WM3_LP_ILK, 0);
3611 I915_WRITE(WM2_LP_ILK, 0);
3612 I915_WRITE(WM1_LP_ILK, 0);
3614 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
3616 /* WaDisableEarlyCull */
3617 I915_WRITE(_3D_CHICKEN3,
3618 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
3620 /* WaDisableBackToBackFlipFix */
3621 I915_WRITE(IVB_CHICKEN3,
3622 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
3623 CHICKEN3_DGMG_DONE_FIX_DISABLE);
3625 /* WaDisablePSDDualDispatchEnable */
3626 if (IS_IVB_GT1(dev))
3627 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
3628 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
3630 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2,
3631 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
3633 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
3634 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
3635 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
3637 /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
3638 I915_WRITE(GEN7_L3CNTLREG1,
3639 GEN7_WA_FOR_GEN7_L3_CONTROL);
3640 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
3641 GEN7_WA_L3_CHICKEN_MODE);
3642 if (IS_IVB_GT1(dev))
3643 I915_WRITE(GEN7_ROW_CHICKEN2,
3644 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
3646 I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
3647 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
3650 /* WaForceL3Serialization */
3651 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
3652 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
3654 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
3655 * gating disable must be set. Failure to set it results in
3656 * flickering pixels due to Z write ordering failures after
3657 * some amount of runtime in the Mesa "fire" demo, and Unigine
3658 * Sanctuary and Tropics, and apparently anything else with
3659 * alpha test or pixel discard.
3661 * According to the spec, bit 11 (RCCUNIT) must also be set,
3662 * but we didn't debug actual testcases to find it out.
3664 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
3665 * This implements the WaDisableRCZUnitClockGating workaround.
3667 I915_WRITE(GEN6_UCGCTL2,
3668 GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
3669 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
3671 /* This is required by WaCatErrorRejectionIssue */
3672 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
3673 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
3674 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
3676 for_each_pipe(pipe) {
3677 I915_WRITE(DSPCNTR(pipe),
3678 I915_READ(DSPCNTR(pipe)) |
3679 DISPPLANE_TRICKLE_FEED_DISABLE);
3680 intel_flush_display_plane(dev_priv, pipe);
3683 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
3684 GEN6_MBCTL_ENABLE_BOOT_FETCH);
3686 gen7_setup_fixed_func_scheduler(dev_priv);
3688 /* WaDisable4x2SubspanOptimization */
3689 I915_WRITE(CACHE_MODE_1,
3690 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
3692 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
3693 snpcr &= ~GEN6_MBC_SNPCR_MASK;
3694 snpcr |= GEN6_MBC_SNPCR_MED;
3695 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
3697 cpt_init_clock_gating(dev);
3700 static void valleyview_init_clock_gating(struct drm_device *dev)
3702 struct drm_i915_private *dev_priv = dev->dev_private;
3705 I915_WRITE(WM3_LP_ILK, 0);
3706 I915_WRITE(WM2_LP_ILK, 0);
3707 I915_WRITE(WM1_LP_ILK, 0);
3709 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
3711 /* WaDisableEarlyCull */
3712 I915_WRITE(_3D_CHICKEN3,
3713 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
3715 /* WaDisableBackToBackFlipFix */
3716 I915_WRITE(IVB_CHICKEN3,
3717 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
3718 CHICKEN3_DGMG_DONE_FIX_DISABLE);
3720 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
3721 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
3723 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
3724 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
3725 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
3727 /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
3728 I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);
3729 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
3731 /* WaForceL3Serialization */
3732 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
3733 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
3735 /* WaDisableDopClockGating */
3736 I915_WRITE(GEN7_ROW_CHICKEN2,
3737 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
3743 /* This is required by WaCatErrorRejectionIssue */
3744 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
3745 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
3746 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
3748 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
3749 GEN6_MBCTL_ENABLE_BOOT_FETCH);
3752 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
3753 * gating disable must be set. Failure to set it results in
3754 * flickering pixels due to Z write ordering failures after
3755 * some amount of runtime in the Mesa "fire" demo, and Unigine
3756 * Sanctuary and Tropics, and apparently anything else with
3757 * alpha test or pixel discard.
3759 * According to the spec, bit 11 (RCCUNIT) must also be set,
3760 * but we didn't debug actual testcases to find it out.
3762 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
3763 * This implements the WaDisableRCZUnitClockGating workaround.
3765 * Also apply WaDisableVDSUnitClockGating and
3766 * WaDisableRCPBUnitClockGating.
3768 I915_WRITE(GEN6_UCGCTL2,
3769 GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
3770 GEN7_TDLUNIT_CLOCK_GATE_DISABLE |
3771 GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
3772 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
3773 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
3775 I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
3777 for_each_pipe(pipe) {
3778 I915_WRITE(DSPCNTR(pipe),
3779 I915_READ(DSPCNTR(pipe)) |
3780 DISPPLANE_TRICKLE_FEED_DISABLE);
3781 intel_flush_display_plane(dev_priv, pipe);
3784 I915_WRITE(CACHE_MODE_1,
3785 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
3788 * On ValleyView, the GUnit needs to signal the GT
3789 * when flip and other events complete. So enable
3790 * all the GUnit->GT interrupts here
3792 I915_WRITE(VLV_DPFLIPSTAT, PIPEB_LINE_COMPARE_INT_EN |
3793 PIPEB_HLINE_INT_EN | PIPEB_VBLANK_INT_EN |
3794 SPRITED_FLIPDONE_INT_EN | SPRITEC_FLIPDONE_INT_EN |
3795 PLANEB_FLIPDONE_INT_EN | PIPEA_LINE_COMPARE_INT_EN |
3796 PIPEA_HLINE_INT_EN | PIPEA_VBLANK_INT_EN |
3797 SPRITEB_FLIPDONE_INT_EN | SPRITEA_FLIPDONE_INT_EN |
3798 PLANEA_FLIPDONE_INT_EN);
3801 * WaDisableVLVClockGating_VBIIssue
3802 * Disable clock gating on the GCFG unit to prevent a delay
3803 * in the reporting of vblank events.
3805 I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
3808 static void g4x_init_clock_gating(struct drm_device *dev)
3810 struct drm_i915_private *dev_priv = dev->dev_private;
3811 uint32_t dspclk_gate;
3813 I915_WRITE(RENCLK_GATE_D1, 0);
3814 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
3815 GS_UNIT_CLOCK_GATE_DISABLE |
3816 CL_UNIT_CLOCK_GATE_DISABLE);
3817 I915_WRITE(RAMCLK_GATE_D, 0);
3818 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
3819 OVRUNIT_CLOCK_GATE_DISABLE |
3820 OVCUNIT_CLOCK_GATE_DISABLE;
3822 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
3823 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
3825 /* WaDisableRenderCachePipelinedFlush */
3826 I915_WRITE(CACHE_MODE_0,
3827 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
3830 static void crestline_init_clock_gating(struct drm_device *dev)
3832 struct drm_i915_private *dev_priv = dev->dev_private;
3834 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
3835 I915_WRITE(RENCLK_GATE_D2, 0);
3836 I915_WRITE(DSPCLK_GATE_D, 0);
3837 I915_WRITE(RAMCLK_GATE_D, 0);
3838 I915_WRITE16(DEUC, 0);
3841 static void broadwater_init_clock_gating(struct drm_device *dev)
3843 struct drm_i915_private *dev_priv = dev->dev_private;
3845 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
3846 I965_RCC_CLOCK_GATE_DISABLE |
3847 I965_RCPB_CLOCK_GATE_DISABLE |
3848 I965_ISC_CLOCK_GATE_DISABLE |
3849 I965_FBC_CLOCK_GATE_DISABLE);
3850 I915_WRITE(RENCLK_GATE_D2, 0);
3853 static void gen3_init_clock_gating(struct drm_device *dev)
3855 struct drm_i915_private *dev_priv = dev->dev_private;
3856 u32 dstate = I915_READ(D_STATE);
3858 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
3859 DSTATE_DOT_CLOCK_GATING;
3860 I915_WRITE(D_STATE, dstate);
3862 if (IS_PINEVIEW(dev))
3863 I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
3865 /* IIR "flip pending" means done if this bit is set */
3866 I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
3869 static void i85x_init_clock_gating(struct drm_device *dev)
3871 struct drm_i915_private *dev_priv = dev->dev_private;
3873 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
3876 static void i830_init_clock_gating(struct drm_device *dev)
3878 struct drm_i915_private *dev_priv = dev->dev_private;
3880 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
3883 void intel_init_clock_gating(struct drm_device *dev)
3885 struct drm_i915_private *dev_priv = dev->dev_private;
3887 dev_priv->display.init_clock_gating(dev);
3890 /* Starting with Haswell, we have different power wells for
3891 * different parts of the GPU. This attempts to enable them all.
3893 void intel_init_power_wells(struct drm_device *dev)
3895 struct drm_i915_private *dev_priv = dev->dev_private;
3896 unsigned long power_wells[] = {
3903 if (!IS_HASWELL(dev))
3906 mutex_lock(&dev->struct_mutex);
3908 for (i = 0; i < ARRAY_SIZE(power_wells); i++) {
3909 int well = I915_READ(power_wells[i]);
3911 if ((well & HSW_PWR_WELL_STATE) == 0) {
3912 I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE);
3913 if (wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20))
3914 DRM_ERROR("Error enabling power well %lx\n", power_wells[i]);
3918 mutex_unlock(&dev->struct_mutex);
3921 /* Set up chip specific power management-related functions */
3922 void intel_init_pm(struct drm_device *dev)
3924 struct drm_i915_private *dev_priv = dev->dev_private;
3926 if (I915_HAS_FBC(dev)) {
3927 if (HAS_PCH_SPLIT(dev)) {
3928 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
3929 dev_priv->display.enable_fbc = ironlake_enable_fbc;
3930 dev_priv->display.disable_fbc = ironlake_disable_fbc;
3931 } else if (IS_GM45(dev)) {
3932 dev_priv->display.fbc_enabled = g4x_fbc_enabled;
3933 dev_priv->display.enable_fbc = g4x_enable_fbc;
3934 dev_priv->display.disable_fbc = g4x_disable_fbc;
3935 } else if (IS_CRESTLINE(dev)) {
3936 dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
3937 dev_priv->display.enable_fbc = i8xx_enable_fbc;
3938 dev_priv->display.disable_fbc = i8xx_disable_fbc;
3940 /* 855GM needs testing */
3944 if (IS_PINEVIEW(dev))
3945 i915_pineview_get_mem_freq(dev);
3946 else if (IS_GEN5(dev))
3947 i915_ironlake_get_mem_freq(dev);
3949 /* For FIFO watermark updates */
3950 if (HAS_PCH_SPLIT(dev)) {
3952 if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
3953 dev_priv->display.update_wm = ironlake_update_wm;
3955 DRM_DEBUG_KMS("Failed to get proper latency. "
3957 dev_priv->display.update_wm = NULL;
3959 dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
3960 } else if (IS_GEN6(dev)) {
3961 if (SNB_READ_WM0_LATENCY()) {
3962 dev_priv->display.update_wm = sandybridge_update_wm;
3963 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
3965 DRM_DEBUG_KMS("Failed to read display plane latency. "
3967 dev_priv->display.update_wm = NULL;
3969 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
3970 } else if (IS_IVYBRIDGE(dev)) {
3971 /* FIXME: detect B0+ stepping and use auto training */
3972 if (SNB_READ_WM0_LATENCY()) {
3973 dev_priv->display.update_wm = sandybridge_update_wm;
3974 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
3976 DRM_DEBUG_KMS("Failed to read display plane latency. "
3978 dev_priv->display.update_wm = NULL;
3980 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
3981 } else if (IS_HASWELL(dev)) {
3982 if (SNB_READ_WM0_LATENCY()) {
3983 dev_priv->display.update_wm = sandybridge_update_wm;
3984 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
3985 dev_priv->display.update_linetime_wm = haswell_update_linetime_wm;
3987 DRM_DEBUG_KMS("Failed to read display plane latency. "
3989 dev_priv->display.update_wm = NULL;
3991 dev_priv->display.init_clock_gating = haswell_init_clock_gating;
3993 dev_priv->display.update_wm = NULL;
3994 } else if (IS_VALLEYVIEW(dev)) {
3995 dev_priv->display.update_wm = valleyview_update_wm;
3996 dev_priv->display.init_clock_gating =
3997 valleyview_init_clock_gating;
3998 } else if (IS_PINEVIEW(dev)) {
3999 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
4002 dev_priv->mem_freq)) {
4003 DRM_INFO("failed to find known CxSR latency "
4004 "(found ddr%s fsb freq %d, mem freq %d), "
4006 (dev_priv->is_ddr3 == 1) ? "3" : "2",
4007 dev_priv->fsb_freq, dev_priv->mem_freq);
4008 /* Disable CxSR and never update its watermark again */
4009 pineview_disable_cxsr(dev);
4010 dev_priv->display.update_wm = NULL;
4012 dev_priv->display.update_wm = pineview_update_wm;
4013 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
4014 } else if (IS_G4X(dev)) {
4015 dev_priv->display.update_wm = g4x_update_wm;
4016 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
4017 } else if (IS_GEN4(dev)) {
4018 dev_priv->display.update_wm = i965_update_wm;
4019 if (IS_CRESTLINE(dev))
4020 dev_priv->display.init_clock_gating = crestline_init_clock_gating;
4021 else if (IS_BROADWATER(dev))
4022 dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
4023 } else if (IS_GEN3(dev)) {
4024 dev_priv->display.update_wm = i9xx_update_wm;
4025 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
4026 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
4027 } else if (IS_I865G(dev)) {
4028 dev_priv->display.update_wm = i830_update_wm;
4029 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
4030 dev_priv->display.get_fifo_size = i830_get_fifo_size;
4031 } else if (IS_I85X(dev)) {
4032 dev_priv->display.update_wm = i9xx_update_wm;
4033 dev_priv->display.get_fifo_size = i85x_get_fifo_size;
4034 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
4036 dev_priv->display.update_wm = i830_update_wm;
4037 dev_priv->display.init_clock_gating = i830_init_clock_gating;
4039 dev_priv->display.get_fifo_size = i845_get_fifo_size;
4041 dev_priv->display.get_fifo_size = i830_get_fifo_size;
4045 static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
4047 u32 gt_thread_status_mask;
4049 if (IS_HASWELL(dev_priv->dev))
4050 gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
4052 gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
4054 /* w/a for a sporadic read returning 0 by waiting for the GT
4055 * thread to wake up.
4057 if (wait_for_atomic_us((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
4058 DRM_ERROR("GT thread status wait timed out\n");
4061 static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
4063 I915_WRITE_NOTRACE(FORCEWAKE, 0);
4064 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
4067 static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
4071 if (IS_HASWELL(dev_priv->dev))
4072 forcewake_ack = FORCEWAKE_ACK_HSW;
4074 forcewake_ack = FORCEWAKE_ACK;
4076 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
4077 FORCEWAKE_ACK_TIMEOUT_MS))
4078 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
4080 I915_WRITE_NOTRACE(FORCEWAKE, FORCEWAKE_KERNEL);
4081 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
4083 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
4084 FORCEWAKE_ACK_TIMEOUT_MS))
4085 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
4087 __gen6_gt_wait_for_thread_c0(dev_priv);
4090 static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
4092 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
4093 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
4096 static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
4100 if (IS_HASWELL(dev_priv->dev))
4101 forcewake_ack = FORCEWAKE_ACK_HSW;
4103 forcewake_ack = FORCEWAKE_MT_ACK;
4105 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
4106 FORCEWAKE_ACK_TIMEOUT_MS))
4107 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
4109 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
4110 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
4112 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
4113 FORCEWAKE_ACK_TIMEOUT_MS))
4114 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
4116 __gen6_gt_wait_for_thread_c0(dev_priv);
4120 * Generally this is called implicitly by the register read function. However,
4121 * if some sequence requires the GT to not power down then this function should
4122 * be called at the beginning of the sequence followed by a call to
4123 * gen6_gt_force_wake_put() at the end of the sequence.
4125 void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
4127 unsigned long irqflags;
4129 spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
4130 if (dev_priv->forcewake_count++ == 0)
4131 dev_priv->gt.force_wake_get(dev_priv);
4132 spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
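/*
 * Usage sketch (hypothetical caller): any sequence of reads that must
 * not observe the GT powering down is bracketed as
 *
 *	gen6_gt_force_wake_get(dev_priv);
 *	... multiple I915_READ()s ...
 *	gen6_gt_force_wake_put(dev_priv);
 *
 * with the reference count above making nesting safe.
 */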
4135 void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
4138 gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
4139 if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
4140 "MMIO read or write has been dropped %x\n", gtfifodbg))
4141 I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
4144 static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
4146 I915_WRITE_NOTRACE(FORCEWAKE, 0);
4147 /* gen6_gt_check_fifodbg doubles as the POSTING_READ */
4148 gen6_gt_check_fifodbg(dev_priv);
4151 static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
4153 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
4154 /* gen6_gt_check_fifodbg doubles as the POSTING_READ */
4155 gen6_gt_check_fifodbg(dev_priv);
4159 * see gen6_gt_force_wake_get()
4161 void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
4163 unsigned long irqflags;
4165 spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
4166 if (--dev_priv->forcewake_count == 0)
4167 dev_priv->gt.force_wake_put(dev_priv);
4168 spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
4171 int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
4175 if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
4177 u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
4178 while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
4180 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
4182 if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
4184 dev_priv->gt_fifo_count = fifo;
4186 dev_priv->gt_fifo_count--;
4191 static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
4193 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
4196 static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
4198 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0,
4199 FORCEWAKE_ACK_TIMEOUT_MS))
4200 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
4202 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
4204 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1),
4205 FORCEWAKE_ACK_TIMEOUT_MS))
4206 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
4208 __gen6_gt_wait_for_thread_c0(dev_priv);
4211 static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
4213 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
4214 /* The below doubles as a POSTING_READ */
4215 gen6_gt_check_fifodbg(dev_priv);
4218 void intel_gt_reset(struct drm_device *dev)
4220 struct drm_i915_private *dev_priv = dev->dev_private;
4222 if (IS_VALLEYVIEW(dev)) {
4223 vlv_force_wake_reset(dev_priv);
4224 } else if (INTEL_INFO(dev)->gen >= 6) {
4225 __gen6_gt_force_wake_reset(dev_priv);
4226 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
4227 __gen6_gt_force_wake_mt_reset(dev_priv);
4231 void intel_gt_init(struct drm_device *dev)
4233 struct drm_i915_private *dev_priv = dev->dev_private;
4235 spin_lock_init(&dev_priv->gt_lock);
4237 intel_gt_reset(dev);
4239 if (IS_VALLEYVIEW(dev)) {
4240 dev_priv->gt.force_wake_get = vlv_force_wake_get;
4241 dev_priv->gt.force_wake_put = vlv_force_wake_put;
4242 } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
4243 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
4244 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
4245 } else if (IS_GEN6(dev)) {
4246 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
4247 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
4249 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
4250 intel_gen6_powersave_work);
4253 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
4255 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4257 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
4258 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
4262 I915_WRITE(GEN6_PCODE_DATA, *val);
4263 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
4265 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
4267 DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
4271 *val = I915_READ(GEN6_PCODE_DATA);
4272 I915_WRITE(GEN6_PCODE_DATA, 0);
4277 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
4279 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4281 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
4282 DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
4286 I915_WRITE(GEN6_PCODE_DATA, val);
4287 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
4289 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
4291 DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
4295 I915_WRITE(GEN6_PCODE_DATA, 0);
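/*
 * Usage sketch: a caller holding rps.hw_lock programs a frequency
 * table entry with
 *
 *	sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
 *				ia_freq | gpu_freq);
 *
 * and reads parameters back via sandybridge_pcode_read(), as the
 * gen6_enable_rps() and gen6_update_ring_freq() call sites above do.
 */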