/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the display controller supports Panel Self-Refresh on
 * display panels which have a remote frame buffer (RFB) implemented
 * according to the PSR spec in eDP 1.3. PSR allows the display to go into
 * lower standby states while the system is idle but the display remains on,
 * since it completely eliminates display refresh requests to DDR memory as
 * long as the frame buffer for that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows
 * us to power down the link and memory controller. For DSI panels the same
 * idea is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support, which
 * automatically enters/exits self-refresh mode. The hardware takes care of
 * sending the required DP aux message and could even retrain the link (that
 * part isn't enabled yet though). The hardware also keeps track of any
 * frontbuffer changes to know when to exit self-refresh mode again.
 * Unfortunately that part doesn't work too well, hence why the i915 PSR
 * support uses the software frontbuffer tracking to make sure it doesn't
 * miss a screen update. For this integration intel_psr_invalidate() and
 * intel_psr_flush() get called by the frontbuffer tracking code. Note that
 * because of locking issues the self-refresh re-enable code is done from a
 * work queue, which must be correctly synchronized/cancelled when shutting
 * down the pipe.
 */
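
/*
 * An illustrative (not verbatim) sketch of the flow described above, as
 * driven by the frontbuffer tracking code:
 *
 *	intel_psr_invalidate(dev_priv, bits);	   <- buffer dirtied,
 *						      PSR exits
 *	...frontbuffer rendering...
 *	intel_psr_flush(dev_priv, bits, origin);   <- rendering flushed,
 *						      re-enable scheduled
 *						      via psr.work
 */
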
#include <drm/drmP.h>

#include "intel_drv.h"
#include "i915_drv.h"

static bool is_edp_psr(struct intel_dp *intel_dp)
{
	return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
}

static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t val;

	val = I915_READ(VLV_PSRSTAT(pipe)) &
	      VLV_EDP_PSR_CURR_STATE_MASK;
	return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
	       (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
}

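/*
 * Write the PSR VSC SDP payload through the HSW+ video DIP data registers:
 * the DIP is disabled while the buffer is (re)programmed, unused dwords are
 * zeroed, and the VSC DIP is then re-enabled.
 */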
static void intel_psr_write_vsc(struct intel_dp *intel_dp,
				const struct edp_vsc_psr *vsc_psr)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
	uint32_t *data = (uint32_t *) vsc_psr;
	unsigned int i;

	/*
	 * As per BSpec (Pipe Video Data Island Packet), we need to disable
	 * the video DIP being updated before programming the video DIP data
	 * buffer registers for the DIP being updated.
	 */
	I915_WRITE(ctl_reg, 0);
	POSTING_READ(ctl_reg);

	for (i = 0; i < sizeof(*vsc_psr); i += 4) {
		I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
						   i >> 2), *data);
		data++;
	}
	for (; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4)
		I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
						   i >> 2), 0);

	I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
	POSTING_READ(ctl_reg);
}

static void vlv_psr_setup_vsc(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	uint32_t val;

	/* VLV auto-generates the VSC packet as per eDP 1.3 spec, Table 3.10 */
	val = I915_READ(VLV_VSCSDP(pipe));
	val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
	val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
	I915_WRITE(VLV_VSCSDP(pipe), val);
}

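/*
 * Note on the VSC SDP headers below: as we read the DP SDP header layout,
 * HB2 carries the packet revision and HB3 the number of valid payload
 * bytes, so each capability combination selects a different VSC revision
 * (values taken from the eDP 1.4 spec table referenced below).
 */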
static void skl_psr_setup_su_vsc(struct intel_dp *intel_dp)
{
	struct edp_vsc_psr psr_vsc;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* Prepare VSC Header for SU as per eDP 1.4 spec, Table 6.11 */
	memset(&psr_vsc, 0, sizeof(psr_vsc));
	psr_vsc.sdp_header.HB0 = 0;
	psr_vsc.sdp_header.HB1 = 0x7;
	if (dev_priv->psr.colorimetry_support &&
	    dev_priv->psr.y_cord_support) {
		psr_vsc.sdp_header.HB2 = 0x5;
		psr_vsc.sdp_header.HB3 = 0x13;
	} else if (dev_priv->psr.y_cord_support) {
		psr_vsc.sdp_header.HB2 = 0x4;
		psr_vsc.sdp_header.HB3 = 0xe;
	} else {
		psr_vsc.sdp_header.HB2 = 0x3;
		psr_vsc.sdp_header.HB3 = 0xc;
	}

	intel_psr_write_vsc(intel_dp, &psr_vsc);
}

static void hsw_psr_setup_vsc(struct intel_dp *intel_dp)
{
	struct edp_vsc_psr psr_vsc;

	/* Prepare VSC packet as per eDP 1.3 spec, Table 3.10 */
	memset(&psr_vsc, 0, sizeof(psr_vsc));
	psr_vsc.sdp_header.HB0 = 0;
	psr_vsc.sdp_header.HB1 = 0x7;
	psr_vsc.sdp_header.HB2 = 0x2;
	psr_vsc.sdp_header.HB3 = 0x8;
	intel_psr_write_vsc(intel_dp, &psr_vsc);
}

static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
{
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
			   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
}

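/*
 * On gen9+ the PSR hardware reuses the regular DP AUX channel registers,
 * while older platforms provide dedicated EDP_PSR_AUX_* mirrors; the two
 * helpers below pick the right set.
 */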
static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
				  enum port port)
{
	if (INTEL_INFO(dev_priv)->gen >= 9)
		return DP_AUX_CH_CTL(port);
	else
		return EDP_PSR_AUX_CTL;
}

static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
				   enum port port, int index)
{
	if (INTEL_INFO(dev_priv)->gen >= 9)
		return DP_AUX_CH_DATA(port, index);
	else
		return EDP_PSR_AUX_DATA(index);
}

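/*
 * The aux_msg below is a pre-packed native AUX write of DP_SET_POWER = D0
 * that is handed to the PSR AUX data registers, so the hardware can wake
 * the sink on its own when exiting PSR (see the DOC section above).
 */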
static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t aux_clock_divider;
	i915_reg_t aux_ctl_reg;
	static const uint8_t aux_msg[] = {
		[0] = DP_AUX_NATIVE_WRITE << 4,
		[1] = DP_SET_POWER >> 8,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,
		[4] = DP_SET_POWER_D0,
	};
	enum port port = dig_port->port;
	u32 aux_ctl;
	int i;

	BUILD_BUG_ON(sizeof(aux_msg) > 20);

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Enable AUX frame sync at sink */
	if (dev_priv->psr.aux_frame_sync)
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
				   DP_AUX_FRAME_SYNC_ENABLE);

	if (dev_priv->psr.link_standby)
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
				   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
	else
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
				   DP_PSR_ENABLE);

	aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port);

	/* Setup AUX registers */
	for (i = 0; i < sizeof(aux_msg); i += 4)
		I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2),
			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg),
					     aux_clock_divider);
	I915_WRITE(aux_ctl_reg, aux_ctl);
}

static void vlv_psr_enable_source(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	/* Transition from PSR_state 0 to PSR_state 1, i.e. PSR Inactive */
	I915_WRITE(VLV_PSRCTL(pipe),
		   VLV_EDP_PSR_MODE_SW_TIMER |
		   VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
		   VLV_EDP_PSR_ENABLE);
}

static void vlv_psr_activate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	/* Let's do the transition from PSR_state 1 to PSR_state 2, that is
	 * PSR transition to active - static frame transmission. The hardware
	 * is then responsible for the transition to PSR_state 3, that is
	 * PSR active - no Remote Frame Buffer (RFB) update.
	 */
	I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
		   VLV_EDP_PSR_ACTIVE_ENTRY);
}

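/*
 * EDP_PSR_CTL packs the idle frame threshold, the maximum sleep time and
 * the TP1/TP2/TP3 training-pattern wakeup times; the VBT-provided wakeup
 * times are bucketed into the nearest register encoding below.
 */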
static void intel_enable_source_psr1(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	uint32_t max_sleep_time = 0x1f;
	/*
	 * Let's respect VBT in case VBT asks for a higher idle_frame value.
	 * Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases. Also there are cases
	 * where the sink should be able to train with 5 or 6 idle patterns.
	 */
	uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
	uint32_t val = EDP_PSR_ENABLE;

	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
	val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;

	if (IS_HASWELL(dev_priv))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (dev_priv->psr.link_standby)
		val |= EDP_PSR_LINK_STANDBY;

	if (dev_priv->vbt.psr.tp1_wakeup_time > 5)
		val |= EDP_PSR_TP1_TIME_2500us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time > 1)
		val |= EDP_PSR_TP1_TIME_500us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time > 0)
		val |= EDP_PSR_TP1_TIME_100us;
	else
		val |= EDP_PSR_TP1_TIME_0us;

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
		val |= EDP_PSR_TP2_TP3_TIME_2500us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
		val |= EDP_PSR_TP2_TP3_TIME_100us;
	else
		val |= EDP_PSR_TP2_TP3_TIME_0us;

	if (intel_dp_source_supports_hbr2(intel_dp) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP1_TP3_SEL;
	else
		val |= EDP_PSR_TP1_TP2_SEL;

	I915_WRITE(EDP_PSR_CTL, val);
}

static void intel_enable_source_psr2(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	/*
	 * Let's respect VBT in case VBT asks for a higher idle_frame value.
	 * Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases. Also there are cases
	 * where the sink should be able to train with 5 or 6 idle patterns.
	 */
	uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
	uint32_t val;

	val = idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;

	/* FIXME: selective update is probably totally broken because it
	 * doesn't mesh at all with our frontbuffer tracking. And the hw
	 * alone isn't good enough. */
	val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
		val |= EDP_PSR2_TP2_TIME_2500;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
		val |= EDP_PSR2_TP2_TIME_500;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
		val |= EDP_PSR2_TP2_TIME_100;
	else
		val |= EDP_PSR2_TP2_TIME_50;

	I915_WRITE(EDP_PSR2_CTL, val);
}

static void hsw_psr_enable_source(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* psr1 and psr2 are mutually exclusive. */
	if (dev_priv->psr.psr2_support)
		intel_enable_source_psr2(intel_dp);
	else
		intel_enable_source_psr1(intel_dp);
}

static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *adjusted_mode =
		&intel_crtc->config->base.adjusted_mode;
	int psr_setup_time;

	lockdep_assert_held(&dev_priv->psr.lock);
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	dev_priv->psr.source_ok = false;

	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms with DDI implementation of PSR have different
	 * PSR registers per transcoder and we only implement the transcoder
	 * EDP ones. Since by Display design transcoder EDP is tied to port A
	 * we can safely escape based on port A.
	 */
	if (HAS_DDI(dev_priv) && dig_port->port != PORT_A) {
		DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
		return false;
	}

	if (!i915.enable_psr) {
		DRM_DEBUG_KMS("PSR disabled by flag\n");
		return false;
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    !dev_priv->psr.link_standby) {
		DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
		return false;
	}

	if (IS_HASWELL(dev_priv) &&
	    I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) &
		      S3D_ENABLE) {
		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is enabled\n");
		return false;
	}

	if (IS_HASWELL(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("PSR condition failed: Interlaced mode is enabled\n");
		return false;
	}

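	/*
	 * The sink's PSR setup time (from the DPCD) has to fit within the
	 * vertical blank: the scanline-converted setup time is checked
	 * against crtc_vtotal - crtc_vdisplay - 1 below.
	 */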
	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
	if (psr_setup_time < 0) {
		DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			      intel_dp->psr_dpcd[1]);
		return false;
	}

	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
		DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
			      psr_setup_time);
		return false;
	}

	/* PSR2 is restricted to work with panel resolutions up to 3200x2000 */
	if (intel_crtc->config->pipe_src_w > 3200 ||
	    intel_crtc->config->pipe_src_h > 2000) {
		dev_priv->psr.psr2_support = false;
		return false;
	}

	dev_priv->psr.source_ok = true;
	return true;
}

static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (dev_priv->psr.psr2_support)
		WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
	else
		WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
	WARN_ON(dev_priv->psr.active);
	lockdep_assert_held(&dev_priv->psr.lock);

	/* Enable/Re-enable PSR on the host */
	if (HAS_DDI(dev_priv))
		/* On HSW+ once we enable PSR on the source it will activate
		 * itself as soon as the configured idle_frame count is
		 * reached, so we just actually enable it here at activation
		 * time.
		 */
		hsw_psr_enable_source(intel_dp);
	else
		vlv_psr_activate(intel_dp);

	dev_priv->psr.active = true;
}

/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
void intel_psr_enable(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!HAS_PSR(dev_priv)) {
		DRM_DEBUG_KMS("PSR not supported on this platform\n");
		return;
	}

	if (!is_edp_psr(intel_dp)) {
		DRM_DEBUG_KMS("PSR not supported by this panel\n");
		return;
	}

	mutex_lock(&dev_priv->psr.lock);
	if (dev_priv->psr.enabled) {
		DRM_DEBUG_KMS("PSR already in use\n");
		goto unlock;
	}

	if (!intel_psr_match_conditions(intel_dp))
		goto unlock;

	dev_priv->psr.busy_frontbuffer_bits = 0;

	if (HAS_DDI(dev_priv)) {
		if (dev_priv->psr.psr2_support) {
			skl_psr_setup_su_vsc(intel_dp);
		} else {
			/* set up vsc header for psr1 */
			hsw_psr_setup_vsc(intel_dp);
		}

		/*
		 * Per Spec: Avoid continuous PSR exit by masking MEMUP and
		 * HPD. Also mask LPSP to avoid a dependency on other drivers
		 * that might block runtime_pm, besides preventing other hw
		 * tracking issues now that we can rely on frontbuffer
		 * tracking.
		 */
		I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
			   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);

		/* Enable PSR on the panel */
		hsw_psr_enable_sink(intel_dp);

		if (INTEL_GEN(dev_priv) >= 9)
			intel_psr_activate(intel_dp);
	} else {
		vlv_psr_setup_vsc(intel_dp);

		/* Enable PSR on the panel */
		vlv_psr_enable_sink(intel_dp);

		/* On HSW+ enable_source also means going to the PSR
		 * entry/active state as soon as idle_frame is achieved, which
		 * would be too soon here. On VLV, however, enable_source just
		 * enables PSR and leaves it in the inactive state. So we can
		 * do this prior to the active transition, i.e. here.
		 */
		vlv_psr_enable_source(intel_dp);
	}

	/*
	 * FIXME: Activation should happen immediately since this function
	 * is just called after the pipe is fully trained and enabled.
	 * However on every platform we face issues when the first activation
	 * follows a modeset too quickly:
	 * - On VLV/CHV we get a blank screen on first activation.
	 * - On HSW/BDW we get a recoverable frozen screen until the next
	 *   exit-activate sequence.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		schedule_delayed_work(&dev_priv->psr.work,
				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));

	dev_priv->psr.enabled = intel_dp;
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

static void vlv_psr_disable(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(intel_dig_port->base.base.crtc);
	uint32_t val;

	if (dev_priv->psr.active) {
		/* Put VLV PSR back to PSR_state 0 (PSR Disabled). */
		if (intel_wait_for_register(dev_priv,
					    VLV_PSRSTAT(intel_crtc->pipe),
					    VLV_EDP_PSR_IN_TRANS,
					    0,
					    1))
			WARN(1, "PSR transition took longer than expected\n");

		val = I915_READ(VLV_PSRCTL(intel_crtc->pipe));
		val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
		val &= ~VLV_EDP_PSR_ENABLE;
		val &= ~VLV_EDP_PSR_MODE_MASK;
		I915_WRITE(VLV_PSRCTL(intel_crtc->pipe), val);

		dev_priv->psr.active = false;
	} else {
		WARN_ON(vlv_is_psr_active_on_pipe(dev, intel_crtc->pipe));
	}
}

static void hsw_psr_disable(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (dev_priv->psr.active) {
		if (dev_priv->psr.aux_frame_sync)
			drm_dp_dpcd_writeb(&intel_dp->aux,
					   DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
					   0);

		if (dev_priv->psr.psr2_support) {
			I915_WRITE(EDP_PSR2_CTL,
				   I915_READ(EDP_PSR2_CTL) &
					~(EDP_PSR2_ENABLE |
					  EDP_SU_TRACK_ENABLE));
			/* Wait till PSR2 is idle */
			if (intel_wait_for_register(dev_priv,
						    EDP_PSR2_STATUS_CTL,
						    EDP_PSR2_STATUS_STATE_MASK,
						    0,
						    2000))
				DRM_ERROR("Timed out waiting for PSR2 Idle State\n");
		} else {
			I915_WRITE(EDP_PSR_CTL,
				   I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
			/* Wait till PSR1 is idle */
			if (intel_wait_for_register(dev_priv,
						    EDP_PSR_STATUS_CTL,
						    EDP_PSR_STATUS_STATE_MASK,
						    0,
						    2000))
				DRM_ERROR("Timed out waiting for PSR Idle State\n");
		}
		dev_priv->psr.active = false;
	} else {
		if (dev_priv->psr.psr2_support)
			WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
		else
			WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
	}
}

/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 *
 * This function needs to be called before the pipe is disabled.
 */
void intel_psr_disable(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	/* Disable PSR on Source */
	if (HAS_DDI(dev_priv))
		hsw_psr_disable(intel_dp);
	else
		vlv_psr_disable(intel_dp);

	/* Disable PSR on Sink */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

	dev_priv->psr.enabled = NULL;
	mutex_unlock(&dev_priv->psr.lock);

	cancel_delayed_work_sync(&dev_priv->psr.work);
}

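/*
 * Delayed-work handler that re-enables PSR after an invalidate/flush cycle.
 * It runs from psr.work because re-enabling directly from the flush path
 * would be a locking problem (see the DOC section above).
 */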
static void intel_psr_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.work.work);
	struct intel_dp *intel_dp = dev_priv->psr.enabled;
	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	/* We have to make sure PSR is ready for re-enable, otherwise it
	 * stays disabled until the next full enable/disable cycle.
	 * PSR might take some time to get fully disabled
	 * and be ready for re-enable.
	 */
	if (HAS_DDI(dev_priv)) {
		if (dev_priv->psr.psr2_support) {
			if (intel_wait_for_register(dev_priv,
						    EDP_PSR2_STATUS_CTL,
						    EDP_PSR2_STATUS_STATE_MASK,
						    0,
						    50)) {
				DRM_ERROR("Timed out waiting for PSR2 Idle for re-enable\n");
				return;
			}
		} else {
			if (intel_wait_for_register(dev_priv,
						    EDP_PSR_STATUS_CTL,
						    EDP_PSR_STATUS_STATE_MASK,
						    0,
						    50)) {
				DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
				return;
			}
		}
	} else {
		if (intel_wait_for_register(dev_priv,
					    VLV_PSRSTAT(pipe),
					    VLV_EDP_PSR_IN_TRANS,
					    0,
					    1)) {
			DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
			return;
		}
	}
	mutex_lock(&dev_priv->psr.lock);
	intel_dp = dev_priv->psr.enabled;

	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate, hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (dev_priv->psr.busy_frontbuffer_bits)
		goto unlock;

	intel_psr_activate(intel_dp);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

static void intel_psr_exit(struct drm_i915_private *dev_priv)
{
	struct intel_dp *intel_dp = dev_priv->psr.enabled;
	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	u32 val;

	if (!dev_priv->psr.active)
		return;

	if (HAS_DDI(dev_priv)) {
		if (dev_priv->psr.aux_frame_sync)
			drm_dp_dpcd_writeb(&intel_dp->aux,
					   DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
					   0);
		if (dev_priv->psr.psr2_support) {
			val = I915_READ(EDP_PSR2_CTL);
			WARN_ON(!(val & EDP_PSR2_ENABLE));
			I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
		} else {
			val = I915_READ(EDP_PSR_CTL);
			WARN_ON(!(val & EDP_PSR_ENABLE));
			I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
		}
	} else {
		val = I915_READ(VLV_PSRCTL(pipe));

		/* Here we do the transition from PSR_state 3 to PSR_state 5
		 * directly, since PSR_state 4 (active with single frame
		 * update) can be skipped. PSR_state 5 is PSR exit; the
		 * hardware is then responsible for transitioning back to
		 * PSR_state 1 (PSR inactive), the same state as after
		 * vlv_edp_psr_enable_source.
		 */
		val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
		I915_WRITE(VLV_PSRCTL(pipe), val);

		/* Send AUX wake up - Spec says after transitioning to PSR
		 * active we have to send an AUX wake up by writing 01h to
		 * DPCD 600h of the sink device.
		 * XXX: This might slow down the transition, but without this
		 * HW doesn't complete the transition to PSR_state 1 and we
		 * never get the screen updated.
		 */
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
				   DP_SET_POWER_D0);
	}

	dev_priv->psr.active = false;
}

/**
 * intel_psr_single_frame_update - Single Frame Update
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Some platforms support a single frame update feature that is used to
 * send and update only one frame to the Remote Frame Buffer.
 * So far it is only implemented for Valleyview and Cherryview because
 * hardware requires this to be done before a page flip.
 */
void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
				   unsigned frontbuffer_bits)
{
	struct drm_crtc *crtc;
	enum pipe pipe;
	u32 val;

	/*
	 * Single frame update is already supported on BDW+ but it requires
	 * many W/A and it isn't really needed.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
		val = I915_READ(VLV_PSRCTL(pipe));

		/*
		 * We need to set this bit before writing registers for a
		 * flip. The bit is self-clearing once we get to the PSR
		 * active state.
		 */
		I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
	}
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
			  unsigned frontbuffer_bits)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;

	if (frontbuffer_bits)
		intel_psr_exit(dev_priv);

	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_flush - Flush PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_i915_private *dev_priv,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* By definition flush = invalidate + flush */
	if (frontbuffer_bits)
		intel_psr_exit(dev_priv);

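	/*
	 * Re-enable lazily: only once PSR is inactive, no frontbuffers are
	 * busy and no re-enable work is already pending (see
	 * intel_psr_work()).
	 */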
	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
		if (!work_busy(&dev_priv->psr.work.work))
			schedule_delayed_work(&dev_priv->psr.work,
					      msecs_to_jiffies(100));
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @dev_priv: i915 device private
 *
 * This function is called only once at driver load to initialize the basic
 * PSR state.
 */
void intel_psr_init(struct drm_i915_private *dev_priv)
{
	dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
		HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;

	/* Per platform default: all disabled. */
	if (i915.enable_psr == -1)
		i915.enable_psr = 0;

	/* Set link_standby / link_off defaults */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		/* HSW and BDW require workarounds that we don't implement. */
		dev_priv->psr.link_standby = false;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		/* On VLV and CHV only standby mode is supported. */
		dev_priv->psr.link_standby = true;
	else
		/* For new platforms let's respect VBT back again */
		dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;

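	/*
	 * Note: per the overrides below, i915.enable_psr == 2 forces link
	 * standby and i915.enable_psr == 3 forces the main link off, while
	 * 0/1 plainly disable/enable PSR (inferred from this code; the
	 * module parameter help text is authoritative).
	 */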
	/* Override link_standby / link_off defaults */
	if (i915.enable_psr == 2 && !dev_priv->psr.link_standby) {
		DRM_DEBUG_KMS("PSR: Forcing link standby\n");
		dev_priv->psr.link_standby = true;
	}
	if (i915.enable_psr == 3 && dev_priv->psr.link_standby) {
		DRM_DEBUG_KMS("PSR: Forcing main link off\n");
		dev_priv->psr.link_standby = false;
	}

	INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
	mutex_init(&dev_priv->psr.lock);
}