/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell, the display controller supports Panel Self-Refresh on
 * display panels which have a remote frame buffer (RFB) implemented according
 * to the PSR spec in eDP 1.3. The PSR feature allows the display to go to
 * lower standby states when the system is idle but the display is on, as it
 * eliminates display refresh requests to DDR memory completely as long as the
 * frame buffer for that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, which is why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 */

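/*
 * A minimal sketch of the expected call flow, assuming a single eDP panel
 * (the invalidate/flush calls come from the frontbuffer tracking code):
 *
 *   intel_psr_enable(intel_dp, crtc_state);        <- modeset enable
 *   ...
 *   intel_psr_invalidate(dev_priv, bits, origin);  <- frontbuffer dirtied
 *   intel_psr_flush(dev_priv, bits, origin);       <- rendering flushed,
 *                                                     re-activation scheduled
 *   ...
 *   intel_psr_disable(intel_dp, old_crtc_state);   <- modeset disable
 */
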
#include <drm/drmP.h>

#include "intel_drv.h"
#include "i915_drv.h"

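/*
 * Resolve the effective global PSR enable state: the debugfs debug value can
 * force PSR off, otherwise the enable_psr module parameter decides.
 */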
static bool psr_global_enabled(u32 debug)
{
	switch (debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DEFAULT:
		return i915_modparams.enable_psr;
	case I915_PSR_DEBUG_DISABLE:
		return false;
	default:
		return true;
	}
}

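/* Decide whether PSR2 should be used, honouring the debugfs PSR1 override. */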
static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
			       const struct intel_crtc_state *crtc_state)
{
	switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_FORCE_PSR1:
		return false;
	default:
		return crtc_state->has_psr2;
	}
}

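/*
 * Unmask PSR error interrupts for all supported transcoders, plus the
 * entry/exit debug interrupts when I915_PSR_DEBUG_IRQ is set.
 */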
void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug)
{
	u32 debug_mask, mask;

	mask = EDP_PSR_ERROR(TRANSCODER_EDP);
	debug_mask = EDP_PSR_POST_EXIT(TRANSCODER_EDP) |
		     EDP_PSR_PRE_ENTRY(TRANSCODER_EDP);

	if (INTEL_GEN(dev_priv) >= 8) {
		mask |= EDP_PSR_ERROR(TRANSCODER_A) |
			EDP_PSR_ERROR(TRANSCODER_B) |
			EDP_PSR_ERROR(TRANSCODER_C);

		debug_mask |= EDP_PSR_POST_EXIT(TRANSCODER_A) |
			      EDP_PSR_PRE_ENTRY(TRANSCODER_A) |
			      EDP_PSR_POST_EXIT(TRANSCODER_B) |
			      EDP_PSR_PRE_ENTRY(TRANSCODER_B) |
			      EDP_PSR_POST_EXIT(TRANSCODER_C) |
			      EDP_PSR_PRE_ENTRY(TRANSCODER_C);
	}

	if (debug & I915_PSR_DEBUG_IRQ)
		mask |= debug_mask;

	I915_WRITE(EDP_PSR_IMR, ~mask);
}

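/* Decode the PSR_EVENT register bits into human-readable debug messages. */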
static void psr_event_print(u32 val, bool psr2_enabled)
{
	DRM_DEBUG_KMS("PSR exit events: 0x%x\n", val);
	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
		DRM_DEBUG_KMS("\tPSR2 watchdog timer expired\n");
	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
		DRM_DEBUG_KMS("\tPSR2 disabled\n");
	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
		DRM_DEBUG_KMS("\tSU dirty FIFO underrun\n");
	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
		DRM_DEBUG_KMS("\tSU CRC FIFO underrun\n");
	if (val & PSR_EVENT_GRAPHICS_RESET)
		DRM_DEBUG_KMS("\tGraphics reset\n");
	if (val & PSR_EVENT_PCH_INTERRUPT)
		DRM_DEBUG_KMS("\tPCH interrupt\n");
	if (val & PSR_EVENT_MEMORY_UP)
		DRM_DEBUG_KMS("\tMemory up\n");
	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
		DRM_DEBUG_KMS("\tFront buffer modification\n");
	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
		DRM_DEBUG_KMS("\tPSR watchdog timer expired\n");
	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
		DRM_DEBUG_KMS("\tPIPE registers updated\n");
	if (val & PSR_EVENT_REGISTER_UPDATE)
		DRM_DEBUG_KMS("\tRegister updated\n");
	if (val & PSR_EVENT_HDCP_ENABLE)
		DRM_DEBUG_KMS("\tHDCP enabled\n");
	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
		DRM_DEBUG_KMS("\tKVMR session enabled\n");
	if (val & PSR_EVENT_VBI_ENABLE)
		DRM_DEBUG_KMS("\tVBI enabled\n");
	if (val & PSR_EVENT_LPSP_MODE_EXIT)
		DRM_DEBUG_KMS("\tLPSP mode exited\n");
	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
		DRM_DEBUG_KMS("\tPSR disabled\n");
}

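/*
 * Handle PSR interrupts: log AUX errors and record timestamps for PSR entry
 * attempts and completed exits, dumping the exit reasons on gen9+.
 */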
void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
{
	u32 transcoders = BIT(TRANSCODER_EDP);
	enum transcoder cpu_transcoder;
	ktime_t time_ns = ktime_get();

	if (INTEL_GEN(dev_priv) >= 8)
		transcoders |= BIT(TRANSCODER_A) |
			       BIT(TRANSCODER_B) |
			       BIT(TRANSCODER_C);

	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
		/* FIXME: Exit PSR and link train manually when this happens. */
		if (psr_iir & EDP_PSR_ERROR(cpu_transcoder))
			DRM_DEBUG_KMS("[transcoder %s] PSR aux error\n",
				      transcoder_name(cpu_transcoder));

		if (psr_iir & EDP_PSR_PRE_ENTRY(cpu_transcoder)) {
			dev_priv->psr.last_entry_attempt = time_ns;
			DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
				      transcoder_name(cpu_transcoder));
		}

		if (psr_iir & EDP_PSR_POST_EXIT(cpu_transcoder)) {
			dev_priv->psr.last_exit = time_ns;
			DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
				      transcoder_name(cpu_transcoder));

			if (INTEL_GEN(dev_priv) >= 9) {
				u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
				bool psr2_enabled = dev_priv->psr.psr2_enabled;

				I915_WRITE(PSR_EVENT(cpu_transcoder), val);
				psr_event_print(val, psr2_enabled);
			}
		}
	}
}

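/* Check whether the sink supports the VSC SDP extension for colorimetry. */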
static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
	uint8_t dprx = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
			      &dprx) != 1)
		return false;
	return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
}

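/* Check whether the sink supports ALPM (Advanced Link Power Management). */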
static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
	uint8_t alpm_caps = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
			      &alpm_caps) != 1)
		return false;
	return alpm_caps & DP_ALPM_CAP;
}

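/*
 * Read how many frames the sink needs to resynchronize; this feeds the idle
 * frame and "frames before SU" thresholds programmed at activation time.
 */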
static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
	u8 val = 8; /* assume the worst if we can't read the value */

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
	else
		DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n");
	return val;
}

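/*
 * Read and cache the sink's PSR DPCD capabilities at detection time,
 * including PSR2 (selective update) and colorimetry support.
 */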
void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));

	if (!intel_dp->psr_dpcd[0])
		return;

	DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
		      intel_dp->psr_dpcd[0]);

	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
		DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
		return;
	}

	dev_priv->psr.sink_support = true;
	dev_priv->psr.sink_sync_latency =
		intel_dp_get_sink_sync_latency(intel_dp);

	WARN_ON(dev_priv->psr.dp);
	dev_priv->psr.dp = intel_dp;

	if (INTEL_GEN(dev_priv) >= 9 &&
	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
		bool y_req = intel_dp->psr_dpcd[1] &
			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
		bool alpm = intel_dp_get_alpm_status(intel_dp);

		/*
		 * All panels that support PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in the VSC, but we
		 * are only sure that it is going to be used when required by
		 * the panel. This way the panel is capable of doing selective
		 * updates without an AUX frame sync.
		 *
		 * To support PSR version 02h, and version 03h panels without
		 * the Y-coordinate requirement, we would need to enable GTC
		 * first.
		 */
		dev_priv->psr.sink_psr2_support = y_req && alpm;
		DRM_DEBUG_KMS("PSR2 %ssupported\n",
			      dev_priv->psr.sink_psr2_support ? "" : "not ");

		if (dev_priv->psr.sink_psr2_support) {
			dev_priv->psr.colorimetry_support =
				intel_dp_get_colorimetry_status(intel_dp);
		}
	}
}

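/*
 * Program the VSC SDP that the source sends to the sink; PSR2 and
 * colorimetry need newer header revisions than plain PSR1.
 */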
static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
				const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct edp_vsc_psr psr_vsc;

	if (dev_priv->psr.psr2_enabled) {
		/* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		if (dev_priv->psr.colorimetry_support) {
			psr_vsc.sdp_header.HB2 = 0x5;
			psr_vsc.sdp_header.HB3 = 0x13;
		} else {
			psr_vsc.sdp_header.HB2 = 0x4;
			psr_vsc.sdp_header.HB3 = 0xe;
		}
	} else {
		/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		psr_vsc.sdp_header.HB2 = 0x2;
		psr_vsc.sdp_header.HB3 = 0x8;
	}

	intel_dig_port->write_infoframe(&intel_dig_port->base.base, crtc_state,
					DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
}

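/*
 * Pre-program the AUX message (a DP_SET_POWER write to D0) that the hardware
 * sends on PSR exit, using the same control bits as a normal AUX transfer.
 */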
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 aux_clock_divider, aux_ctl;
	int i;
	static const uint8_t aux_msg[] = {
		[0] = DP_AUX_NATIVE_WRITE << 4,
		[1] = DP_SET_POWER >> 8,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1, /* message length: 1 byte, encoded as length - 1 */
		[4] = DP_SET_POWER_D0,
	};
	u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
			   EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
			   EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
			   EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;

	BUILD_BUG_ON(sizeof(aux_msg) > 20);
	for (i = 0; i < sizeof(aux_msg); i += 4)
		I915_WRITE(EDP_PSR_AUX_DATA(i >> 2),
			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Start with bits set for DDI_AUX_CTL register */
	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
					     aux_clock_divider);

	/* Select only valid bits for SRD_AUX_CTL */
	aux_ctl &= psr_aux_mask;
	I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl);
}

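/*
 * Configure the sink side: ALPM for PSR2, main link and CRC options, then
 * the PSR enable itself, and make sure the panel is in power state D0.
 */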
static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 dpcd_val = DP_PSR_ENABLE;

	/* Enable ALPM at sink for psr2 */
	if (dev_priv->psr.psr2_enabled) {
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
				   DP_ALPM_ENABLE);
		dpcd_val |= DP_PSR_ENABLE_PSR2;
	}

	if (dev_priv->psr.link_standby)
		dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
	if (!dev_priv->psr.psr2_enabled && INTEL_GEN(dev_priv) >= 8)
		dpcd_val |= DP_PSR_CRC_VERIFICATION;
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

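/*
 * Build and write EDP_PSR_CTL: idle frame threshold, max sleep time, link
 * standby and the TP1/TP2/TP3 wakeup times from VBT, then enable PSR1.
 */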
static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 max_sleep_time = 0x1f;
	u32 val = EDP_PSR_ENABLE;

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);

	/* sink_sync_latency of 8 means the source has to wait for more than 8
	 * frames; we'll go with 9 frames for now.
	 */
	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
	val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;

	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
	if (IS_HASWELL(dev_priv))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (dev_priv->psr.link_standby)
		val |= EDP_PSR_LINK_STANDBY;

	if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
		val |= EDP_PSR_TP1_TIME_0us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
		val |= EDP_PSR_TP1_TIME_100us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
		val |= EDP_PSR_TP1_TIME_500us;
	else
		val |= EDP_PSR_TP1_TIME_2500us;

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
		val |= EDP_PSR_TP2_TP3_TIME_0us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR_TP2_TP3_TIME_100us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
	else
		val |= EDP_PSR_TP2_TP3_TIME_2500us;

	if (intel_dp_source_supports_hbr2(intel_dp) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP1_TP3_SEL;
	else
		val |= EDP_PSR_TP1_TP2_SEL;

	if (INTEL_GEN(dev_priv) >= 8)
		val |= EDP_PSR_CRC_ENABLE;

	val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
	I915_WRITE(EDP_PSR_CTL, val);
}

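/*
 * Build and write EDP_PSR2_CTL: idle frame threshold, selective update
 * tracking, Y-coordinate support and the TP2 wakeup time, then enable PSR2.
 */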
static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val;

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);

	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
	val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;

	/* FIXME: selective update is probably totally broken because it doesn't
	 * mesh at all with our frontbuffer tracking. And the hw alone isn't
	 * good enough. */
	val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		val |= EDP_Y_COORDINATE_ENABLE;

	val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us >= 0 &&
	    dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 50)
		val |= EDP_PSR2_TP2_TIME_50us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR2_TP2_TIME_100us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR2_TP2_TIME_500us;
	else
		val |= EDP_PSR2_TP2_TIME_2500us;

	I915_WRITE(EDP_PSR2_CTL, val);
}

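/* Check sink support and resolution limits before allowing PSR2. */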
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay;
	int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
	int psr_max_h = 0, psr_max_v = 0;

	/*
	 * FIXME psr2_support is messed up. It's both computed
	 * dynamically during PSR enable, and extracted from sink
	 * caps during eDP detection.
	 */
	if (!dev_priv->psr.sink_psr2_support)
		return false;

	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		psr_max_h = 4096;
		psr_max_v = 2304;
	} else if (IS_GEN9(dev_priv)) {
		psr_max_h = 3640;
		psr_max_v = 2304;
	}

	if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
		DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
			      crtc_hdisplay, crtc_vdisplay,
			      psr_max_h, psr_max_v);
		return false;
	}

	return true;
}

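/**
 * intel_psr_compute_config - Compute PSR configuration
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * Checks the source and sink PSR constraints and, when they are all met,
 * flags the new CRTC state with has_psr (and possibly has_psr2).
 */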
void intel_psr_compute_config(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	int psr_setup_time;

	if (!CAN_PSR(dev_priv))
		return;

	if (intel_dp != dev_priv->psr.dp)
		return;

	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms with DDI implementation of PSR have different
	 * PSR registers per transcoder and we only implement the transcoder
	 * EDP ones. Since by display design transcoder EDP is tied to port A,
	 * we can safely escape based on port A.
	 */
	if (dig_port->base.port != PORT_A) {
		DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
		return;
	}

	if (IS_HASWELL(dev_priv) &&
	    I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) &
	    S3D_ENABLE) {
		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
		return;
	}

	if (IS_HASWELL(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
		return;
	}

	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
	if (psr_setup_time < 0) {
		DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			      intel_dp->psr_dpcd[1]);
		return;
	}

	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
		DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
			      psr_setup_time);
		return;
	}

	crtc_state->has_psr = true;
	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
}

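/* Arm the hardware self-refresh logic for the mode picked at enable time. */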
static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (INTEL_GEN(dev_priv) >= 9)
		WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
	WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
	WARN_ON(dev_priv->psr.active);
	lockdep_assert_held(&dev_priv->psr.lock);

	/* psr1 and psr2 are mutually exclusive. */
	if (dev_priv->psr.psr2_enabled)
		hsw_activate_psr2(intel_dp);
	else
		hsw_activate_psr1(intel_dp);

	dev_priv->psr.active = true;
}

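/*
 * Program the source side: the AUX message registers on HSW/BDW, the
 * per-transcoder chicken bits for PSR2, and the PSR exit event masks.
 */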
static void intel_psr_enable_source(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	/* Only HSW and BDW have PSR AUX registers that need to be setup.
	 * SKL+ use hardcoded values for PSR AUX transactions.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_psr_setup_aux(intel_dp);

	if (dev_priv->psr.psr2_enabled) {
		u32 chicken = I915_READ(CHICKEN_TRANS(cpu_transcoder));

		if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv))
			chicken |= (PSR2_VSC_ENABLE_PROG_HEADER
				   | PSR2_ADD_VERTICAL_LINE_COUNT);
		else
			chicken &= ~VSC_DATA_SEL_SOFTWARE_CONTROL;
		I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);

		I915_WRITE(EDP_PSR_DEBUG,
			   EDP_PSR_DEBUG_MASK_MEMUP |
			   EDP_PSR_DEBUG_MASK_HPD |
			   EDP_PSR_DEBUG_MASK_LPSP |
			   EDP_PSR_DEBUG_MASK_MAX_SLEEP |
			   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
	} else {
		/*
		 * Per spec: avoid continuous PSR exit by masking MEMUP
		 * and HPD. Also mask LPSP to avoid a dependency on other
		 * drivers that might block runtime_pm, besides preventing
		 * other hw tracking issues now that we can rely on
		 * frontbuffer tracking.
		 */
		I915_WRITE(EDP_PSR_DEBUG,
			   EDP_PSR_DEBUG_MASK_MEMUP |
			   EDP_PSR_DEBUG_MASK_HPD |
			   EDP_PSR_DEBUG_MASK_LPSP |
			   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE |
			   EDP_PSR_DEBUG_MASK_MAX_SLEEP);
	}
}

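/* Actually enable PSR; callers must hold psr.lock and have set psr2_enabled. */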
static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
				    const struct intel_crtc_state *crtc_state)
{
	struct intel_dp *intel_dp = dev_priv->psr.dp;

	if (dev_priv->psr.enabled)
		return;

	DRM_DEBUG_KMS("Enabling PSR%s\n",
		      dev_priv->psr.psr2_enabled ? "2" : "1");
	intel_psr_setup_vsc(intel_dp, crtc_state);
	intel_psr_enable_sink(intel_dp);
	intel_psr_enable_source(intel_dp, crtc_state);
	dev_priv->psr.enabled = true;

	intel_psr_activate(intel_dp);
}

/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
void intel_psr_enable(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	WARN_ON(dev_priv->drrs.dp);

	mutex_lock(&dev_priv->psr.lock);
	if (dev_priv->psr.prepared) {
		DRM_DEBUG_KMS("PSR already in use\n");
		goto unlock;
	}

	dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
	dev_priv->psr.busy_frontbuffer_bits = 0;
	dev_priv->psr.prepared = true;

	if (psr_global_enabled(dev_priv->psr.debug))
		intel_psr_enable_locked(dev_priv, crtc_state);
	else
		DRM_DEBUG_KMS("PSR disabled by flag\n");

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

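/*
 * Disable the hardware side and wait for the PSR state machine to go idle;
 * when PSR was already inactive, only check that the enable bits are clear.
 */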
static void
intel_psr_disable_source(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (dev_priv->psr.active) {
		i915_reg_t psr_status;
		u32 psr_status_mask;

		if (dev_priv->psr.psr2_enabled) {
			psr_status = EDP_PSR2_STATUS;
			psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;

			I915_WRITE(EDP_PSR2_CTL,
				   I915_READ(EDP_PSR2_CTL) &
				   ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));
		} else {
			psr_status = EDP_PSR_STATUS;
			psr_status_mask = EDP_PSR_STATUS_STATE_MASK;

			I915_WRITE(EDP_PSR_CTL,
				   I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
		}

		/* Wait till PSR is idle */
		if (intel_wait_for_register(dev_priv,
					    psr_status, psr_status_mask, 0,
					    2000))
			DRM_ERROR("Timed out waiting for PSR Idle State\n");

		dev_priv->psr.active = false;
	} else {
		if (dev_priv->psr.psr2_enabled)
			WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
		else
			WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
	}
}

static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->psr.lock);

	if (!dev_priv->psr.enabled)
		return;

	DRM_DEBUG_KMS("Disabling PSR%s\n",
		      dev_priv->psr.psr2_enabled ? "2" : "1");
	intel_psr_disable_source(intel_dp);

	/* Disable PSR on Sink */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

	dev_priv->psr.enabled = false;
}

/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 * @old_crtc_state: old CRTC state
 *
 * This function needs to be called before disabling the pipe.
 */
void intel_psr_disable(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!old_crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.prepared) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	intel_psr_disable_locked(intel_dp);

	dev_priv->psr.prepared = false;
	mutex_unlock(&dev_priv->psr.lock);
	cancel_work_sync(&dev_priv->psr.work);
}

/**
 * intel_psr_wait_for_idle - wait for PSR1 to idle
 * @new_crtc_state: new CRTC state
 * @out_value: PSR status in case of failure
 *
 * This function is expected to be called from pipe_update_start() where it is
 * not expected to race with PSR enable or disable.
 *
 * Returns: 0 on success or -ETIMEDOUT if PSR status does not idle.
 */
int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
			    u32 *out_value)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!dev_priv->psr.enabled || !new_crtc_state->has_psr)
		return 0;

	/* FIXME: Update this for PSR2 if we need to wait for idle */
	if (READ_ONCE(dev_priv->psr.psr2_enabled))
		return 0;

	/*
	 * From bspec: Panel Self Refresh (BDW+)
	 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
	 * defensive enough to cover everything.
	 */

	return __intel_wait_for_register(dev_priv, EDP_PSR_STATUS,
					 EDP_PSR_STATUS_STATE_MASK,
					 EDP_PSR_STATUS_STATE_IDLE, 2, 50,
					 out_value);
}

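/*
 * Wait for PSR to idle with psr.lock dropped, then re-take the lock and
 * report whether PSR is still enabled and ready for re-activation.
 */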
static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
{
	i915_reg_t reg;
	u32 mask;
	int err;

	if (!dev_priv->psr.enabled)
		return false;

	if (dev_priv->psr.psr2_enabled) {
		reg = EDP_PSR2_STATUS;
		mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		reg = EDP_PSR_STATUS;
		mask = EDP_PSR_STATUS_STATE_MASK;
	}

	mutex_unlock(&dev_priv->psr.lock);

	err = intel_wait_for_register(dev_priv, reg, mask, 0, 50);
	if (err)
		DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");

	/* After the unlocked wait, verify that PSR is still wanted! */
	mutex_lock(&dev_priv->psr.lock);
	return err == 0 && dev_priv->psr.enabled;
}

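/* Check whether the requested debug mode forces a switch between PSR1 and PSR2. */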
static bool switching_psr(struct drm_i915_private *dev_priv,
			  struct intel_crtc_state *crtc_state,
			  u32 mode)
{
	/* Can't switch psr state anyway if PSR2 is not supported. */
	if (!crtc_state || !crtc_state->has_psr2)
		return false;

	if (dev_priv->psr.psr2_enabled && mode == I915_PSR_DEBUG_FORCE_PSR1)
		return true;

	if (!dev_priv->psr.psr2_enabled && mode != I915_PSR_DEBUG_FORCE_PSR1)
		return true;

	return false;
}

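/*
 * Called from debugfs to force a PSR mode change at runtime: waits for any
 * pending commit, then disables and re-enables PSR as needed under psr.lock.
 */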
int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv,
			       struct drm_modeset_acquire_ctx *ctx,
			       u64 val)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector_state *conn_state;
	struct intel_crtc_state *crtc_state = NULL;
	struct drm_crtc_commit *commit;
	struct drm_crtc *crtc;
	struct intel_dp *dp;
	int ret;
	bool enable;
	u32 mode = val & I915_PSR_DEBUG_MODE_MASK;

	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
	    mode > I915_PSR_DEBUG_FORCE_PSR1) {
		DRM_DEBUG_KMS("Invalid debug mask %llx\n", val);
		return -EINVAL;
	}

	ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
	if (ret)
		return ret;

	/* dev_priv->psr.dp should be set once and then never touched again. */
	dp = READ_ONCE(dev_priv->psr.dp);
	conn_state = dp->attached_connector->base.state;
	crtc = conn_state->crtc;
	if (crtc) {
		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(crtc->state);
		commit = crtc_state->base.commit;
	} else {
		commit = conn_state->commit;
	}
	if (commit) {
		ret = wait_for_completion_interruptible(&commit->hw_done);
		if (ret)
			return ret;
	}

	ret = mutex_lock_interruptible(&dev_priv->psr.lock);
	if (ret)
		return ret;

	enable = psr_global_enabled(val);

	if (!enable || switching_psr(dev_priv, crtc_state, mode))
		intel_psr_disable_locked(dev_priv->psr.dp);

	dev_priv->psr.debug = val;
	if (crtc)
		dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);

	intel_psr_irq_control(dev_priv, dev_priv->psr.debug);

	if (dev_priv->psr.prepared && enable)
		intel_psr_enable_locked(dev_priv, crtc_state);

	mutex_unlock(&dev_priv->psr.lock);

	return ret;
}

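/* Deferred re-activation of PSR after a frontbuffer flush. */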
static void intel_psr_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.work);

	mutex_lock(&dev_priv->psr.lock);

	if (!dev_priv->psr.enabled)
		goto unlock;

	/*
	 * We have to make sure PSR is ready for re-enable,
	 * otherwise it stays disabled until the next full enable/disable
	 * cycle. PSR might take some time to get fully disabled
	 * and be ready for re-enable.
	 */
	if (!__psr_wait_for_idle_locked(dev_priv))
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active)
		goto unlock;

	intel_psr_activate(dev_priv->psr.dp);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

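/* Force an immediate PSR exit by clearing the hardware enable bit. */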
static void intel_psr_exit(struct drm_i915_private *dev_priv)
{
	u32 val;

	if (!dev_priv->psr.active)
		return;

	if (dev_priv->psr.psr2_enabled) {
		val = I915_READ(EDP_PSR2_CTL);
		WARN_ON(!(val & EDP_PSR2_ENABLE));
		I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
	} else {
		val = I915_READ(EDP_PSR_CTL);
		WARN_ON(!(val & EDP_PSR_ENABLE));
		I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
	}
	dev_priv->psr.active = false;
}

/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the invalidate
 *
 * Since the hardware frontbuffer tracking has gaps, we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
			  unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (!CAN_PSR(dev_priv))
		return;

	if (origin == ORIGIN_FLIP)
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;

	if (frontbuffer_bits)
		intel_psr_exit(dev_priv);

	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_flush - Flush PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps, we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_i915_private *dev_priv,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (!CAN_PSR(dev_priv))
		return;

	if (origin == ORIGIN_FLIP)
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* By definition flush = invalidate + flush */
	if (frontbuffer_bits) {
		if (dev_priv->psr.psr2_enabled) {
			intel_psr_exit(dev_priv);
		} else {
			/*
			 * Display WA #0884: all
			 * This documented WA for bxt can be safely applied
			 * broadly so we can force HW tracking to exit PSR
			 * instead of disabling and re-enabling.
			 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
			 * but it makes more sense to write to the currently
			 * active pipe.
			 */
			I915_WRITE(CURSURFLIVE(pipe), 0);
		}
	}

	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
		schedule_work(&dev_priv->psr.work);
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @dev_priv: i915 device private
 *
 * This function is called only once at driver load to initialize basic
 * PSR stuff.
 */
void intel_psr_init(struct drm_i915_private *dev_priv)
{
	if (!HAS_PSR(dev_priv))
		return;

	dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
		HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;

	if (!dev_priv->psr.sink_support)
		return;
	if (i915_modparams.enable_psr == -1) {
		/*
		 * Take the default from VBT when the module parameter is
		 * unset; per platform default: all disabled.
		 */
		i915_modparams.enable_psr = dev_priv->vbt.psr.enable ? 1 : 0;
	}
	/* Set link_standby vs. link_off defaults */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		/* HSW and BDW require workarounds that we don't implement. */
		dev_priv->psr.link_standby = false;
	else
		/* For new platforms let's respect VBT back again */
		dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;

	INIT_WORK(&dev_priv->psr.work, intel_psr_work);
	mutex_init(&dev_priv->psr.lock);
}

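/*
 * Handle a sink-triggered short HPD pulse: read the sink PSR status and
 * error registers and disable PSR when the sink reports a problem.
 */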
void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct i915_psr *psr = &dev_priv->psr;
	u8 val;
	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
			  DP_PSR_LINK_CRC_ERROR;

	if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
		return;

	mutex_lock(&psr->lock);

	if (!psr->enabled || psr->dp != intel_dp)
		goto exit;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val) != 1) {
		DRM_ERROR("PSR_STATUS dpcd read failed\n");
		goto exit;
	}

	if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) {
		DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
		intel_psr_disable_locked(intel_dp);
	}

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) {
		DRM_ERROR("PSR_ERROR_STATUS dpcd read failed\n");
		goto exit;
	}

	if (val & DP_PSR_RFB_STORAGE_ERROR)
		DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n");
	if (val & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
		DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n");
	if (val & DP_PSR_LINK_CRC_ERROR)
		DRM_ERROR("PSR Link CRC error, disabling PSR\n");

	if (val & ~errors)
		DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
			  val & ~errors);
	if (val & errors)
		intel_psr_disable_locked(intel_dp);
	/* clear status register */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val);

	/* TODO: handle PSR2 errors */
exit:
	mutex_unlock(&psr->lock);
}