/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>

#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_display_types.h"
#include "intel_dp_aux.h"
#include "intel_hdmi.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_snps_phy.h"
#include "skl_universal_plane.h"
/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR spec in eDP 1.3. The PSR feature allows the display to go to lower
 * standby states when the system is idle but the display is on, as it
 * eliminates display refresh requests to DDR memory completely as long as the
 * frame buffer for that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Software (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, hence why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
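 *
 * As a rough sketch of that integration (an illustrative call site, not the
 * exact frontbuffer tracking code, which lives in intel_frontbuffer.c)::
 *
 *	// rendering is about to dirty the frontbuffer: force a PSR exit
 *	intel_psr_invalidate(i915, frontbuffer_bits, ORIGIN_CPU);
 *	// ... frontbuffer writes happen here ...
 *	// writes have landed: flush, which re-arms PSR from psr.work
 *	intel_psr_flush(i915, frontbuffer_bits, ORIGIN_CPU);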
 *
 * DC3CO (DC3 clock off)
 *
 * On top of PSR2, GEN12 adds an intermediate power savings state that turns
 * the clock off automatically during PSR2 idle state.
 * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep
 * entry/exit allows the HW to enter a low-power state even when page flipping
 * periodically (for instance a 30fps video playback scenario).
 *
 * Every time a flip occurs PSR2 will get out of deep sleep state (if it was),
 * so DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run after 6
 * frames. If no other flip occurs and the function above is executed, DC3CO is
 * disabled and PSR2 is configured to enter deep sleep, resetting again in case
 * of another flip.
 * Front buffer modifications do not trigger DC3CO activation on purpose as it
 * would bring a lot of complexity and most of the modern systems will only
 * use page flips.
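 *
 * As an illustrative sketch of the flip side of this (hypothetical
 * surrounding code, mirroring what the flip-time handling does)::
 *
 *	// a page flip happened: allow DC3CO and (re)start the 6-frame timer
 *	tgl_psr2_enable_dc3co(intel_dp);
 *	mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
 *			 intel_dp->psr.dc3co_exit_delay);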
 *
 * Description of PSR mask bits:
 *
 * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
 *
 *  When unmasked (nearly) all display register writes (eg. even
 *  SWF) trigger a PSR exit. Some registers are excluded from this
 *  and they have a more specific mask (described below). On icl+
 *  this bit no longer exists and is effectively always set.
 *
 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
 *
 *  When unmasked (nearly) all pipe/plane register writes
 *  trigger a PSR exit. Some plane registers are excluded from this
 *  and they have a more specific mask (described below).
 *
 * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
 * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
 * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
 *
 *  When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
 *  SPR_SURF/CURBASE are not included in this and instead are
 *  controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
 *  EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
 *
 * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
 * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
 *
 *  When unmasked PSR is blocked as long as the sprite
 *  plane is enabled. skl+ with their universal planes no
 *  longer have a mask bit like this, and no plane being
 *  enabled blocks PSR.
 *
 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
 * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
 *
 *  When unmasked CURPOS writes trigger a PSR exit. On skl+
 *  this doesn't exist but CURPOS is included in the
 *  PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
 *
 * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
 * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
 *
 *  When unmasked PSR is blocked as long as the vblank and/or vsync
 *  interrupt is unmasked in IMR *and* enabled in IER.
 *
 * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
 * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
 *
 *  Selects whether PSR exit generates an extra vblank before
 *  the first frame is transmitted. Also note the opposite polarity
 *  of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
 *  unmasked==do not generate the extra vblank).
 *
 *  With DC states enabled the extra vblank happens after link training,
 *  with DC states disabled it happens immediately upon PSR exit trigger.
 *  No idea as of now why there is a difference. HSW/BDW (which don't
 *  even have DMC) always generate it after link training. Go figure.
 *
 *  Unfortunately CHICKEN_TRANS itself seems to be double buffered
 *  and thus won't latch until the first vblank. So with DC states
 *  enabled the register effectively uses the reset value during the DC5
 *  exit+PSR exit sequence, and thus the bit does nothing until
 *  latched by the vblank that it was trying to prevent from being
 *  generated in the first place. So we should probably call this
 *  one a chicken/egg bit instead on skl+.
 *
 *  In standby mode (as opposed to link-off) this makes no difference
 *  as the timing generator keeps running the whole time generating
 *  normal periodic vblanks.
 *
 *  WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
 *  and doing so makes the behaviour match the skl+ reset value.
 *
 * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
 * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
 *
 *  On BDW, without this bit set no vblanks whatsoever are
 *  generated after PSR exit. On HSW this has no apparent effect.
 *  WaPsrDPRSUnmaskVBlankInSRD says to set this.
 *
 * The rest of the bits are more self-explanatory and/or
 * irrelevant for normal operation.
 */
static bool psr_global_enabled(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DEFAULT:
		if (i915->params.enable_psr == -1)
			return connector->panel.vbt.psr.enable;
		return i915->params.enable_psr;
	case I915_PSR_DEBUG_DISABLE:
		return false;
	default:
		return true;
	}
}
static bool psr2_global_enabled(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DISABLE:
	case I915_PSR_DEBUG_FORCE_PSR1:
		return false;
	default:
		if (i915->params.enable_psr == 1)
			return false;
		return true;
	}
}
static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
		EDP_PSR_ERROR(intel_dp->psr.transcoder);
}

static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
		EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
}

static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
		EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
}

static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
		EDP_PSR_MASK(intel_dp->psr.transcoder);
}
static i915_reg_t psr_ctl_reg(struct drm_i915_private *dev_priv,
			      enum transcoder cpu_transcoder)
{
	if (DISPLAY_VER(dev_priv) >= 8)
		return EDP_PSR_CTL(cpu_transcoder);

	return HSW_SRD_CTL;
}

static i915_reg_t psr_debug_reg(struct drm_i915_private *dev_priv,
				enum transcoder cpu_transcoder)
{
	if (DISPLAY_VER(dev_priv) >= 8)
		return EDP_PSR_DEBUG(cpu_transcoder);

	return HSW_SRD_DEBUG;
}

static i915_reg_t psr_perf_cnt_reg(struct drm_i915_private *dev_priv,
				   enum transcoder cpu_transcoder)
{
	if (DISPLAY_VER(dev_priv) >= 8)
		return EDP_PSR_PERF_CNT(cpu_transcoder);

	return HSW_SRD_PERF_CNT;
}

static i915_reg_t psr_status_reg(struct drm_i915_private *dev_priv,
				 enum transcoder cpu_transcoder)
{
	if (DISPLAY_VER(dev_priv) >= 8)
		return EDP_PSR_STATUS(cpu_transcoder);

	return HSW_SRD_STATUS;
}

static i915_reg_t psr_imr_reg(struct drm_i915_private *dev_priv,
			      enum transcoder cpu_transcoder)
{
	if (DISPLAY_VER(dev_priv) >= 12)
		return TRANS_PSR_IMR(cpu_transcoder);

	return EDP_PSR_IMR;
}

static i915_reg_t psr_iir_reg(struct drm_i915_private *dev_priv,
			      enum transcoder cpu_transcoder)
{
	if (DISPLAY_VER(dev_priv) >= 12)
		return TRANS_PSR_IIR(cpu_transcoder);

	return EDP_PSR_IIR;
}

static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
				  enum transcoder cpu_transcoder)
{
	if (DISPLAY_VER(dev_priv) >= 8)
		return EDP_PSR_AUX_CTL(cpu_transcoder);

	return HSW_SRD_AUX_CTL;
}

static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
				   enum transcoder cpu_transcoder, int i)
{
	if (DISPLAY_VER(dev_priv) >= 8)
		return EDP_PSR_AUX_DATA(cpu_transcoder, i);

	return HSW_SRD_AUX_DATA(i);
}
static void psr_irq_control(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 mask;

	mask = psr_irq_psr_error_bit_get(intel_dp);
	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
		mask |= psr_irq_post_exit_bit_get(intel_dp) |
			psr_irq_pre_entry_bit_get(intel_dp);

	intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
		     psr_irq_mask_get(intel_dp), ~mask);
}
static void psr_event_print(struct drm_i915_private *i915,
			    u32 val, bool psr2_enabled)
{
	drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
		drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
		drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
		drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
		drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
	if (val & PSR_EVENT_GRAPHICS_RESET)
		drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
	if (val & PSR_EVENT_PCH_INTERRUPT)
		drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
	if (val & PSR_EVENT_MEMORY_UP)
		drm_dbg_kms(&i915->drm, "\tMemory up\n");
	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
		drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
		drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
		drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
	if (val & PSR_EVENT_REGISTER_UPDATE)
		drm_dbg_kms(&i915->drm, "\tRegister updated\n");
	if (val & PSR_EVENT_HDCP_ENABLE)
		drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
		drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
	if (val & PSR_EVENT_VBI_ENABLE)
		drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
	if (val & PSR_EVENT_LPSP_MODE_EXIT)
		drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
		drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
}
void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	ktime_t time_ns = ktime_get();

	if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
		intel_dp->psr.last_entry_attempt = time_ns;
		drm_dbg_kms(&dev_priv->drm,
			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
			    transcoder_name(cpu_transcoder));
	}

	if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
		intel_dp->psr.last_exit = time_ns;
		drm_dbg_kms(&dev_priv->drm,
			    "[transcoder %s] PSR exit completed\n",
			    transcoder_name(cpu_transcoder));

		if (DISPLAY_VER(dev_priv) >= 9) {
			u32 val;

			val = intel_de_rmw(dev_priv, PSR_EVENT(cpu_transcoder), 0, 0);

			psr_event_print(dev_priv, val, intel_dp->psr.psr2_enabled);
		}
	}

	if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
		drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
			 transcoder_name(cpu_transcoder));

		intel_dp->psr.irq_aux_error = true;

		/*
		 * If this interruption is not masked it will keep
		 * interrupting so fast that it prevents the scheduled
		 * work to run.
		 * Also after a PSR error, we don't want to arm PSR
		 * again so we don't care about unmasking the interruption
		 * or unsetting irq_aux_error.
		 */
		intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
			     0, psr_irq_psr_error_bit_get(intel_dp));

		queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
	}
}
static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
	u8 alpm_caps = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
			      &alpm_caps) != 1)
		return false;

	return alpm_caps & DP_ALPM_CAP;
}
static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 val = 8; /* assume the worst if we can't read the value */

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
	else
		drm_dbg_kms(&i915->drm,
			    "Unable to get sink synchronization latency, assuming 8 frames\n");

	return val;
}
static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	ssize_t r;
	u16 w;
	u8 y;

	/* If the sink doesn't have specific granularity requirements set legacy ones */
	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
		/* As PSR2 HW sends full lines, we do not care about x granularity */
		w = 4;
		y = 4;
		goto exit;
	}

	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2);
	if (r != 2)
		drm_dbg_kms(&i915->drm,
			    "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
	/*
	 * Spec says that if the value read is 0 the default granularity should
	 * be used instead.
	 */
	if (r != 2 || w == 0)
		w = 4;

	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
	if (r != 1)
		drm_dbg_kms(&i915->drm,
			    "Unable to read DP_PSR2_SU_Y_GRANULARITY\n");
	if (r != 1 || y == 0)
		y = 4;

exit:
	intel_dp->psr.su_w_granularity = w;
	intel_dp->psr.su_y_granularity = y;
}
void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));

	if (!intel_dp->psr_dpcd[0])
		return;
	drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
		    intel_dp->psr_dpcd[0]);

	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR support not currently available for this panel\n");
		return;
	}

	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Panel lacks power state control, PSR cannot be enabled\n");
		return;
	}

	intel_dp->psr.sink_support = true;
	intel_dp->psr.sink_sync_latency =
		intel_dp_get_sink_sync_latency(intel_dp);

	if (DISPLAY_VER(dev_priv) >= 9 &&
	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
		bool y_req = intel_dp->psr_dpcd[1] &
			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
		bool alpm = intel_dp_get_alpm_status(intel_dp);

		/*
		 * All panels that support PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in VSC but we are
		 * only sure that it is going to be used when required by the
		 * panel. This way the panel is capable of doing selective
		 * updates without an AUX frame sync.
		 *
		 * To support PSR version 02h and PSR version 03h without
		 * Y-coordinate requirement panels we would need to enable
		 * GTC first.
		 */
		intel_dp->psr.sink_psr2_support = y_req && alpm;
		drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
			    intel_dp->psr.sink_psr2_support ? "" : "not ");

		if (intel_dp->psr.sink_psr2_support) {
			intel_dp->psr.colorimetry_support =
				intel_dp_get_colorimetry_status(intel_dp);
			intel_dp_get_su_granularity(intel_dp);
		}
	}
}
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 aux_clock_divider, aux_ctl;
	/* write DP_SET_POWER=D0 */
	static const u8 aux_msg[] = {
		[0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
		[1] = (DP_SET_POWER >> 8) & 0xff,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,
		[4] = DP_SET_POWER_D0,
	};
	int i;

	BUILD_BUG_ON(sizeof(aux_msg) > 20);
	for (i = 0; i < sizeof(aux_msg); i += 4)
		intel_de_write(dev_priv,
			       psr_aux_data_reg(dev_priv, cpu_transcoder, i >> 2),
			       intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Start with bits set for DDI_AUX_CTL register */
	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
					     aux_clock_divider);

	/* Select only valid bits for SRD_AUX_CTL */
	aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
		EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
		EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
		EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;

	intel_de_write(dev_priv, psr_aux_ctl_reg(dev_priv, cpu_transcoder),
		       aux_ctl);
}
static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 dpcd_val = DP_PSR_ENABLE;

	/* Enable ALPM at sink for psr2 */
	if (intel_dp->psr.psr2_enabled) {
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
				   DP_ALPM_ENABLE |
				   DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);

		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
	} else {
		if (intel_dp->psr.link_standby)
			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;

		if (DISPLAY_VER(dev_priv) >= 8)
			dpcd_val |= DP_PSR_CRC_VERIFICATION;
	}

	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
		dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}
static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val = 0;

	if (DISPLAY_VER(dev_priv) >= 11)
		val |= EDP_PSR_TP4_TIME_0us;

	if (dev_priv->params.psr_safest_params) {
		val |= EDP_PSR_TP1_TIME_2500us;
		val |= EDP_PSR_TP2_TP3_TIME_2500us;
		goto check_tp3_sel;
	}

	if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
		val |= EDP_PSR_TP1_TIME_0us;
	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
		val |= EDP_PSR_TP1_TIME_100us;
	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
		val |= EDP_PSR_TP1_TIME_500us;
	else
		val |= EDP_PSR_TP1_TIME_2500us;

	if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
		val |= EDP_PSR_TP2_TP3_TIME_0us;
	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR_TP2_TP3_TIME_100us;
	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
	else
		val |= EDP_PSR_TP2_TP3_TIME_2500us;

check_tp3_sel:
	if (intel_dp_source_supports_tps3(dev_priv) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP_TP1_TP3;
	else
		val |= EDP_PSR_TP_TP1_TP2;

	return val;
}
static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int idle_frames;

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
	idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);

	if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
		idle_frames = 0xf;

	return idle_frames;
}
static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 max_sleep_time = 0x1f;
	u32 val = EDP_PSR_ENABLE;

	val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));

	val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
	if (IS_HASWELL(dev_priv))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (intel_dp->psr.link_standby)
		val |= EDP_PSR_LINK_STANDBY;

	val |= intel_psr1_get_tp_time(intel_dp);

	if (DISPLAY_VER(dev_priv) >= 8)
		val |= EDP_PSR_CRC_ENABLE;

	intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
		     ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
}
static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val = 0;

	if (dev_priv->params.psr_safest_params)
		return EDP_PSR2_TP2_TIME_2500us;

	if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
	    connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
		val |= EDP_PSR2_TP2_TIME_50us;
	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR2_TP2_TIME_100us;
	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR2_TP2_TIME_500us;
	else
		val |= EDP_PSR2_TP2_TIME_2500us;

	return val;
}
static int psr2_block_count_lines(struct intel_dp *intel_dp)
{
	return intel_dp->psr.io_wake_lines < 9 &&
		intel_dp->psr.fast_wake_lines < 9 ? 8 : 12;
}

static int psr2_block_count(struct intel_dp *intel_dp)
{
	return psr2_block_count_lines(intel_dp) / 4;
}
static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 val = EDP_PSR2_ENABLE;

	val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));

	if (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))
		val |= EDP_SU_TRACK_ENABLE;

	if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12)
		val |= EDP_Y_COORDINATE_ENABLE;

	val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2));
	val |= intel_psr2_get_tp_time(intel_dp);

	if (DISPLAY_VER(dev_priv) >= 12) {
		if (psr2_block_count(intel_dp) > 2)
			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
		else
			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
	}

	/* Wa_22012278275:adl-p */
	if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
		static const u8 map[] = {
			2, /* 5 lines */
			1, /* 6 lines */
			0, /* 7 lines */
			3, /* 8 lines */
			6, /* 9 lines */
			5, /* 10 lines */
			4, /* 11 lines */
			7, /* 12 lines */
		};
		/*
		 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
		 * comments below for more information
		 */
		int tmp;

		tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);

		tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
		val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
		val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
	} else if (DISPLAY_VER(dev_priv) >= 9) {
		val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
		val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
	}

	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
		val |= EDP_PSR2_SU_SDP_SCANLINE;

	if (intel_dp->psr.psr2_sel_fetch_enabled) {
		u32 tmp;

		tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
		drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
	} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
		intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), 0);
	}

	/*
	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
	 * recommends keeping this bit unset while PSR2 is enabled.
	 */
	intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), 0);

	intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val);
}
static bool
transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
{
	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
		return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
	else if (DISPLAY_VER(dev_priv) >= 12)
		return cpu_transcoder == TRANSCODER_A;
	else if (DISPLAY_VER(dev_priv) >= 9)
		return cpu_transcoder == TRANSCODER_EDP;
	else
		return false;
}
static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
{
	if (!cstate || !cstate->hw.active)
		return 0;

	return DIV_ROUND_UP(1000 * 1000,
			    drm_mode_vrefresh(&cstate->hw.adjusted_mode));
}
static void psr2_program_idle_frames(struct intel_dp *intel_dp,
				     int idle_frames)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;

	intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
		     EDP_PSR2_IDLE_FRAMES_MASK,
		     EDP_PSR2_IDLE_FRAMES(idle_frames));
}
static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	psr2_program_idle_frames(intel_dp, 0);
	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}

static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
	psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
}

static void tgl_dc3co_disable_work(struct work_struct *work)
{
	struct intel_dp *intel_dp =
		container_of(work, typeof(*intel_dp), psr.dc3co_work.work);

	mutex_lock(&intel_dp->psr.lock);
	/* If delayed work is pending, it is not idle */
	if (delayed_work_pending(&intel_dp->psr.dc3co_work))
		goto unlock;

	tgl_psr2_disable_dc3co(intel_dp);
unlock:
	mutex_unlock(&intel_dp->psr.lock);
}
static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
{
	if (!intel_dp->psr.dc3co_exitline)
		return;

	cancel_delayed_work(&intel_dp->psr.dc3co_work);
	/* Before PSR2 exit disallow dc3co */
	tgl_psr2_disable_dc3co(intel_dp);
}
static bool
dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum port port = dig_port->base.port;

	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
		return pipe <= PIPE_B && port <= PORT_B;
	else
		return pipe == PIPE_A && port == PORT_A;
}
static void
tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *crtc_state)
{
	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	u32 exit_scanlines;

	/*
	 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
	 * disable DC3CO until the changed dc3co activating/deactivating sequence
	 * is applied. B.Specs:49196
	 */
	return;

	/*
	 * DMC's DC3CO exit mechanism has an issue with Selective Fetch
	 * TODO: when the issue is addressed, this restriction should be removed.
	 */
	if (crtc_state->enable_psr2_sel_fetch)
		return;

	if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
		return;

	if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
		return;

	/* Wa_16011303918:adl-p */
	if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
		return;

	/*
	 * DC3CO Exit time 200us B.Spec 49196
	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
	 */
	exit_scanlines =
		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;

	if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
		return;

	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
}
static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
					      struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!dev_priv->params.enable_psr2_sel_fetch &&
	    intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 sel fetch not enabled, disabled by parameter\n");
		return false;
	}

	if (crtc_state->uapi.async_flip) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 sel fetch not enabled, async flip enabled\n");
		return false;
	}

	return crtc_state->enable_psr2_sel_fetch = true;
}
static bool psr2_granularity_check(struct intel_dp *intel_dp,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
	const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
	const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
	u16 y_granularity = 0;

	/* PSR2 HW only sends full lines so we only need to validate the width */
	if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
		return false;

	if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
		return false;

	/* HW tracking is only aligned to 4 lines */
	if (!crtc_state->enable_psr2_sel_fetch)
		return intel_dp->psr.su_y_granularity == 4;

	/*
	 * adl_p and mtl platforms have 1 line granularity.
	 * For other platforms with SW tracking we can adjust the y coordinates
	 * to match the sink requirement if it is a multiple of 4.
	 */
	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
		y_granularity = intel_dp->psr.su_y_granularity;
	else if (intel_dp->psr.su_y_granularity <= 2)
		y_granularity = 4;
	else if ((intel_dp->psr.su_y_granularity % 4) == 0)
		y_granularity = intel_dp->psr.su_y_granularity;

	if (y_granularity == 0 || crtc_vdisplay % y_granularity)
		return false;

	if (crtc_state->dsc.compression_enable &&
	    vdsc_cfg->slice_height % y_granularity)
		return false;

	crtc_state->su_y_granularity = y_granularity;
	return true;
}
static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
							struct intel_crtc_state *crtc_state)
{
	const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 hblank_total, hblank_ns, req_ns;

	hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
	hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);

	/* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
	req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);

	if ((hblank_ns - req_ns) > 100)
		return true;

	/* Not supported <13 / Wa_22012279113:adl-p */
	if (DISPLAY_VER(dev_priv) < 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
		return false;

	crtc_state->req_psr2_sdp_prior_scanline = true;
	return true;
}
static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
				     struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
	u8 max_wake_lines;

	if (DISPLAY_VER(i915) >= 12) {
		io_wake_time = 42;
		/*
		 * According to Bspec it's 42us, but based on testing
		 * it is not enough -> use 45 us.
		 */
		fast_wake_time = 45;
		max_wake_lines = 12;
	} else {
		io_wake_time = 50;
		fast_wake_time = 32;
		max_wake_lines = 8;
	}

	io_wake_lines = intel_usecs_to_scanlines(
		&crtc_state->uapi.adjusted_mode, io_wake_time);
	fast_wake_lines = intel_usecs_to_scanlines(
		&crtc_state->uapi.adjusted_mode, fast_wake_time);

	if (io_wake_lines > max_wake_lines ||
	    fast_wake_lines > max_wake_lines)
		return false;

	if (i915->params.psr_safest_params)
		io_wake_lines = fast_wake_lines = max_wake_lines;

	/* According to Bspec the lower limit should be set as 7 lines. */
	intel_dp->psr.io_wake_lines = max(io_wake_lines, 7);
	intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7);

	return true;
}
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;

	if (!intel_dp->psr.sink_psr2_support)
		return false;

	/* JSL and EHL only support eDP 1.3 */
	if (IS_JSL_EHL(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
		return false;
	}

	/* Wa_16011181250 */
	if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
	    IS_DG2(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
		return false;
	}

	if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
		return false;
	}

	if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not supported in transcoder %s\n",
			    transcoder_name(crtc_state->cpu_transcoder));
		return false;
	}

	if (!psr2_global_enabled(intel_dp)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
		return false;
	}

	/*
	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
	 * resolution requires DSC to be enabled, priority is given to DSC
	 * over PSR2.
	 */
	if (crtc_state->dsc.compression_enable &&
	    (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 cannot be enabled since DSC is enabled\n");
		return false;
	}

	if (crtc_state->crc_enabled) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
		return false;
	}

	if (DISPLAY_VER(dev_priv) >= 12) {
		psr_max_h = 5120;
		psr_max_v = 3200;
		max_bpp = 30;
	} else if (DISPLAY_VER(dev_priv) >= 10) {
		psr_max_h = 4096;
		psr_max_v = 2304;
		max_bpp = 24;
	} else if (DISPLAY_VER(dev_priv) == 9) {
		psr_max_h = 3640;
		psr_max_v = 2304;
		max_bpp = 24;
	}

	if (crtc_state->pipe_bpp > max_bpp) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
			    crtc_state->pipe_bpp, max_bpp);
		return false;
	}

	/* Wa_16011303918:adl-p */
	if (crtc_state->vrr.enable &&
	    IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, not compatible with HW stepping + VRR\n");
		return false;
	}

	if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, PSR2 SDP indication does not fit in hblank\n");
		return false;
	}

	if (!_compute_psr2_wake_times(intel_dp, crtc_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, unable to use long enough wake times\n");
		return false;
	}

	/* Vblank >= PSR2_CTL Block Count Number maximum line count */
	if (crtc_state->hw.adjusted_mode.crtc_vblank_end -
	    crtc_state->hw.adjusted_mode.crtc_vblank_start <
	    psr2_block_count_lines(intel_dp)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, too short vblank time\n");
		return false;
	}

	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
		if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
		    !HAS_PSR_HW_TRACKING(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
			return false;
		}
	}

	if (!psr2_granularity_check(intel_dp, crtc_state)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
		goto unsupported;
	}

	if (!crtc_state->enable_psr2_sel_fetch &&
	    (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
			    crtc_hdisplay, crtc_vdisplay,
			    psr_max_h, psr_max_v);
		goto unsupported;
	}

	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
	return true;

unsupported:
	crtc_state->enable_psr2_sel_fetch = false;
	return false;
}
void intel_psr_compute_config(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state,
			      struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	int psr_setup_time;

	/*
	 * Current PSR panels don't work reliably with VRR enabled, so if
	 * VRR is enabled, do not enable PSR.
	 */
	if (crtc_state->vrr.enable)
		return;

	if (!CAN_PSR(intel_dp))
		return;

	if (!psr_global_enabled(intel_dp)) {
		drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
		return;
	}

	if (intel_dp->psr.sink_not_reliable) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR sink implementation is not reliable\n");
		return;
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: Interlaced mode enabled\n");
		return;
	}

	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
	if (psr_setup_time < 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			    intel_dp->psr_dpcd[1]);
		return;
	}

	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: PSR setup time (%d us) too long\n",
			    psr_setup_time);
		return;
	}

	crtc_state->has_psr = true;
	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);

	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
	intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
				     &crtc_state->psr_vsc);
}
void intel_psr_get_config(struct intel_encoder *encoder,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	struct intel_dp *intel_dp;
	u32 val;

	if (!dig_port)
		return;

	intel_dp = &dig_port->dp;
	if (!CAN_PSR(intel_dp))
		return;

	mutex_lock(&intel_dp->psr.lock);
	if (!intel_dp->psr.enabled)
		goto unlock;

	/*
	 * Not possible to read EDP_PSR/PSR2_CTL registers as it is
	 * enabled/disabled because of frontbuffer tracking and others.
	 */
	pipe_config->has_psr = true;
	pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
	pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);

	if (!intel_dp->psr.psr2_enabled)
		goto unlock;

	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
		val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
		if (val & PSR2_MAN_TRK_CTL_ENABLE)
			pipe_config->enable_psr2_sel_fetch = true;
	}

	if (DISPLAY_VER(dev_priv) >= 12) {
		val = intel_de_read(dev_priv, TRANS_EXITLINE(cpu_transcoder));
		pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
	}
unlock:
	mutex_unlock(&intel_dp->psr.lock);
}
static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;

	drm_WARN_ON(&dev_priv->drm,
		    transcoder_has_psr2(dev_priv, cpu_transcoder) &&
		    intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)) & EDP_PSR2_ENABLE);

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)) & EDP_PSR_ENABLE);

	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);

	lockdep_assert_held(&intel_dp->psr.lock);

	/* psr1 and psr2 are mutually exclusive. */
	if (intel_dp->psr.psr2_enabled)
		hsw_activate_psr2(intel_dp);
	else
		hsw_activate_psr1(intel_dp);

	intel_dp->psr.active = true;
}
static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
{
	switch (intel_dp->psr.pipe) {
	case PIPE_A:
		return LATENCY_REPORTING_REMOVED_PIPE_A;
	case PIPE_B:
		return LATENCY_REPORTING_REMOVED_PIPE_B;
	case PIPE_C:
		return LATENCY_REPORTING_REMOVED_PIPE_C;
	case PIPE_D:
		return LATENCY_REPORTING_REMOVED_PIPE_D;
	default:
		MISSING_CASE(intel_dp->psr.pipe);
		return 0;
	}
}
static void wm_optimization_wa(struct intel_dp *intel_dp,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	bool set_wa_bit = false;

	/* Wa_14015648006 */
	if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
	    IS_DISPLAY_VER(dev_priv, 11, 13))
		set_wa_bit |= crtc_state->wm_level_disabled;

	/* Wa_16013835468 */
	if (DISPLAY_VER(dev_priv) == 12)
		set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
			crtc_state->hw.adjusted_mode.crtc_vdisplay;

	if (set_wa_bit)
		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
			     0, wa_16013835468_bit_get(intel_dp));
	else
		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
			     wa_16013835468_bit_get(intel_dp), 0);
}
static void intel_psr_enable_source(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 mask;

	/*
	 * Only HSW and BDW have PSR AUX registers that need to be setup.
	 * SKL+ use hardcoded values for PSR AUX transactions.
	 */
	if (DISPLAY_VER(dev_priv) < 9)
		hsw_psr_setup_aux(intel_dp);

	/*
	 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD. Also
	 * mask LPSP to avoid dependency on other drivers that might block
	 * runtime_pm besides preventing other hw tracking issues now that we
	 * can rely on frontbuffer tracking.
	 */
	mask = EDP_PSR_DEBUG_MASK_MEMUP |
	       EDP_PSR_DEBUG_MASK_HPD |
	       EDP_PSR_DEBUG_MASK_LPSP |
	       EDP_PSR_DEBUG_MASK_MAX_SLEEP;

	if (DISPLAY_VER(dev_priv) < 11)
		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;

	intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask);

	psr_irq_control(intel_dp);

	/*
	 * TODO: if future platforms support DC3CO in more than one
	 * transcoder, EXITLINE will need to be unset when disabling PSR
	 */
	if (intel_dp->psr.dc3co_exitline)
		intel_de_rmw(dev_priv, TRANS_EXITLINE(cpu_transcoder), EXITLINE_MASK,
			     intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);

	if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
		intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
			     intel_dp->psr.psr2_sel_fetch_enabled ?
			     IGNORE_PSR2_HW_TRACKING : 0);

	/*
	 * Wa_16013835468
	 * Wa_14015648006
	 */
	wm_optimization_wa(intel_dp, crtc_state);

	if (intel_dp->psr.psr2_enabled) {
		if (DISPLAY_VER(dev_priv) == 9)
			intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
				     PSR2_VSC_ENABLE_PROG_HEADER |
				     PSR2_ADD_VERTICAL_LINE_COUNT);

		/*
		 * Wa_16014451276:adlp,mtl[a0,b0]
		 * All supported adlp panels have 1-based X granularity, this may
		 * cause issues if non-supported panels are used.
		 */
		if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
			intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder), 0,
				     ADLP_1_BASED_X_GRANULARITY);
		else if (IS_ALDERLAKE_P(dev_priv))
			intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
				     ADLP_1_BASED_X_GRANULARITY);

		/* Wa_16012604467:adlp,mtl[a0,b0] */
		if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
			intel_de_rmw(dev_priv,
				     MTL_CLKGATE_DIS_TRANS(cpu_transcoder), 0,
				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
		else if (IS_ALDERLAKE_P(dev_priv))
			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
				     CLKGATE_DIS_MISC_DMASC_GATING_DIS);
	}
}
static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 val;

	/*
	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
	 * will still keep the error set even after the reset done in the
	 * irq_preinstall and irq_uninstall hooks.
	 * Enabling PSR in this situation causes the screen to freeze the
	 * first time that the PSR HW tries to activate, so let's keep PSR
	 * disabled to avoid any rendering problems.
	 */
	val = intel_de_read(dev_priv, psr_iir_reg(dev_priv, cpu_transcoder));
	val &= psr_irq_psr_error_bit_get(intel_dp);
	if (val) {
		intel_dp->psr.sink_not_reliable = true;
		drm_dbg_kms(&dev_priv->drm,
			    "PSR interruption error set, not enabling PSR\n");
		return false;
	}

	return true;
}
static void intel_psr_enable_locked(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
	struct intel_encoder *encoder = &dig_port->base;
	u32 val;

	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);

	intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
	intel_dp->psr.busy_frontbuffer_bits = 0;
	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
	/* DC5/DC6 requires at least 6 idle frames */
	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
	intel_dp->psr.dc3co_exit_delay = val;
	intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
	intel_dp->psr.req_psr2_sdp_prior_scanline =
		crtc_state->req_psr2_sdp_prior_scanline;

	if (!psr_interrupt_error_check(intel_dp))
		return;

	drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
		    intel_dp->psr.psr2_enabled ? "2" : "1");
	intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc);
	intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
	intel_psr_enable_sink(intel_dp);
	intel_psr_enable_source(intel_dp, crtc_state);
	intel_dp->psr.enabled = true;
	intel_dp->psr.paused = false;

	intel_psr_activate(intel_dp);
}
static void intel_psr_exit(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 val;

	if (!intel_dp->psr.active) {
		if (transcoder_has_psr2(dev_priv, cpu_transcoder)) {
			val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
			drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
		}

		val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
		drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);

		return;
	}

	if (intel_dp->psr.psr2_enabled) {
		tgl_disallow_dc3co_on_psr2_exit(intel_dp);

		val = intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
				   EDP_PSR2_ENABLE, 0);

		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
	} else {
		val = intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
				   EDP_PSR_ENABLE, 0);

		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
	}
	intel_dp->psr.active = false;
}
static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	i915_reg_t psr_status;
	u32 psr_status_mask;

	if (intel_dp->psr.psr2_enabled) {
		psr_status = EDP_PSR2_STATUS(cpu_transcoder);
		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		psr_status = psr_status_reg(dev_priv, cpu_transcoder);
		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
	}

	/* Wait till PSR is idle */
	if (intel_de_wait_for_clear(dev_priv, psr_status,
				    psr_status_mask, 2000))
		drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
}
static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	enum phy phy = intel_port_to_phy(dev_priv,
					 dp_to_dig_port(intel_dp)->base.port);

	lockdep_assert_held(&intel_dp->psr.lock);

	if (!intel_dp->psr.enabled)
		return;

	drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
		    intel_dp->psr.psr2_enabled ? "2" : "1");

	intel_psr_exit(intel_dp);
	intel_psr_wait_exit_locked(intel_dp);

	/* Wa_16013835468 */
	if (DISPLAY_VER(dev_priv) >= 11)
		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
			     wa_16013835468_bit_get(intel_dp), 0);

	if (intel_dp->psr.psr2_enabled) {
		/* Wa_16012604467:adlp,mtl[a0,b0] */
		if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
			intel_de_rmw(dev_priv,
				     MTL_CLKGATE_DIS_TRANS(cpu_transcoder),
				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
		else if (IS_ALDERLAKE_P(dev_priv))
			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
				     CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
	}

	intel_snps_phy_update_psr_power_state(dev_priv, phy, false);

	/* Disable PSR on Sink */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

	if (intel_dp->psr.psr2_enabled)
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);

	intel_dp->psr.enabled = false;
	intel_dp->psr.psr2_enabled = false;
	intel_dp->psr.psr2_sel_fetch_enabled = false;
	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
}
/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 * @old_crtc_state: old CRTC state
 *
 * This function needs to be called before disabling the pipe.
 */
void intel_psr_disable(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!old_crtc_state->has_psr)
		return;

	if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
		return;

	mutex_lock(&intel_dp->psr.lock);

	intel_psr_disable_locked(intel_dp);

	mutex_unlock(&intel_dp->psr.lock);
	cancel_work_sync(&intel_dp->psr.work);
	cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
}
/**
 * intel_psr_pause - Pause PSR
 * @intel_dp: Intel DP
 *
 * This function needs to be called after enabling PSR.
 */
void intel_psr_pause(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_psr *psr = &intel_dp->psr;

	if (!CAN_PSR(intel_dp))
		return;

	mutex_lock(&psr->lock);

	if (!psr->enabled) {
		mutex_unlock(&psr->lock);
		return;
	}

	/* If we ever hit this, we will need to add refcount to pause/resume */
	drm_WARN_ON(&dev_priv->drm, psr->paused);

	intel_psr_exit(intel_dp);
	intel_psr_wait_exit_locked(intel_dp);
	psr->paused = true;

	mutex_unlock(&psr->lock);

	cancel_work_sync(&psr->work);
	cancel_delayed_work_sync(&psr->dc3co_work);
}
/**
 * intel_psr_resume - Resume PSR
 * @intel_dp: Intel DP
 *
 * This function needs to be called after pausing PSR.
 */
void intel_psr_resume(struct intel_dp *intel_dp)
{
	struct intel_psr *psr = &intel_dp->psr;

	if (!CAN_PSR(intel_dp))
		return;

	mutex_lock(&psr->lock);

	if (!psr->paused)
		goto unlock;

	psr->paused = false;
	intel_psr_activate(intel_dp);

unlock:
	mutex_unlock(&psr->lock);
}
static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
{
	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? 0 :
		PSR2_MAN_TRK_CTL_ENABLE;
}

static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
{
	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
		ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
		PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
}

static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
{
	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
		ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
		PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
}

static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
{
	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
		ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
		PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
}
static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;

	if (intel_dp->psr.psr2_sel_fetch_enabled)
		intel_de_write(dev_priv,
			       PSR2_MAN_TRK_CTL(cpu_transcoder),
			       man_trk_ctl_enable_bit_get(dev_priv) |
			       man_trk_ctl_partial_frame_bit_get(dev_priv) |
			       man_trk_ctl_single_full_frame_bit_get(dev_priv) |
			       man_trk_ctl_continuos_full_frame(dev_priv));
	else
		/*
		 * Display WA #0884: skl+
		 * This documented WA for bxt can be safely applied
		 * broadly so we can force HW tracking to exit PSR
		 * instead of disabling and re-enabling.
		 * Workaround tells us to write 0 to CUR_SURFLIVE_A,
		 * but it makes more sense to write to the currently
		 * active pipe's CURSURFLIVE.
		 *
		 * This workaround does not exist for platforms with display 10
		 * or newer, but testing proved that it works up to display 13;
		 * for anything newer, testing will be needed.
		 */
		intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
}
void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane,
					    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;

	if (!crtc_state->enable_psr2_sel_fetch)
		return;

	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
}
void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane,
					    const struct intel_crtc_state *crtc_state,
					    const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;

	if (!crtc_state->enable_psr2_sel_fetch)
		return;

	if (plane->id == PLANE_CURSOR)
		intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
				  plane_state->ctl);
	else
		intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
				  PLANE_SEL_FETCH_CTL_ENABLE);
}
void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane,
					      const struct intel_crtc_state *crtc_state,
					      const struct intel_plane_state *plane_state,
					      int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;
	const struct drm_rect *clip;
	u32 val;
	int x, y;

	if (!crtc_state->enable_psr2_sel_fetch)
		return;

	if (plane->id == PLANE_CURSOR)
		return;

	clip = &plane_state->psr2_sel_fetch_area;

	val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
	val |= plane_state->uapi.dst.x1;
	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);

	x = plane_state->view.color_plane[color_plane].x;

	/*
	 * From Bspec: UV surface Start Y Position = half of Y plane Y
	 * position.
	 */
	if (!color_plane)
		y = plane_state->view.color_plane[color_plane].y + clip->y1;
	else
		y = plane_state->view.color_plane[color_plane].y + clip->y1 / 2;

	val = y << 16 | x;

	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
			  val);

	/* Sizes are 0 based */
	val = (drm_rect_height(clip) - 1) << 16;
	val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
}
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	struct intel_encoder *encoder;

	if (!crtc_state->enable_psr2_sel_fetch)
		return;

	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
					     crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		lockdep_assert_held(&intel_dp->psr.lock);
		if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
			return;
		break;
	}

	intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
		       crtc_state->psr2_man_track_ctl);
}
1862 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
1863 struct drm_rect *clip, bool full_update)
1865 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1866 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1867 u32 val = man_trk_ctl_enable_bit_get(dev_priv);
1869 /* SF partial frame enable has to be set even on full update */
1870 val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
1873 val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
1874 val |= man_trk_ctl_continuos_full_frame(dev_priv);
1881 if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) {
1882 val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1);
1883 val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 - 1);
1885 drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
1887 val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
1888 val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
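/*
 * Worked example (illustrative): a clip of y1=8..y2=16 yields
 * SU_REGION_START_ADDR = 8 / 4 + 1 = 3 and SU_REGION_END_ADDR = 16 / 4 + 1 = 5,
 * i.e. on these platforms the SU region is programmed in 1-based blocks of
 * 4 lines, while ADLP+ above uses raw scanline numbers.
 */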
1891 crtc_state->psr2_man_track_ctl = val;
1894 static void clip_area_update(struct drm_rect *overlap_damage_area,
1895 struct drm_rect *damage_area,
1896 struct drm_rect *pipe_src)
1898 if (!drm_rect_intersect(damage_area, pipe_src))
1901 if (overlap_damage_area->y1 == -1) {
1902 overlap_damage_area->y1 = damage_area->y1;
1903 overlap_damage_area->y2 = damage_area->y2;
1907 if (damage_area->y1 < overlap_damage_area->y1)
1908 overlap_damage_area->y1 = damage_area->y1;
1910 if (damage_area->y2 > overlap_damage_area->y2)
1911 overlap_damage_area->y2 = damage_area->y2;
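/*
 * Example of the merge above (illustrative): an accumulated overlap of
 * y1=40..y2=80 combined with a new damage_area of y1=16..y2=64 grows the
 * overlap to y1=16..y2=80. Only the vertical range is tracked since the
 * selective update region always spans full lines.
 */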
1914 static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *crtc_state,
1915 struct drm_rect *pipe_clip)
1917 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1918 const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1921 /* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
1922 if (crtc_state->dsc.compression_enable &&
1923 (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14))
1924 y_alignment = vdsc_cfg->slice_height;
1926 y_alignment = crtc_state->su_y_granularity;
1928 pipe_clip->y1 -= pipe_clip->y1 % y_alignment;
1929 if (pipe_clip->y2 % y_alignment)
1930 pipe_clip->y2 = ((pipe_clip->y2 / y_alignment) + 1) * y_alignment;
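/*
 * Worked example (illustrative): with y_alignment == 4 an incoming clip of
 * y1=5..y2=10 becomes y1=4..y2=12, so the selective update region always
 * covers whole alignment blocks.
 */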
1934 * TODO: It is not clear how to handle planes with a negative position;
1935 * planes are also not updated if they have a negative X
1936 * position, so for now do a full update in these cases.
1938 * Plane scaling and rotation are not supported by selective fetch and both
1939 * properties can change without a modeset, so they need to be checked at every atomic commit.
1942 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
1944 if (plane_state->uapi.dst.y1 < 0 ||
1945 plane_state->uapi.dst.x1 < 0 ||
1946 plane_state->scaler_id >= 0 ||
1947 plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
1954 * Check for pipe properties that are not supported by selective fetch.
1956 * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
1957 * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
1958 * enabled and going to the full update path.
1960 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
1962 if (crtc_state->scaler_state.scaler_id >= 0)
1968 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
1969 struct intel_crtc *crtc)
1971 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1972 struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
1973 struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
1974 struct intel_plane_state *new_plane_state, *old_plane_state;
1975 struct intel_plane *plane;
1976 bool full_update = false;
1979 if (!crtc_state->enable_psr2_sel_fetch)
1982 if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
1984 goto skip_sel_fetch_set_loop;
1988 * Calculate the minimal selective fetch area of each plane and the
1989 * overall pipe damaged area.
1990 * In the next loop the plane selective fetch area will actually be set
1991 * using the whole pipe damaged area.
1993 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
1994 new_plane_state, i) {
1995 struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
1998 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2001 if (!new_plane_state->uapi.visible &&
2002 !old_plane_state->uapi.visible)
2005 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2011 * If the visibility changed or the plane moved, mark the whole plane
2012 * area as damaged since it needs a complete redraw in both the old and new position.
2015 if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
2016 !drm_rect_equals(&new_plane_state->uapi.dst,
2017 &old_plane_state->uapi.dst)) {
2018 if (old_plane_state->uapi.visible) {
2019 damaged_area.y1 = old_plane_state->uapi.dst.y1;
2020 damaged_area.y2 = old_plane_state->uapi.dst.y2;
2021 clip_area_update(&pipe_clip, &damaged_area,
2022 &crtc_state->pipe_src);
2025 if (new_plane_state->uapi.visible) {
2026 damaged_area.y1 = new_plane_state->uapi.dst.y1;
2027 damaged_area.y2 = new_plane_state->uapi.dst.y2;
2028 clip_area_update(&pipe_clip, &damaged_area,
2029 &crtc_state->pipe_src);
2032 } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
2033 /* If alpha changed mark the whole plane area as damaged */
2034 damaged_area.y1 = new_plane_state->uapi.dst.y1;
2035 damaged_area.y2 = new_plane_state->uapi.dst.y2;
2036 clip_area_update(&pipe_clip, &damaged_area,
2037 &crtc_state->pipe_src);
2041 src = drm_plane_state_src(&new_plane_state->uapi);
2042 drm_rect_fp_to_int(&src, &src);
2044 if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
2045 &new_plane_state->uapi, &damaged_area))
2048 damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
2049 damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
2050 damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
2051 damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
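/*
 * Example of the translation above (illustrative): a plane scanned out at
 * dst.y1=100 from src.y1=0 moves a damage rect of y1=20..y2=40 in
 * framebuffer coordinates to y1=120..y2=140 in pipe coordinates before it
 * is merged into the pipe clip below.
 */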
2053 clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
2057 * TODO: For now we are just using full update in case
2058 * selective fetch area calculation fails. To optimize this we
2059 * should identify cases where this happens and fix the area
2060 * calculation for those.
2062 if (pipe_clip.y1 == -1) {
2063 drm_info_once(&dev_priv->drm,
2064 "Selective fetch area calculation failed in pipe %c\n",
2065 pipe_name(crtc->pipe));
2070 goto skip_sel_fetch_set_loop;
2072 /* Wa_14014971492 */
2073 if ((IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
2074 IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
2075 crtc_state->splitter.enable)
2078 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
2082 intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip);
2085 * Now that we have the pipe damaged area, check if it intersects with
2086 * each plane; if it does, set that plane's selective fetch area.
2088 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2089 new_plane_state, i) {
2090 struct drm_rect *sel_fetch_area, inter;
2091 struct intel_plane *linked = new_plane_state->planar_linked_plane;
2093 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
2094 !new_plane_state->uapi.visible)
2098 if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
2101 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2106 sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2107 sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
2108 sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
2109 crtc_state->update_planes |= BIT(plane->id);
2112 * The sel_fetch_area is calculated for the UV plane. Use the
2113 * same area for the Y plane as well.
2116 struct intel_plane_state *linked_new_plane_state;
2117 struct drm_rect *linked_sel_fetch_area;
2119 linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
2120 if (IS_ERR(linked_new_plane_state))
2121 return PTR_ERR(linked_new_plane_state);
2123 linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
2124 linked_sel_fetch_area->y1 = sel_fetch_area->y1;
2125 linked_sel_fetch_area->y2 = sel_fetch_area->y2;
2126 crtc_state->update_planes |= BIT(linked->id);
2130 skip_sel_fetch_set_loop:
2131 psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
2135 void intel_psr_pre_plane_update(struct intel_atomic_state *state,
2136 struct intel_crtc *crtc)
2138 struct drm_i915_private *i915 = to_i915(state->base.dev);
2139 const struct intel_crtc_state *old_crtc_state =
2140 intel_atomic_get_old_crtc_state(state, crtc);
2141 const struct intel_crtc_state *new_crtc_state =
2142 intel_atomic_get_new_crtc_state(state, crtc);
2143 struct intel_encoder *encoder;
2148 for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2149 old_crtc_state->uapi.encoder_mask) {
2150 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2151 struct intel_psr *psr = &intel_dp->psr;
2152 bool needs_to_disable = false;
2154 mutex_lock(&psr->lock);
2157 * Reasons to disable:
2158 * - PSR disabled in new state
2159 * - All planes will go inactive
2160 * - Changing between PSR versions
2161 * - Display WA #1136: skl, bxt
2163 needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
2164 needs_to_disable |= !new_crtc_state->has_psr;
2165 needs_to_disable |= !new_crtc_state->active_planes;
2166 needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled;
2167 needs_to_disable |= DISPLAY_VER(i915) < 11 &&
2168 new_crtc_state->wm_level_disabled;
2170 if (psr->enabled && needs_to_disable)
2171 intel_psr_disable_locked(intel_dp);
2172 else if (psr->enabled && new_crtc_state->wm_level_disabled)
2173 /* Wa_14015648006 */
2174 wm_optimization_wa(intel_dp, new_crtc_state);
2176 mutex_unlock(&psr->lock);
2180 static void _intel_psr_post_plane_update(const struct intel_atomic_state *state,
2181 const struct intel_crtc_state *crtc_state)
2183 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2184 struct intel_encoder *encoder;
2186 if (!crtc_state->has_psr)
2189 for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2190 crtc_state->uapi.encoder_mask) {
2191 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2192 struct intel_psr *psr = &intel_dp->psr;
2193 bool keep_disabled = false;
2195 mutex_lock(&psr->lock);
2197 drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
2199 keep_disabled |= psr->sink_not_reliable;
2200 keep_disabled |= !crtc_state->active_planes;
2202 /* Display WA #1136: skl, bxt */
2203 keep_disabled |= DISPLAY_VER(dev_priv) < 11 &&
2204 crtc_state->wm_level_disabled;
2206 if (!psr->enabled && !keep_disabled)
2207 intel_psr_enable_locked(intel_dp, crtc_state);
2208 else if (psr->enabled && !crtc_state->wm_level_disabled)
2209 /* Wa_14015648006 */
2210 wm_optimization_wa(intel_dp, crtc_state);
2212 /* Force a PSR exit when enabling CRC to avoid CRC timeouts */
2213 if (crtc_state->crc_enabled && psr->enabled)
2214 psr_force_hw_tracking_exit(intel_dp);
2216 mutex_unlock(&psr->lock);
2220 void intel_psr_post_plane_update(const struct intel_atomic_state *state)
2222 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2223 struct intel_crtc_state *crtc_state;
2224 struct intel_crtc *crtc;
2227 if (!HAS_PSR(dev_priv))
2230 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
2231 _intel_psr_post_plane_update(state, crtc_state);
2234 static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2236 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2237 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2240 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
2241 * As all higher states have bit 4 of the PSR2 state set we can just wait for
2242 * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
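* For instance, assuming DEEP_SLEEP is encoded as 0x8 in the state field,
* every state 0x8..0xf has that bit set, so seeing it cleared guarantees a
* state of 0x7 or lower.
*/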
2244 return intel_de_wait_for_clear(dev_priv,
2245 EDP_PSR2_STATUS(cpu_transcoder),
2246 EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
2249 static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2251 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2252 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2255 * From bspec: Panel Self Refresh (BDW+)
2256 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
2257 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
2258 * defensive enough to cover everything.
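* For example, at a 60 Hz refresh rate that adds up to roughly
* 16.7 ms + 6 ms + 1.5 ms ~= 24.2 ms, comfortably within the 50 ms timeout.
*/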
2260 return intel_de_wait_for_clear(dev_priv,
2261 psr_status_reg(dev_priv, cpu_transcoder),
2262 EDP_PSR_STATUS_STATE_MASK, 50);
2266 * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
2267 * @new_crtc_state: new CRTC state
2269 * This function is expected to be called from pipe_update_start() where it is
2270 * not expected to race with PSR enable or disable.
2272 void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
2274 struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
2275 struct intel_encoder *encoder;
2277 if (!new_crtc_state->has_psr)
2280 for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2281 new_crtc_state->uapi.encoder_mask) {
2282 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2285 lockdep_assert_held(&intel_dp->psr.lock);
2287 if (!intel_dp->psr.enabled)
2290 if (intel_dp->psr.psr2_enabled)
2291 ret = _psr2_ready_for_pipe_update_locked(intel_dp);
2293 ret = _psr1_ready_for_pipe_update_locked(intel_dp);
2296 drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
2300 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
2302 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2303 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2308 if (!intel_dp->psr.enabled)
2311 if (intel_dp->psr.psr2_enabled) {
2312 reg = EDP_PSR2_STATUS(cpu_transcoder);
2313 mask = EDP_PSR2_STATUS_STATE_MASK;
2315 reg = psr_status_reg(dev_priv, cpu_transcoder);
2316 mask = EDP_PSR_STATUS_STATE_MASK;
2319 mutex_unlock(&intel_dp->psr.lock);
2321 err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
2323 drm_err(&dev_priv->drm,
2324 "Timed out waiting for PSR Idle for re-enable\n");
2326 /* After the unlocked wait, verify that PSR is still wanted! */
2327 mutex_lock(&intel_dp->psr.lock);
2328 return err == 0 && intel_dp->psr.enabled;
2331 static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
2333 struct drm_connector_list_iter conn_iter;
2334 struct drm_modeset_acquire_ctx ctx;
2335 struct drm_atomic_state *state;
2336 struct drm_connector *conn;
2339 state = drm_atomic_state_alloc(&dev_priv->drm);
2343 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2345 state->acquire_ctx = &ctx;
2346 to_intel_atomic_state(state)->internal = true;
2349 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
2350 drm_for_each_connector_iter(conn, &conn_iter) {
2351 struct drm_connector_state *conn_state;
2352 struct drm_crtc_state *crtc_state;
2354 if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
2357 conn_state = drm_atomic_get_connector_state(state, conn);
2358 if (IS_ERR(conn_state)) {
2359 err = PTR_ERR(conn_state);
2363 if (!conn_state->crtc)
2366 crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
2367 if (IS_ERR(crtc_state)) {
2368 err = PTR_ERR(crtc_state);
2372 /* Mark mode as changed to trigger a pipe->update() */
2373 crtc_state->mode_changed = true;
2375 drm_connector_list_iter_end(&conn_iter);
2378 err = drm_atomic_commit(state);
2380 if (err == -EDEADLK) {
2381 drm_atomic_state_clear(state);
2382 err = drm_modeset_backoff(&ctx);
2387 drm_modeset_drop_locks(&ctx);
2388 drm_modeset_acquire_fini(&ctx);
2389 drm_atomic_state_put(state);
2394 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
2396 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2397 const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
2401 if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
2402 mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
2403 drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
2407 ret = mutex_lock_interruptible(&intel_dp->psr.lock);
2411 old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
2412 intel_dp->psr.debug = val;
2415 * Do it right away if it's already enabled, otherwise it will be done
2416 * when enabling the source.
2418 if (intel_dp->psr.enabled)
2419 psr_irq_control(intel_dp);
2421 mutex_unlock(&intel_dp->psr.lock);
2423 if (old_mode != mode)
2424 ret = intel_psr_fastset_force(dev_priv);
2429 static void intel_psr_handle_irq(struct intel_dp *intel_dp)
2431 struct intel_psr *psr = &intel_dp->psr;
2433 intel_psr_disable_locked(intel_dp);
2434 psr->sink_not_reliable = true;
2435 /* let's make sure that the sink is awake */
2436 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
2439 static void intel_psr_work(struct work_struct *work)
2441 struct intel_dp *intel_dp =
2442 container_of(work, typeof(*intel_dp), psr.work);
2444 mutex_lock(&intel_dp->psr.lock);
2446 if (!intel_dp->psr.enabled)
2449 if (READ_ONCE(intel_dp->psr.irq_aux_error))
2450 intel_psr_handle_irq(intel_dp);
2453 * We have to make sure PSR is ready for re-enable,
2454 * otherwise it stays disabled until the next full enable/disable cycle.
2455 * PSR might take some time to get fully disabled
2456 * and be ready for re-enable.
2458 if (!__psr_wait_for_idle_locked(intel_dp))
2462 * The delayed work can race with an invalidate hence we need to
2463 * recheck. Since psr_flush first clears this and then reschedules we
2464 * won't ever miss a flush when bailing out here.
2466 if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
2469 intel_psr_activate(intel_dp);
2471 mutex_unlock(&intel_dp->psr.lock);
2474 static void _psr_invalidate_handle(struct intel_dp *intel_dp)
2476 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2477 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2479 if (intel_dp->psr.psr2_sel_fetch_enabled) {
2482 if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2483 /* Send one update, otherwise lag is observed on screen */
2484 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2488 val = man_trk_ctl_enable_bit_get(dev_priv) |
2489 man_trk_ctl_partial_frame_bit_get(dev_priv) |
2490 man_trk_ctl_continuos_full_frame(dev_priv);
2491 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), val);
2492 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2493 intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
2495 intel_psr_exit(intel_dp);
2500 * intel_psr_invalidate - Invalidate PSR
2501 * @dev_priv: i915 device
2502 * @frontbuffer_bits: frontbuffer plane tracking bits
2503 * @origin: which operation caused the invalidate
2505 * Since the hardware frontbuffer tracking has gaps we need to integrate
2506 * with the software frontbuffer tracking. This function gets called every
2507 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
2508 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
2510 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
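*
* A typical (illustrative) pairing with intel_psr_flush(), e.g. for CPU
* rendering to pipe A's primary plane frontbuffer:
*
*	intel_psr_invalidate(dev_priv,
*			     INTEL_FRONTBUFFER(PIPE_A, PLANE_PRIMARY),
*			     ORIGIN_CPU);
*	(CPU writes to the frontbuffer happen here)
*	intel_psr_flush(dev_priv,
*			INTEL_FRONTBUFFER(PIPE_A, PLANE_PRIMARY),
*			ORIGIN_CPU);
*/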
2512 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
2513 unsigned frontbuffer_bits, enum fb_op_origin origin)
2515 struct intel_encoder *encoder;
2517 if (origin == ORIGIN_FLIP)
2520 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2521 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2522 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2524 mutex_lock(&intel_dp->psr.lock);
2525 if (!intel_dp->psr.enabled) {
2526 mutex_unlock(&intel_dp->psr.lock);
2530 pipe_frontbuffer_bits &=
2531 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2532 intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
2534 if (pipe_frontbuffer_bits)
2535 _psr_invalidate_handle(intel_dp);
2537 mutex_unlock(&intel_dp->psr.lock);
2541 * Once we completely rely on PSR2 S/W tracking in the future,
2542 * intel_psr_flush() will invalidate and flush the PSR for ORIGIN_FLIP
2543 * events as well, so tgl_dc3co_flush_locked() will need to be changed
2544 * accordingly.
2547 tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
2548 enum fb_op_origin origin)
2550 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2552 if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
2553 !intel_dp->psr.active)
2557 * Every frontbuffer flush/flip event modifies the delay of the delayed
2558 * work; when the delayed work finally runs it means the display has been idle.
2560 if (!(frontbuffer_bits &
2561 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
2564 tgl_psr2_enable_dc3co(intel_dp);
2565 mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
2566 intel_dp->psr.dc3co_exit_delay);
2569 static void _psr_flush_handle(struct intel_dp *intel_dp)
2571 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2572 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2574 if (intel_dp->psr.psr2_sel_fetch_enabled) {
2575 if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2576 /* can we turn CFF off? */
2577 if (intel_dp->psr.busy_frontbuffer_bits == 0) {
2578 u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
2579 man_trk_ctl_partial_frame_bit_get(dev_priv) |
2580 man_trk_ctl_single_full_frame_bit_get(dev_priv) |
2581 man_trk_ctl_continuos_full_frame(dev_priv);
2584 * Set psr2_sel_fetch_cff_enabled to false to allow selective
2585 * updates. Still keep the CFF bit enabled as we don't have a proper
2586 * SU configuration in case an update is sent for any reason after
2587 * the SFF bit gets cleared by the HW on the next vblank.
2589 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
2591 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2592 intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2596 * continuous full frame is disabled, only a single full frame is required
2599 psr_force_hw_tracking_exit(intel_dp);
2602 psr_force_hw_tracking_exit(intel_dp);
2604 if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
2605 queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
2610 * intel_psr_flush - Flush PSR
2611 * @dev_priv: i915 device
2612 * @frontbuffer_bits: frontbuffer plane tracking bits
2613 * @origin: which operation caused the flush
2615 * Since the hardware frontbuffer tracking has gaps we need to integrate
2616 * with the software frontbuffer tracking. This function gets called every
2617 * time frontbuffer rendering has completed and flushed out to memory. PSR
2618 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
2620 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2622 void intel_psr_flush(struct drm_i915_private *dev_priv,
2623 unsigned frontbuffer_bits, enum fb_op_origin origin)
2625 struct intel_encoder *encoder;
2627 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2628 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2629 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2631 mutex_lock(&intel_dp->psr.lock);
2632 if (!intel_dp->psr.enabled) {
2633 mutex_unlock(&intel_dp->psr.lock);
2637 pipe_frontbuffer_bits &=
2638 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2639 intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
2642 * If the PSR is paused by an explicit intel_psr_paused() call,
2643 * we have to ensure that the PSR is not activated until
2644 * intel_psr_resume() is called.
2646 if (intel_dp->psr.paused)
2649 if (origin == ORIGIN_FLIP ||
2650 (origin == ORIGIN_CURSOR_UPDATE &&
2651 !intel_dp->psr.psr2_sel_fetch_enabled)) {
2652 tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
2656 if (pipe_frontbuffer_bits == 0)
2659 /* By definition flush = invalidate + flush */
2660 _psr_flush_handle(intel_dp);
2662 mutex_unlock(&intel_dp->psr.lock);
2667 * intel_psr_init - Init basic PSR work and mutex.
2668 * @intel_dp: Intel DP
2670 * This function is called after initializing the connector
2671 * (connector initialization handles the connector capabilities)
2672 * and initializes the basic PSR state for each DP encoder.
2674 void intel_psr_init(struct intel_dp *intel_dp)
2676 struct intel_connector *connector = intel_dp->attached_connector;
2677 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2678 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2680 if (!HAS_PSR(dev_priv))
2684 * HSW spec explicitly says PSR is tied to port A.
2685 * BDW+ platforms have an instance of the PSR registers per transcoder, but
2686 * on BDW, GEN9 and GEN11 transcoders other than A were not validated by the HW team.
2688 * For now only one instance of PSR is supported on BDW, GEN9 and GEN11,
2689 * so let's keep it hardcoded to PORT_A for those platforms.
2690 * GEN12, however, supports an instance of the PSR registers per transcoder.
2692 if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
2693 drm_dbg_kms(&dev_priv->drm,
2694 "PSR condition failed: Port not supported\n");
2698 intel_dp->psr.source_support = true;
2700 /* Set link_standby vs. link_off defaults */
2701 if (DISPLAY_VER(dev_priv) < 12)
2702 /* For platforms up to TGL let's respect the VBT again */
2703 intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
2705 INIT_WORK(&intel_dp->psr.work, intel_psr_work);
2706 INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
2707 mutex_init(&intel_dp->psr.lock);
2710 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
2711 u8 *status, u8 *error_status)
2713 struct drm_dp_aux *aux = &intel_dp->aux;
2716 ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
2720 ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
2724 *status = *status & DP_PSR_SINK_STATE_MASK;
2729 static void psr_alpm_check(struct intel_dp *intel_dp)
2731 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2732 struct drm_dp_aux *aux = &intel_dp->aux;
2733 struct intel_psr *psr = &intel_dp->psr;
2737 if (!psr->psr2_enabled)
2740 r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
2742 drm_err(&dev_priv->drm, "Error reading ALPM status\n");
2746 if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
2747 intel_psr_disable_locked(intel_dp);
2748 psr->sink_not_reliable = true;
2749 drm_dbg_kms(&dev_priv->drm,
2750 "ALPM lock timeout error, disabling PSR\n");
2752 /* Clearing error */
2753 drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
2757 static void psr_capability_changed_check(struct intel_dp *intel_dp)
2759 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2760 struct intel_psr *psr = &intel_dp->psr;
2764 r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
2766 drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
2770 if (val & DP_PSR_CAPS_CHANGE) {
2771 intel_psr_disable_locked(intel_dp);
2772 psr->sink_not_reliable = true;
2773 drm_dbg_kms(&dev_priv->drm,
2774 "Sink PSR capability changed, disabling PSR\n");
2777 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
2781 void intel_psr_short_pulse(struct intel_dp *intel_dp)
2783 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2784 struct intel_psr *psr = &intel_dp->psr;
2785 u8 status, error_status;
2786 const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
2787 DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
2788 DP_PSR_LINK_CRC_ERROR;
2790 if (!CAN_PSR(intel_dp))
2793 mutex_lock(&psr->lock);
2798 if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
2799 drm_err(&dev_priv->drm,
2800 "Error reading PSR status or error status\n");
2804 if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
2805 intel_psr_disable_locked(intel_dp);
2806 psr->sink_not_reliable = true;
2809 if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
2810 drm_dbg_kms(&dev_priv->drm,
2811 "PSR sink internal error, disabling PSR\n");
2812 if (error_status & DP_PSR_RFB_STORAGE_ERROR)
2813 drm_dbg_kms(&dev_priv->drm,
2814 "PSR RFB storage error, disabling PSR\n");
2815 if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
2816 drm_dbg_kms(&dev_priv->drm,
2817 "PSR VSC SDP uncorrectable error, disabling PSR\n");
2818 if (error_status & DP_PSR_LINK_CRC_ERROR)
2819 drm_dbg_kms(&dev_priv->drm,
2820 "PSR Link CRC error, disabling PSR\n");
2822 if (error_status & ~errors)
2823 drm_err(&dev_priv->drm,
2824 "PSR_ERROR_STATUS unhandled errors %x\n",
2825 error_status & ~errors);
2826 /* clear status register */
2827 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
2829 psr_alpm_check(intel_dp);
2830 psr_capability_changed_check(intel_dp);
2833 mutex_unlock(&psr->lock);
2836 bool intel_psr_enabled(struct intel_dp *intel_dp)
2840 if (!CAN_PSR(intel_dp))
2843 mutex_lock(&intel_dp->psr.lock);
2844 ret = intel_dp->psr.enabled;
2845 mutex_unlock(&intel_dp->psr.lock);
2851 * intel_psr_lock - grab PSR lock
2852 * @crtc_state: the crtc state
2854 * This is initially meant to be used around the CRTC update, when
2855 * vblank-sensitive registers are updated and we need to grab the lock
2856 * before that to avoid vblank evasion.
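*
* A minimal sketch of the intended use (illustrative):
*
*	intel_psr_lock(new_crtc_state);
*	intel_psr_wait_for_idle_locked(new_crtc_state);
*	(update vblank-sensitive registers here)
*	intel_psr_unlock(new_crtc_state);
*/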
2858 void intel_psr_lock(const struct intel_crtc_state *crtc_state)
2860 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2861 struct intel_encoder *encoder;
2863 if (!crtc_state->has_psr)
2866 for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
2867 crtc_state->uapi.encoder_mask) {
2868 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2870 mutex_lock(&intel_dp->psr.lock);
2876 * intel_psr_unlock - release PSR lock
2877 * @crtc_state: the crtc state
2879 * Release the PSR lock that was held during pipe update.
2881 void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
2883 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2884 struct intel_encoder *encoder;
2886 if (!crtc_state->has_psr)
2889 for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
2890 crtc_state->uapi.encoder_mask) {
2891 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2893 mutex_unlock(&intel_dp->psr.lock);
2899 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
2901 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2902 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2903 const char *status = "unknown";
2904 u32 val, status_val;
2906 if (intel_dp->psr.psr2_enabled) {
2907 static const char * const live_status[] = {
2920 val = intel_de_read(dev_priv, EDP_PSR2_STATUS(cpu_transcoder));
2921 status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
2922 if (status_val < ARRAY_SIZE(live_status))
2923 status = live_status[status_val];
2925 static const char * const live_status[] = {
2935 val = intel_de_read(dev_priv, psr_status_reg(dev_priv, cpu_transcoder));
2936 status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
2937 if (status_val < ARRAY_SIZE(live_status))
2938 status = live_status[status_val];
2941 seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
2944 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
2946 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2947 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2948 struct intel_psr *psr = &intel_dp->psr;
2949 intel_wakeref_t wakeref;
2954 seq_printf(m, "Sink support: %s", str_yes_no(psr->sink_support));
2955 if (psr->sink_support)
2956 seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
2959 if (!psr->sink_support)
2962 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2963 mutex_lock(&psr->lock);
2966 status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
2968 status = "disabled";
2969 seq_printf(m, "PSR mode: %s\n", status);
2971 if (!psr->enabled) {
2972 seq_printf(m, "PSR sink not reliable: %s\n",
2973 str_yes_no(psr->sink_not_reliable));
2978 if (psr->psr2_enabled) {
2979 val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
2980 enabled = val & EDP_PSR2_ENABLE;
2982 val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
2983 enabled = val & EDP_PSR_ENABLE;
2985 seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
2986 str_enabled_disabled(enabled), val);
2987 psr_source_status(intel_dp, m);
2988 seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
2989 psr->busy_frontbuffer_bits);
2992 * SKL+ Perf counter is reset to 0 every time a DC state is entered
2994 val = intel_de_read(dev_priv, psr_perf_cnt_reg(dev_priv, cpu_transcoder));
2995 seq_printf(m, "Performance counter: %u\n",
2996 REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
2998 if (psr->debug & I915_PSR_DEBUG_IRQ) {
2999 seq_printf(m, "Last attempted entry at: %lld\n",
3000 psr->last_entry_attempt);
3001 seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
3004 if (psr->psr2_enabled) {
3005 u32 su_frames_val[3];
3009 * Reading all 3 registers beforehand to minimize crossing a
3010 * frame boundary between register reads
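* Each PSR2_SU_STATUS register is assumed to pack the SU block counts of
* three consecutive frames, which is why frame N is read from register
* N / 3 and stored in su_frames_val[N / 3].
*/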
3012 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
3013 val = intel_de_read(dev_priv, PSR2_SU_STATUS(cpu_transcoder, frame));
3014 su_frames_val[frame / 3] = val;
3017 seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
3019 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
3022 su_blocks = su_frames_val[frame / 3] &
3023 PSR2_SU_STATUS_MASK(frame);
3024 su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
3025 seq_printf(m, "%d\t%d\n", frame, su_blocks);
3028 seq_printf(m, "PSR2 selective fetch: %s\n",
3029 str_enabled_disabled(psr->psr2_sel_fetch_enabled));
3033 mutex_unlock(&psr->lock);
3034 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3039 static int i915_edp_psr_status_show(struct seq_file *m, void *data)
3041 struct drm_i915_private *dev_priv = m->private;
3042 struct intel_dp *intel_dp = NULL;
3043 struct intel_encoder *encoder;
3045 if (!HAS_PSR(dev_priv))
3048 /* Find the first EDP which supports PSR */
3049 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3050 intel_dp = enc_to_intel_dp(encoder);
3057 return intel_psr_status(m, intel_dp);
3059 DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
3062 i915_edp_psr_debug_set(void *data, u64 val)
3064 struct drm_i915_private *dev_priv = data;
3065 struct intel_encoder *encoder;
3066 intel_wakeref_t wakeref;
3069 if (!HAS_PSR(dev_priv))
3072 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3073 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3075 drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
3077 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3079 // TODO: split to each transcoder's PSR debug state
3080 ret = intel_psr_debug_set(intel_dp, val);
3082 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3089 i915_edp_psr_debug_get(void *data, u64 *val)
3091 struct drm_i915_private *dev_priv = data;
3092 struct intel_encoder *encoder;
3094 if (!HAS_PSR(dev_priv))
3097 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3098 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3100 // TODO: split to each transcoder's PSR debug state
3101 *val = READ_ONCE(intel_dp->psr.debug);
3108 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
3109 i915_edp_psr_debug_get, i915_edp_psr_debug_set,
3112 void intel_psr_debugfs_register(struct drm_i915_private *i915)
3114 struct drm_minor *minor = i915->drm.primary;
3116 debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
3117 i915, &i915_edp_psr_debug_fops);
3119 debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
3120 i915, &i915_edp_psr_status_fops);
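/*
 * Illustrative usage from userspace, assuming debugfs is mounted at
 * /sys/kernel/debug and I915_PSR_DEBUG_FORCE_PSR1 == 0x3:
 *
 *	echo 0x3 > /sys/kernel/debug/dri/0/i915_edp_psr_debug
 *	cat /sys/kernel/debug/dri/0/i915_edp_psr_status
 */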
3123 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
3125 struct intel_connector *connector = m->private;
3126 struct intel_dp *intel_dp = intel_attached_dp(connector);
3127 static const char * const sink_status[] = {
3129 "transition to active, capture and display",
3130 "active, display from RFB",
3131 "active, capture and display on sink device timings",
3132 "transition to inactive, capture and display, timing re-sync",
3135 "sink internal error",
3141 if (!CAN_PSR(intel_dp)) {
3142 seq_puts(m, "PSR Unsupported\n");
3146 if (connector->base.status != connector_status_connected)
3149 ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
3151 return ret < 0 ? ret : -EIO;
3153 val &= DP_PSR_SINK_STATE_MASK;
3154 if (val < ARRAY_SIZE(sink_status))
3155 str = sink_status[val];
3159 seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
3163 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
3165 static int i915_psr_status_show(struct seq_file *m, void *data)
3167 struct intel_connector *connector = m->private;
3168 struct intel_dp *intel_dp = intel_attached_dp(connector);
3170 return intel_psr_status(m, intel_dp);
3172 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
3174 void intel_psr_connector_debugfs_add(struct intel_connector *connector)
3176 struct drm_i915_private *i915 = to_i915(connector->base.dev);
3177 struct dentry *root = connector->base.debugfs_entry;
3179 if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
3182 debugfs_create_file("i915_psr_sink_status", 0444, root,
3183 connector, &i915_psr_sink_status_fops);
3186 debugfs_create_file("i915_psr_status", 0444, root,
3187 connector, &i915_psr_status_fops);