drm/i915/lnl: Remove watchdog timers for PSR
drivers/gpu/drm/i915/display/intel_psr.c
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23
24 #include <drm/drm_atomic_helper.h>
25 #include <drm/drm_damage_helper.h>
26
27 #include "i915_drv.h"
28 #include "i915_reg.h"
29 #include "intel_atomic.h"
30 #include "intel_crtc.h"
31 #include "intel_de.h"
32 #include "intel_display_types.h"
33 #include "intel_dp.h"
34 #include "intel_dp_aux.h"
35 #include "intel_hdmi.h"
36 #include "intel_psr.h"
37 #include "intel_psr_regs.h"
38 #include "intel_snps_phy.h"
39 #include "skl_universal_plane.h"
40
41 /**
42  * DOC: Panel Self Refresh (PSR/SRD)
43  *
44  * Since Haswell the display controller supports Panel Self-Refresh on
45  * display panels which have a remote frame buffer (RFB) implemented
46  * according to the PSR spec in eDP 1.3. The PSR feature allows the display
47  * to go to lower standby states when the system is idle but the display is
48  * on, as it completely eliminates display refresh requests to DDR memory as
49  * long as the frame buffer for that display is unchanged.
50  *
51  * Panel Self Refresh must be supported by both Hardware (source) and
52  * Panel (sink).
53  *
54  * PSR saves power by caching the framebuffer in the panel RFB, which allows us
55  * to power down the link and memory controller. For DSI panels the same idea
56  * is called "manual mode".
57  *
58  * The implementation uses the hardware-based PSR support which automatically
59  * enters/exits self-refresh mode. The hardware takes care of sending the
60  * required DP aux message and could even retrain the link (that part isn't
61  * enabled yet though). The hardware also keeps track of any frontbuffer
62  * changes to know when to exit self-refresh mode again. Unfortunately that
63  * part doesn't work too well, which is why the i915 PSR support uses the
64  * software frontbuffer tracking to make sure it doesn't miss a screen
65  * update. For this integration intel_psr_invalidate() and intel_psr_flush()
66  * get called by the frontbuffer tracking code. Note that because of locking
67  * issues the self-refresh re-enable code is done from a work queue, which
68  * must be correctly synchronized/cancelled when shutting down the pipe.
69  *
70  * DC3CO (DC3 clock off)
71  *
72  * On top of PSR2, GEN12 adds an intermediate power-saving state that turns
73  * the clock off automatically during the PSR2 idle state.
74  * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep
75  * entry/exit allows the HW to enter a low-power state even when page flipping
76  * periodically (for instance a 30fps video playback scenario).
77  *
78  * Every time a flip occurs PSR2 gets out of deep sleep state (if it was in it),
79  * so DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run after 6
80  * frames. If no other flip occurs and that work function executes, DC3CO is
81  * disabled and PSR2 is configured to enter deep sleep again, resetting once
82  * more in case of another flip.
83  * Front buffer modifications do not trigger DC3CO activation on purpose, as it
84  * would bring a lot of complexity and most modern systems will only
85  * use page flips.
86  */
87
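/*
 * Illustrative sketch only (not the exact call sites; see intel_psr.h for the
 * actual prototypes): the frontbuffer tracking code is expected to bracket
 * frontbuffer rendering roughly like
 *
 *	intel_psr_invalidate(i915, frontbuffer_bits, origin);
 *	... CPU/GTT rendering to the frontbuffer ...
 *	intel_psr_flush(i915, frontbuffer_bits, origin);
 *
 * so that PSR is exited while the frontbuffer is dirty and re-armed (from a
 * work queue) once the flush is seen.
 */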
88 /*
89  * Description of PSR mask bits:
90  *
91  * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
92  *
93  *  When unmasked (nearly) all display register writes (e.g. even
94  *  SWF) trigger a PSR exit. Some registers are excluded from this
95  *  and they have a more specific mask (described below). On icl+
96  *  this bit no longer exists and is effectively always set.
97  *
98  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
99  *
100  *  When unmasked (nearly) all pipe/plane register writes
101  *  trigger a PSR exit. Some plane registers are excluded from this
102  *  and they have a more specific mask (described below).
103  *
104  * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
105  * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
106  * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
107  *
108  *  When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
109  *  SPR_SURF/CURBASE are not included in this and instead are
110  *  controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
111  *  EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
112  *
113  * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
114  * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
115  *
116  *  When unmasked PSR is blocked as long as the sprite
117  *  plane is enabled. skl+ with their universal planes no
118  *  longer have a mask bit like this, and no plane being
119  *  enabled blocks PSR.
120  *
121  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
122  * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
123  *
124  *  When unmasked CURPOS writes trigger a PSR exit. On skl+
125  *  this doesn't exist but CURPOS is included in the
126  *  PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
127  *
128  * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
129  * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
130  *
131  *  When unmasked PSR is blocked as long as vblank and/or vsync
132  *  interrupt is unmasked in IMR *and* enabled in IER.
133  *
134  * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
135  * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
136  *
137  *  Selects whether PSR exit generates an extra vblank before
138  *  the first frame is transmitted. Also note the opposite polarity
139  *  of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
140  *  unmasked==do not generate the extra vblank).
141  *
142  *  With DC states enabled the extra vblank happens after link training,
143  *  with DC states disabled it happens immediately upon PSR exit trigger.
144  *  No idea as of now why there is a difference. HSW/BDW (which don't
145  *  even have DMC) always generate it after link training. Go figure.
146  *
147  *  Unfortunately CHICKEN_TRANS itself seems to be double buffered
148  *  and thus won't latch until the first vblank. So with DC states
149  *  enabled the register effectively uses the reset value during DC5
150  *  exit+PSR exit sequence, and thus the bit does nothing until
151  *  latched by the vblank that it was trying to prevent from being
152  *  generated in the first place. So we should probably call this
153  *  one a chicken/egg bit instead on skl+.
154  *
155  *  In standby mode (as opposed to link-off) this makes no difference
156  *  as the timing generator keeps running the whole time generating
157  *  normal periodic vblanks.
158  *
159  *  WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
160  *  and doing so makes the behaviour match the skl+ reset value.
161  *
162  * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
163  * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
164  *
165  *  On BDW without this bit set no vblanks whatsoever are
166  *  generated after PSR exit. On HSW this has no apparent effect.
167  *  WaPsrDPRSUnmaskVBlankInSRD says to set this.
168  *
169  * The rest of the bits are more self-explanatory and/or
170  * irrelevant for normal operation.
171  */
172
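/*
 * Resolve whether PSR is allowed at all for this sink: the debugfs override
 * (intel_dp->psr.debug) takes precedence, then the enable_psr module
 * parameter, and finally the VBT default when the parameter is left at -1.
 */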
173 static bool psr_global_enabled(struct intel_dp *intel_dp)
174 {
175         struct intel_connector *connector = intel_dp->attached_connector;
176         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
177
178         switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
179         case I915_PSR_DEBUG_DEFAULT:
180                 if (i915->params.enable_psr == -1)
181                         return connector->panel.vbt.psr.enable;
182                 return i915->params.enable_psr;
183         case I915_PSR_DEBUG_DISABLE:
184                 return false;
185         default:
186                 return true;
187         }
188 }
189
190 static bool psr2_global_enabled(struct intel_dp *intel_dp)
191 {
192         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
193
194         switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
195         case I915_PSR_DEBUG_DISABLE:
196         case I915_PSR_DEBUG_FORCE_PSR1:
197                 return false;
198         default:
199                 if (i915->params.enable_psr == 1)
200                         return false;
201                 return true;
202         }
203 }
204
205 static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
206 {
207         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
208
209         return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
210                 EDP_PSR_ERROR(intel_dp->psr.transcoder);
211 }
212
213 static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
214 {
215         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
216
217         return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
218                 EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
219 }
220
221 static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
222 {
223         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
224
225         return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
226                 EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
227 }
228
229 static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
230 {
231         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
232
233         return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
234                 EDP_PSR_MASK(intel_dp->psr.transcoder);
235 }
236
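/*
 * HSW keeps its PSR (SRD) registers at fixed MMIO offsets, while BDW+ have
 * per-transcoder copies (and TGL+ additionally moved the PSR IMR/IIR bits to
 * per-transcoder registers); these helpers select the right register.
 */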
237 static i915_reg_t psr_ctl_reg(struct drm_i915_private *dev_priv,
238                               enum transcoder cpu_transcoder)
239 {
240         if (DISPLAY_VER(dev_priv) >= 8)
241                 return EDP_PSR_CTL(cpu_transcoder);
242         else
243                 return HSW_SRD_CTL;
244 }
245
246 static i915_reg_t psr_debug_reg(struct drm_i915_private *dev_priv,
247                                 enum transcoder cpu_transcoder)
248 {
249         if (DISPLAY_VER(dev_priv) >= 8)
250                 return EDP_PSR_DEBUG(cpu_transcoder);
251         else
252                 return HSW_SRD_DEBUG;
253 }
254
255 static i915_reg_t psr_perf_cnt_reg(struct drm_i915_private *dev_priv,
256                                    enum transcoder cpu_transcoder)
257 {
258         if (DISPLAY_VER(dev_priv) >= 8)
259                 return EDP_PSR_PERF_CNT(cpu_transcoder);
260         else
261                 return HSW_SRD_PERF_CNT;
262 }
263
264 static i915_reg_t psr_status_reg(struct drm_i915_private *dev_priv,
265                                  enum transcoder cpu_transcoder)
266 {
267         if (DISPLAY_VER(dev_priv) >= 8)
268                 return EDP_PSR_STATUS(cpu_transcoder);
269         else
270                 return HSW_SRD_STATUS;
271 }
272
273 static i915_reg_t psr_imr_reg(struct drm_i915_private *dev_priv,
274                               enum transcoder cpu_transcoder)
275 {
276         if (DISPLAY_VER(dev_priv) >= 12)
277                 return TRANS_PSR_IMR(cpu_transcoder);
278         else
279                 return EDP_PSR_IMR;
280 }
281
282 static i915_reg_t psr_iir_reg(struct drm_i915_private *dev_priv,
283                               enum transcoder cpu_transcoder)
284 {
285         if (DISPLAY_VER(dev_priv) >= 12)
286                 return TRANS_PSR_IIR(cpu_transcoder);
287         else
288                 return EDP_PSR_IIR;
289 }
290
291 static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
292                                   enum transcoder cpu_transcoder)
293 {
294         if (DISPLAY_VER(dev_priv) >= 8)
295                 return EDP_PSR_AUX_CTL(cpu_transcoder);
296         else
297                 return HSW_SRD_AUX_CTL;
298 }
299
300 static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
301                                    enum transcoder cpu_transcoder, int i)
302 {
303         if (DISPLAY_VER(dev_priv) >= 8)
304                 return EDP_PSR_AUX_DATA(cpu_transcoder, i);
305         else
306                 return HSW_SRD_AUX_DATA(i);
307 }
308
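/*
 * Unmask the PSR interrupts we care about: the error interrupt always, plus
 * the pre-entry/post-exit interrupts when I915_PSR_DEBUG_IRQ is requested.
 */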
309 static void psr_irq_control(struct intel_dp *intel_dp)
310 {
311         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
312         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
313         u32 mask;
314
315         mask = psr_irq_psr_error_bit_get(intel_dp);
316         if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
317                 mask |= psr_irq_post_exit_bit_get(intel_dp) |
318                         psr_irq_pre_entry_bit_get(intel_dp);
319
320         intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
321                      psr_irq_mask_get(intel_dp), ~mask);
322 }
323
324 static void psr_event_print(struct drm_i915_private *i915,
325                             u32 val, bool psr2_enabled)
326 {
327         drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
328         if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
329                 drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
330         if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
331                 drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
332         if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
333                 drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
334         if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
335                 drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
336         if (val & PSR_EVENT_GRAPHICS_RESET)
337                 drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
338         if (val & PSR_EVENT_PCH_INTERRUPT)
339                 drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
340         if (val & PSR_EVENT_MEMORY_UP)
341                 drm_dbg_kms(&i915->drm, "\tMemory up\n");
342         if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
343                 drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
344         if (val & PSR_EVENT_WD_TIMER_EXPIRE)
345                 drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
346         if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
347                 drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
348         if (val & PSR_EVENT_REGISTER_UPDATE)
349                 drm_dbg_kms(&i915->drm, "\tRegister updated\n");
350         if (val & PSR_EVENT_HDCP_ENABLE)
351                 drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
352         if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
353                 drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
354         if (val & PSR_EVENT_VBI_ENABLE)
355                 drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
356         if (val & PSR_EVENT_LPSP_MODE_EXIT)
357                 drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
358         if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
359                 drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
360 }
361
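/*
 * intel_psr_irq_handler - handle PSR interrupts for one transcoder
 * @intel_dp: DP encoder whose PSR interrupt fired
 * @psr_iir: value read from the PSR IIR register by the interrupt handler
 *
 * Records entry/exit timestamps for debugging, dumps the PSR_EVENT register
 * on exit (display 9+), and on an AUX error masks the error interrupt and
 * schedules the PSR work to handle the error.
 */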
362 void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
363 {
364         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
365         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
366         ktime_t time_ns =  ktime_get();
367
368         if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
369                 intel_dp->psr.last_entry_attempt = time_ns;
370                 drm_dbg_kms(&dev_priv->drm,
371                             "[transcoder %s] PSR entry attempt in 2 vblanks\n",
372                             transcoder_name(cpu_transcoder));
373         }
374
375         if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
376                 intel_dp->psr.last_exit = time_ns;
377                 drm_dbg_kms(&dev_priv->drm,
378                             "[transcoder %s] PSR exit completed\n",
379                             transcoder_name(cpu_transcoder));
380
381                 if (DISPLAY_VER(dev_priv) >= 9) {
382                         u32 val;
383
384                         val = intel_de_rmw(dev_priv, PSR_EVENT(cpu_transcoder), 0, 0);
385
386                         psr_event_print(dev_priv, val, intel_dp->psr.psr2_enabled);
387                 }
388         }
389
390         if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
391                 drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
392                          transcoder_name(cpu_transcoder));
393
394                 intel_dp->psr.irq_aux_error = true;
395
396                 /*
397                  * If this interrupt is not masked it will keep
398                  * firing so fast that it prevents the scheduled
399                  * work from running.
400                  * Also, after a PSR error we don't want to arm PSR
401                  * again, so we don't care about unmasking the interrupt
402                  * or clearing irq_aux_error.
403                  */
404                 intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
405                              0, psr_irq_psr_error_bit_get(intel_dp));
406
407                 queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
408         }
409 }
410
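/* ALPM support in the sink is a prerequisite for PSR2. */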
411 static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
412 {
413         u8 alpm_caps = 0;
414
415         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
416                               &alpm_caps) != 1)
417                 return false;
418         return alpm_caps & DP_ALPM_CAP;
419 }
420
421 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
422 {
423         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
424         u8 val = 8; /* assume the worst if we can't read the value */
425
426         if (drm_dp_dpcd_readb(&intel_dp->aux,
427                               DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
428                 val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
429         else
430                 drm_dbg_kms(&i915->drm,
431                             "Unable to get sink synchronization latency, assuming 8 frames\n");
432         return val;
433 }
434
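/*
 * Read the sink's PSR2 selective update granularity requirements from DPCD,
 * falling back to the legacy/default values when the sink doesn't require a
 * specific granularity or when the DPCD read fails.
 */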
435 static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
436 {
437         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
438         ssize_t r;
439         u16 w;
440         u8 y;
441
442         /* If the sink doesn't have specific granularity requirements, set legacy ones */
443         if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
444                 /* As PSR2 HW sends full lines, we do not care about x granularity */
445                 w = 4;
446                 y = 4;
447                 goto exit;
448         }
449
450         r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2);
451         if (r != 2)
452                 drm_dbg_kms(&i915->drm,
453                             "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
454         /*
455          * Spec says that if the value read is 0 the default granularity should
456          * be used instead.
457          */
458         if (r != 2 || w == 0)
459                 w = 4;
460
461         r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
462         if (r != 1) {
463                 drm_dbg_kms(&i915->drm,
464                             "Unable to read DP_PSR2_SU_Y_GRANULARITY\n");
465                 y = 4;
466         }
467         if (y == 0)
468                 y = 1;
469
470 exit:
471         intel_dp->psr.su_w_granularity = w;
472         intel_dp->psr.su_y_granularity = y;
473 }
474
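/*
 * Read the sink's PSR capabilities from DPCD and cache what the rest of the
 * PSR code needs: overall sink support, sink sync latency and, for PSR2
 * capable panels, the ALPM/Y-coordinate requirements, colorimetry support
 * and SU granularity.
 */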
475 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
476 {
477         struct drm_i915_private *dev_priv =
478                 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
479
480         drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
481                          sizeof(intel_dp->psr_dpcd));
482
483         if (!intel_dp->psr_dpcd[0])
484                 return;
485         drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
486                     intel_dp->psr_dpcd[0]);
487
488         if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
489                 drm_dbg_kms(&dev_priv->drm,
490                             "PSR support not currently available for this panel\n");
491                 return;
492         }
493
494         if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
495                 drm_dbg_kms(&dev_priv->drm,
496                             "Panel lacks power state control, PSR cannot be enabled\n");
497                 return;
498         }
499
500         intel_dp->psr.sink_support = true;
501         intel_dp->psr.sink_sync_latency =
502                 intel_dp_get_sink_sync_latency(intel_dp);
503
504         if (DISPLAY_VER(dev_priv) >= 9 &&
505             (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
506                 bool y_req = intel_dp->psr_dpcd[1] &
507                              DP_PSR2_SU_Y_COORDINATE_REQUIRED;
508                 bool alpm = intel_dp_get_alpm_status(intel_dp);
509
510                 /*
511                  * All panels that support PSR version 03h (PSR2 +
512                  * Y-coordinate) can handle Y-coordinates in the VSC, but we
513                  * are only sure it is going to be used when required by the
514                  * panel. This way the panel is capable of doing selective
515                  * updates without an aux frame sync.
516                  *
517                  * To support panels with PSR version 02h, or PSR version 03h
518                  * without the Y-coordinate requirement, we would need to
519                  * enable GTC first.
520                  */
521                 intel_dp->psr.sink_psr2_support = y_req && alpm;
522                 drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
523                             intel_dp->psr.sink_psr2_support ? "" : "not ");
524
525                 if (intel_dp->psr.sink_psr2_support) {
526                         intel_dp->psr.colorimetry_support =
527                                 intel_dp_get_colorimetry_status(intel_dp);
528                         intel_dp_get_su_granularity(intel_dp);
529                 }
530         }
531 }
532
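/*
 * On HSW/BDW the hardware itself sends the "wake up" AUX transaction on PSR
 * exit, so pre-program the SRD AUX data/control registers with a
 * DP_SET_POWER=D0 native write for the hardware to send.
 */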
533 static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
534 {
535         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
536         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
537         u32 aux_clock_divider, aux_ctl;
538         /* write DP_SET_POWER=D0 */
539         static const u8 aux_msg[] = {
540                 [0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
541                 [1] = (DP_SET_POWER >> 8) & 0xff,
542                 [2] = DP_SET_POWER & 0xff,
543                 [3] = 1 - 1,
544                 [4] = DP_SET_POWER_D0,
545         };
546         int i;
547
548         BUILD_BUG_ON(sizeof(aux_msg) > 20);
549         for (i = 0; i < sizeof(aux_msg); i += 4)
550                 intel_de_write(dev_priv,
551                                psr_aux_data_reg(dev_priv, cpu_transcoder, i >> 2),
552                                intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
553
554         aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
555
556         /* Start with bits set for DDI_AUX_CTL register */
557         aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
558                                              aux_clock_divider);
559
560         /* Select only valid bits for SRD_AUX_CTL */
561         aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
562                 EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
563                 EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
564                 EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
565
566         intel_de_write(dev_priv, psr_aux_ctl_reg(dev_priv, cpu_transcoder),
567                        aux_ctl);
568 }
569
570 static void intel_psr_enable_sink(struct intel_dp *intel_dp)
571 {
572         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
573         u8 dpcd_val = DP_PSR_ENABLE;
574
575         /* Enable ALPM at sink for psr2 */
576         if (intel_dp->psr.psr2_enabled) {
577                 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
578                                    DP_ALPM_ENABLE |
579                                    DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
580
581                 dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
582         } else {
583                 if (intel_dp->psr.link_standby)
584                         dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
585
586                 if (DISPLAY_VER(dev_priv) >= 8)
587                         dpcd_val |= DP_PSR_CRC_VERIFICATION;
588         }
589
590         if (intel_dp->psr.req_psr2_sdp_prior_scanline)
591                 dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
592
593         drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
594
595         drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
596 }
597
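/*
 * Translate the VBT TP1/TP2/TP3 wakeup times into the EDP_PSR_CTL training
 * pattern time fields, honoring the psr_safest_params override and the
 * "do not skip both TP1 and TP2/TP3" workaround on hsw/bdw.
 */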
598 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
599 {
600         struct intel_connector *connector = intel_dp->attached_connector;
601         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
602         u32 val = 0;
603
604         if (DISPLAY_VER(dev_priv) >= 11)
605                 val |= EDP_PSR_TP4_TIME_0us;
606
607         if (dev_priv->params.psr_safest_params) {
608                 val |= EDP_PSR_TP1_TIME_2500us;
609                 val |= EDP_PSR_TP2_TP3_TIME_2500us;
610                 goto check_tp3_sel;
611         }
612
613         if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
614                 val |= EDP_PSR_TP1_TIME_0us;
615         else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
616                 val |= EDP_PSR_TP1_TIME_100us;
617         else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
618                 val |= EDP_PSR_TP1_TIME_500us;
619         else
620                 val |= EDP_PSR_TP1_TIME_2500us;
621
622         if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
623                 val |= EDP_PSR_TP2_TP3_TIME_0us;
624         else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
625                 val |= EDP_PSR_TP2_TP3_TIME_100us;
626         else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
627                 val |= EDP_PSR_TP2_TP3_TIME_500us;
628         else
629                 val |= EDP_PSR_TP2_TP3_TIME_2500us;
630
631         /*
632          * WA 0479: hsw,bdw
633          * "Do not skip both TP1 and TP2/TP3"
634          */
635         if (DISPLAY_VER(dev_priv) < 9 &&
636             connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
637             connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
638                 val |= EDP_PSR_TP2_TP3_TIME_100us;
639
640 check_tp3_sel:
641         if (intel_dp_source_supports_tps3(dev_priv) &&
642             drm_dp_tps3_supported(intel_dp->dpcd))
643                 val |= EDP_PSR_TP_TP1_TP3;
644         else
645                 val |= EDP_PSR_TP_TP1_TP2;
646
647         return val;
648 }
649
650 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
651 {
652         struct intel_connector *connector = intel_dp->attached_connector;
653         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
654         int idle_frames;
655
656         /* Let's use 6 as the minimum to cover all known cases including the
657          * off-by-one issue that HW has in some cases.
658          */
659         idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
660         idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
661
662         if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
663                 idle_frames = 0xf;
664
665         return idle_frames;
666 }
667
668 static void hsw_activate_psr1(struct intel_dp *intel_dp)
669 {
670         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
671         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
672         u32 max_sleep_time = 0x1f;
673         u32 val = EDP_PSR_ENABLE;
674
675         val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
676
677         if (DISPLAY_VER(dev_priv) < 20)
678                 val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
679
680         if (IS_HASWELL(dev_priv))
681                 val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
682
683         if (intel_dp->psr.link_standby)
684                 val |= EDP_PSR_LINK_STANDBY;
685
686         val |= intel_psr1_get_tp_time(intel_dp);
687
688         if (DISPLAY_VER(dev_priv) >= 8)
689                 val |= EDP_PSR_CRC_ENABLE;
690
691         intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
692                      ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
693 }
694
695 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
696 {
697         struct intel_connector *connector = intel_dp->attached_connector;
698         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
699         u32 val = 0;
700
701         if (dev_priv->params.psr_safest_params)
702                 return EDP_PSR2_TP2_TIME_2500us;
703
704         if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
705             connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
706                 val |= EDP_PSR2_TP2_TIME_50us;
707         else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
708                 val |= EDP_PSR2_TP2_TIME_100us;
709         else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
710                 val |= EDP_PSR2_TP2_TIME_500us;
711         else
712                 val |= EDP_PSR2_TP2_TIME_2500us;
713
714         return val;
715 }
716
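/*
 * PSR2_CTL block count: 8 lines when both the IO and fast wake line counts
 * are below 9 lines, otherwise 12 lines; psr2_block_count() converts that to
 * the units of 4 lines used by the register field.
 */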
717 static int psr2_block_count_lines(struct intel_dp *intel_dp)
718 {
719         return intel_dp->psr.io_wake_lines < 9 &&
720                 intel_dp->psr.fast_wake_lines < 9 ? 8 : 12;
721 }
722
723 static int psr2_block_count(struct intel_dp *intel_dp)
724 {
725         return psr2_block_count_lines(intel_dp) / 4;
726 }
727
728 static void hsw_activate_psr2(struct intel_dp *intel_dp)
729 {
730         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
731         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
732         u32 val = EDP_PSR2_ENABLE;
733
734         val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
735
736         if (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))
737                 val |= EDP_SU_TRACK_ENABLE;
738
739         if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12)
740                 val |= EDP_Y_COORDINATE_ENABLE;
741
742         val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2));
743         val |= intel_psr2_get_tp_time(intel_dp);
744
745         if (DISPLAY_VER(dev_priv) >= 12) {
746                 if (psr2_block_count(intel_dp) > 2)
747                         val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
748                 else
749                         val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
750         }
751
752         /* Wa_22012278275:adl-p */
753         if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
754                 static const u8 map[] = {
755                         2, /* 5 lines */
756                         1, /* 6 lines */
757                         0, /* 7 lines */
758                         3, /* 8 lines */
759                         6, /* 9 lines */
760                         5, /* 10 lines */
761                         4, /* 11 lines */
762                         7, /* 12 lines */
763                 };
764                 /*
765                  * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
766                  * comments below for more information
767                  */
768                 int tmp;
769
770                 tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
771                 val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
772
773                 tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
774                 val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
775         } else if (DISPLAY_VER(dev_priv) >= 12) {
776                 val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
777                 val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
778         } else if (DISPLAY_VER(dev_priv) >= 9) {
779                 val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
780                 val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
781         }
782
783         if (intel_dp->psr.req_psr2_sdp_prior_scanline)
784                 val |= EDP_PSR2_SU_SDP_SCANLINE;
785
786         if (intel_dp->psr.psr2_sel_fetch_enabled) {
787                 u32 tmp;
788
789                 tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
790                 drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
791         } else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
792                 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), 0);
793         }
794
795         /*
796          * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
797          * recommends keeping this bit unset while PSR2 is enabled.
798          */
799         intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), 0);
800
801         intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val);
802 }
803
804 static bool
805 transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
806 {
807         if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
808                 return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
809         else if (DISPLAY_VER(dev_priv) >= 12)
810                 return cpu_transcoder == TRANSCODER_A;
811         else if (DISPLAY_VER(dev_priv) >= 9)
812                 return cpu_transcoder == TRANSCODER_EDP;
813         else
814                 return false;
815 }
816
817 static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
818 {
819         if (!cstate || !cstate->hw.active)
820                 return 0;
821
822         return DIV_ROUND_UP(1000 * 1000,
823                             drm_mode_vrefresh(&cstate->hw.adjusted_mode));
824 }
825
826 static void psr2_program_idle_frames(struct intel_dp *intel_dp,
827                                      u32 idle_frames)
828 {
829         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
830         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
831
832         intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
833                      EDP_PSR2_IDLE_FRAMES_MASK,
834                      EDP_PSR2_IDLE_FRAMES(idle_frames));
835 }
836
837 static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
838 {
839         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
840
841         psr2_program_idle_frames(intel_dp, 0);
842         intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
843 }
844
845 static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
846 {
847         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
848
849         intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
850         psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
851 }
852
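/*
 * Delayed work used to drop back from DC3CO to PSR2 deep sleep once flips
 * have stopped; if the work was re-queued in the meantime (another flip
 * happened) it bails out and keeps DC3CO enabled.
 */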
853 static void tgl_dc3co_disable_work(struct work_struct *work)
854 {
855         struct intel_dp *intel_dp =
856                 container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
857
858         mutex_lock(&intel_dp->psr.lock);
859         /* If delayed work is pending, it is not idle */
860         if (delayed_work_pending(&intel_dp->psr.dc3co_work))
861                 goto unlock;
862
863         tgl_psr2_disable_dc3co(intel_dp);
864 unlock:
865         mutex_unlock(&intel_dp->psr.lock);
866 }
867
868 static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
869 {
870         if (!intel_dp->psr.dc3co_exitline)
871                 return;
872
873         cancel_delayed_work(&intel_dp->psr.dc3co_work);
874         /* Before PSR2 exit disallow dc3co */
875         tgl_psr2_disable_dc3co(intel_dp);
876 }
877
878 static bool
879 dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
880                               struct intel_crtc_state *crtc_state)
881 {
882         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
883         enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
884         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
885         enum port port = dig_port->base.port;
886
887         if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
888                 return pipe <= PIPE_B && port <= PORT_B;
889         else
890                 return pipe == PIPE_A && port == PORT_A;
891 }
892
893 static void
894 tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
895                                   struct intel_crtc_state *crtc_state)
896 {
897         const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
898         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
899         struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
900         u32 exit_scanlines;
901
902         /*
903          * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
904          * keep DC3CO disabled until the new activation/deactivation sequence
905          * is implemented. Bspec: 49196
906          */
907         return;
908
909         /*
910          * DMC's DC3CO exit mechanism has an issue with Selective Fetch.
911          * TODO: when the issue is addressed, this restriction should be removed.
912          */
913         if (crtc_state->enable_psr2_sel_fetch)
914                 return;
915
916         if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
917                 return;
918
919         if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
920                 return;
921
922         /* Wa_16011303918:adl-p */
923         if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
924                 return;
925
926         /*
927          * DC3CO Exit time 200us B.Spec 49196
928          * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
929          */
930         exit_scanlines =
931                 intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
932
933         if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
934                 return;
935
936         crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
937 }
938
939 static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
940                                               struct intel_crtc_state *crtc_state)
941 {
942         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
943
944         if (!dev_priv->params.enable_psr2_sel_fetch &&
945             intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
946                 drm_dbg_kms(&dev_priv->drm,
947                             "PSR2 sel fetch not enabled, disabled by parameter\n");
948                 return false;
949         }
950
951         if (crtc_state->uapi.async_flip) {
952                 drm_dbg_kms(&dev_priv->drm,
953                             "PSR2 sel fetch not enabled, async flip enabled\n");
954                 return false;
955         }
956
957         return crtc_state->enable_psr2_sel_fetch = true;
958 }
959
960 static bool psr2_granularity_check(struct intel_dp *intel_dp,
961                                    struct intel_crtc_state *crtc_state)
962 {
963         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
964         const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
965         const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
966         const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
967         u16 y_granularity = 0;
968
969         /* PSR2 HW only sends full lines so we only need to validate the width */
970         if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
971                 return false;
972
973         if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
974                 return false;
975
976         /* HW tracking is only aligned to 4 lines */
977         if (!crtc_state->enable_psr2_sel_fetch)
978                 return intel_dp->psr.su_y_granularity == 4;
979
980         /*
981          * adl_p and mtl platforms have 1 line granularity.
982          * For other platforms with SW tracking we can adjust the y coordinates
983          * to match the sink requirement if it is a multiple of 4.
984          */
985         if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
986                 y_granularity = intel_dp->psr.su_y_granularity;
987         else if (intel_dp->psr.su_y_granularity <= 2)
988                 y_granularity = 4;
989         else if ((intel_dp->psr.su_y_granularity % 4) == 0)
990                 y_granularity = intel_dp->psr.su_y_granularity;
991
992         if (y_granularity == 0 || crtc_vdisplay % y_granularity)
993                 return false;
994
995         if (crtc_state->dsc.compression_enable &&
996             vdsc_cfg->slice_height % y_granularity)
997                 return false;
998
999         crtc_state->su_y_granularity = y_granularity;
1000         return true;
1001 }
1002
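/*
 * Check whether the PSR2 SDP can be sent early enough within hblank; if not,
 * fall back to the "SDP prior to scanline" indication, which requires
 * display 14+ and an eDP 1.4b+ sink.
 */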
1003 static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
1004                                                         struct intel_crtc_state *crtc_state)
1005 {
1006         const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
1007         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1008         u32 hblank_total, hblank_ns, req_ns;
1009
1010         hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
1011         hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
1012
1013         /* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
1014         req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
1015
1016         if ((hblank_ns - req_ns) > 100)
1017                 return true;
1018
1019         /* Not supported <13 / Wa_22012279113:adl-p */
1020         if (DISPLAY_VER(dev_priv) <= 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
1021                 return false;
1022
1023         crtc_state->req_psr2_sdp_prior_scanline = true;
1024         return true;
1025 }
1026
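/*
 * Convert the Bspec IO/fast wake times (in us) into scanlines for PSR2_CTL,
 * rejecting PSR2 when they don't fit within the hardware line limits.
 */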
1027 static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
1028                                      struct intel_crtc_state *crtc_state)
1029 {
1030         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1031         int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
1032         u8 max_wake_lines;
1033
1034         if (DISPLAY_VER(i915) >= 12) {
1035                 io_wake_time = 42;
1036                 /*
1037                  * According to Bspec it's 42us, but based on testing
1038                  * it is not enough -> use 45 us.
1039                  */
1040                 fast_wake_time = 45;
1041                 max_wake_lines = 12;
1042         } else {
1043                 io_wake_time = 50;
1044                 fast_wake_time = 32;
1045                 max_wake_lines = 8;
1046         }
1047
1048         io_wake_lines = intel_usecs_to_scanlines(
1049                 &crtc_state->hw.adjusted_mode, io_wake_time);
1050         fast_wake_lines = intel_usecs_to_scanlines(
1051                 &crtc_state->hw.adjusted_mode, fast_wake_time);
1052
1053         if (io_wake_lines > max_wake_lines ||
1054             fast_wake_lines > max_wake_lines)
1055                 return false;
1056
1057         if (i915->params.psr_safest_params)
1058                 io_wake_lines = fast_wake_lines = max_wake_lines;
1059
1060         /* According to Bspec lower limit should be set as 7 lines. */
1061         intel_dp->psr.io_wake_lines = max(io_wake_lines, 7);
1062         intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7);
1063
1064         return true;
1065 }
1066
1067 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
1068                                     struct intel_crtc_state *crtc_state)
1069 {
1070         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1071         int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1072         int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1073         int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
1074
1075         if (!intel_dp->psr.sink_psr2_support)
1076                 return false;
1077
1078         /* JSL and EHL only support eDP 1.3 */
1079         if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
1080                 drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
1081                 return false;
1082         }
1083
1084         /* Wa_16011181250 */
1085         if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
1086             IS_DG2(dev_priv)) {
1087                 drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
1088                 return false;
1089         }
1090
1091         if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1092                 drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
1093                 return false;
1094         }
1095
1096         if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
1097                 drm_dbg_kms(&dev_priv->drm,
1098                             "PSR2 not supported in transcoder %s\n",
1099                             transcoder_name(crtc_state->cpu_transcoder));
1100                 return false;
1101         }
1102
1103         if (!psr2_global_enabled(intel_dp)) {
1104                 drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
1105                 return false;
1106         }
1107
1108         /*
1109          * DSC and PSR2 cannot be enabled simultaneously. If a requested
1110          * resolution requires DSC to be enabled, priority is given to DSC
1111          * over PSR2.
1112          */
1113         if (crtc_state->dsc.compression_enable &&
1114             (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))) {
1115                 drm_dbg_kms(&dev_priv->drm,
1116                             "PSR2 cannot be enabled since DSC is enabled\n");
1117                 return false;
1118         }
1119
1120         if (crtc_state->crc_enabled) {
1121                 drm_dbg_kms(&dev_priv->drm,
1122                             "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
1123                 return false;
1124         }
1125
1126         if (DISPLAY_VER(dev_priv) >= 12) {
1127                 psr_max_h = 5120;
1128                 psr_max_v = 3200;
1129                 max_bpp = 30;
1130         } else if (DISPLAY_VER(dev_priv) >= 10) {
1131                 psr_max_h = 4096;
1132                 psr_max_v = 2304;
1133                 max_bpp = 24;
1134         } else if (DISPLAY_VER(dev_priv) == 9) {
1135                 psr_max_h = 3640;
1136                 psr_max_v = 2304;
1137                 max_bpp = 24;
1138         }
1139
1140         if (crtc_state->pipe_bpp > max_bpp) {
1141                 drm_dbg_kms(&dev_priv->drm,
1142                             "PSR2 not enabled, pipe bpp %d > max supported %d\n",
1143                             crtc_state->pipe_bpp, max_bpp);
1144                 return false;
1145         }
1146
1147         /* Wa_16011303918:adl-p */
1148         if (crtc_state->vrr.enable &&
1149             IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1150                 drm_dbg_kms(&dev_priv->drm,
1151                             "PSR2 not enabled, not compatible with HW stepping + VRR\n");
1152                 return false;
1153         }
1154
1155         if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
1156                 drm_dbg_kms(&dev_priv->drm,
1157                             "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
1158                 return false;
1159         }
1160
1161         if (!_compute_psr2_wake_times(intel_dp, crtc_state)) {
1162                 drm_dbg_kms(&dev_priv->drm,
1163                             "PSR2 not enabled, Unable to use long enough wake times\n");
1164                 return false;
1165         }
1166
1167         /* Vblank >= PSR2_CTL Block Count Number maximum line count */
1168         if (crtc_state->hw.adjusted_mode.crtc_vblank_end -
1169             crtc_state->hw.adjusted_mode.crtc_vblank_start <
1170             psr2_block_count_lines(intel_dp)) {
1171                 drm_dbg_kms(&dev_priv->drm,
1172                             "PSR2 not enabled, too short vblank time\n");
1173                 return false;
1174         }
1175
1176         if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1177                 if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
1178                     !HAS_PSR_HW_TRACKING(dev_priv)) {
1179                         drm_dbg_kms(&dev_priv->drm,
1180                                     "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
1181                         return false;
1182                 }
1183         }
1184
1185         if (!psr2_granularity_check(intel_dp, crtc_state)) {
1186                 drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
1187                 goto unsupported;
1188         }
1189
1190         if (!crtc_state->enable_psr2_sel_fetch &&
1191             (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
1192                 drm_dbg_kms(&dev_priv->drm,
1193                             "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
1194                             crtc_hdisplay, crtc_vdisplay,
1195                             psr_max_h, psr_max_v);
1196                 goto unsupported;
1197         }
1198
1199         tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
1200         return true;
1201
1202 unsupported:
1203         crtc_state->enable_psr2_sel_fetch = false;
1204         return false;
1205 }
1206
1207 void intel_psr_compute_config(struct intel_dp *intel_dp,
1208                               struct intel_crtc_state *crtc_state,
1209                               struct drm_connector_state *conn_state)
1210 {
1211         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1212         const struct drm_display_mode *adjusted_mode =
1213                 &crtc_state->hw.adjusted_mode;
1214         int psr_setup_time;
1215
1216         /*
1217          * Current PSR panels don't work reliably with VRR enabled,
1218          * so if VRR is enabled, do not enable PSR.
1219          */
1220         if (crtc_state->vrr.enable)
1221                 return;
1222
1223         if (!CAN_PSR(intel_dp))
1224                 return;
1225
1226         if (!psr_global_enabled(intel_dp)) {
1227                 drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
1228                 return;
1229         }
1230
1231         if (intel_dp->psr.sink_not_reliable) {
1232                 drm_dbg_kms(&dev_priv->drm,
1233                             "PSR sink implementation is not reliable\n");
1234                 return;
1235         }
1236
1237         if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1238                 drm_dbg_kms(&dev_priv->drm,
1239                             "PSR condition failed: Interlaced mode enabled\n");
1240                 return;
1241         }
1242
1243         psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
1244         if (psr_setup_time < 0) {
1245                 drm_dbg_kms(&dev_priv->drm,
1246                             "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
1247                             intel_dp->psr_dpcd[1]);
1248                 return;
1249         }
1250
1251         if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
1252             adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
1253                 drm_dbg_kms(&dev_priv->drm,
1254                             "PSR condition failed: PSR setup time (%d us) too long\n",
1255                             psr_setup_time);
1256                 return;
1257         }
1258
1259         crtc_state->has_psr = true;
1260         crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
1261
1262         crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1263         intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
1264                                      &crtc_state->psr_vsc);
1265 }
1266
1267 void intel_psr_get_config(struct intel_encoder *encoder,
1268                           struct intel_crtc_state *pipe_config)
1269 {
1270         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1271         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1272         enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
1273         struct intel_dp *intel_dp;
1274         u32 val;
1275
1276         if (!dig_port)
1277                 return;
1278
1279         intel_dp = &dig_port->dp;
1280         if (!CAN_PSR(intel_dp))
1281                 return;
1282
1283         mutex_lock(&intel_dp->psr.lock);
1284         if (!intel_dp->psr.enabled)
1285                 goto unlock;
1286
1287         /*
1288          * Not possible to rely on reading the EDP_PSR/PSR2_CTL registers, as
1289          * PSR gets enabled/disabled because of frontbuffer tracking and others.
1290          */
1291         pipe_config->has_psr = true;
1292         pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
1293         pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1294
1295         if (!intel_dp->psr.psr2_enabled)
1296                 goto unlock;
1297
1298         if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1299                 val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
1300                 if (val & PSR2_MAN_TRK_CTL_ENABLE)
1301                         pipe_config->enable_psr2_sel_fetch = true;
1302         }
1303
1304         if (DISPLAY_VER(dev_priv) >= 12) {
1305                 val = intel_de_read(dev_priv, TRANS_EXITLINE(cpu_transcoder));
1306                 pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
1307         }
1308 unlock:
1309         mutex_unlock(&intel_dp->psr.lock);
1310 }
1311
1312 static void intel_psr_activate(struct intel_dp *intel_dp)
1313 {
1314         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1315         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1316
1317         drm_WARN_ON(&dev_priv->drm,
1318                     transcoder_has_psr2(dev_priv, cpu_transcoder) &&
1319                     intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)) & EDP_PSR2_ENABLE);
1320
1321         drm_WARN_ON(&dev_priv->drm,
1322                     intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)) & EDP_PSR_ENABLE);
1323
1324         drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
1325
1326         lockdep_assert_held(&intel_dp->psr.lock);
1327
1328         /* psr1 and psr2 are mutually exclusive. */
1329         if (intel_dp->psr.psr2_enabled)
1330                 hsw_activate_psr2(intel_dp);
1331         else
1332                 hsw_activate_psr1(intel_dp);
1333
1334         intel_dp->psr.active = true;
1335 }
1336
1337 static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
1338 {
1339         switch (intel_dp->psr.pipe) {
1340         case PIPE_A:
1341                 return LATENCY_REPORTING_REMOVED_PIPE_A;
1342         case PIPE_B:
1343                 return LATENCY_REPORTING_REMOVED_PIPE_B;
1344         case PIPE_C:
1345                 return LATENCY_REPORTING_REMOVED_PIPE_C;
1346         case PIPE_D:
1347                 return LATENCY_REPORTING_REMOVED_PIPE_D;
1348         default:
1349                 MISSING_CASE(intel_dp->psr.pipe);
1350                 return 0;
1351         }
1352 }
1353
1354 /*
1355  * Wa_16013835468
1356  * Wa_14015648006
1357  */
1358 static void wm_optimization_wa(struct intel_dp *intel_dp,
1359                                const struct intel_crtc_state *crtc_state)
1360 {
1361         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1362         bool set_wa_bit = false;
1363
1364         /* Wa_14015648006 */
1365         if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
1366             IS_DISPLAY_VER(dev_priv, 11, 13))
1367                 set_wa_bit |= crtc_state->wm_level_disabled;
1368
1369         /* Wa_16013835468 */
1370         if (DISPLAY_VER(dev_priv) == 12)
1371                 set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
1372                         crtc_state->hw.adjusted_mode.crtc_vdisplay;
1373
1374         if (set_wa_bit)
1375                 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1376                              0, wa_16013835468_bit_get(intel_dp));
1377         else
1378                 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1379                              wa_16013835468_bit_get(intel_dp), 0);
1380 }
1381
1382 static void intel_psr_enable_source(struct intel_dp *intel_dp,
1383                                     const struct intel_crtc_state *crtc_state)
1384 {
1385         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1386         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1387         u32 mask;
1388
1389         /*
1390          * Only HSW and BDW have PSR AUX registers that need to be set up.
1391          * SKL+ use hardcoded values for PSR AUX transactions.
1392          */
1393         if (DISPLAY_VER(dev_priv) < 9)
1394                 hsw_psr_setup_aux(intel_dp);
1395
1396         /*
1397          * Per spec: avoid continuous PSR exit by masking MEMUP and HPD. Also
1398          * mask LPSP to avoid a dependency on other drivers that might block
1399          * runtime_pm, besides preventing other HW tracking issues, now that we
1400          * can rely on frontbuffer tracking.
1401          */
1402         mask = EDP_PSR_DEBUG_MASK_MEMUP |
1403                EDP_PSR_DEBUG_MASK_HPD |
1404                EDP_PSR_DEBUG_MASK_LPSP;
1405
1406         if (DISPLAY_VER(dev_priv) < 20)
1407                 mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1408
1409         /*
1410          * No separate pipe reg write mask on hsw/bdw, so have to unmask all
1411          * registers in order to keep the CURSURFLIVE tricks working :(
1412          */
1413         if (IS_DISPLAY_VER(dev_priv, 9, 10))
1414                 mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1415
1416         /* allow PSR with sprite enabled */
1417         if (IS_HASWELL(dev_priv))
1418                 mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
1419
1420         intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask);
1421
1422         psr_irq_control(intel_dp);
1423
1424         /*
1425          * TODO: if future platforms support DC3CO in more than one
1426          * transcoder, EXITLINE will need to be unset when disabling PSR.
1427          */
1428         if (intel_dp->psr.dc3co_exitline)
1429                 intel_de_rmw(dev_priv, TRANS_EXITLINE(cpu_transcoder), EXITLINE_MASK,
1430                              intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
1431
1432         if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
1433                 intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1434                              intel_dp->psr.psr2_sel_fetch_enabled ?
1435                              IGNORE_PSR2_HW_TRACKING : 0);
1436
1437         /*
1438          * Wa_16013835468
1439          * Wa_14015648006
1440          */
1441         wm_optimization_wa(intel_dp, crtc_state);
1442
1443         if (intel_dp->psr.psr2_enabled) {
1444                 if (DISPLAY_VER(dev_priv) == 9)
1445                         intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1446                                      PSR2_VSC_ENABLE_PROG_HEADER |
1447                                      PSR2_ADD_VERTICAL_LINE_COUNT);
1448
1449                 /*
1450                  * Wa_16014451276:adlp,mtl[a0,b0]
1451                  * All supported adlp panels have 1-based X granularity; this may
1452                  * cause issues if non-supported panels are used.
1453                  */
1454                 if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1455                         intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder), 0,
1456                                      ADLP_1_BASED_X_GRANULARITY);
1457                 else if (IS_ALDERLAKE_P(dev_priv))
1458                         intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1459                                      ADLP_1_BASED_X_GRANULARITY);
1460
1461                 /* Wa_16012604467:adlp,mtl[a0,b0] */
1462                 if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1463                         intel_de_rmw(dev_priv,
1464                                      MTL_CLKGATE_DIS_TRANS(cpu_transcoder), 0,
1465                                      MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
1466                 else if (IS_ALDERLAKE_P(dev_priv))
1467                         intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
1468                                      CLKGATE_DIS_MISC_DMASC_GATING_DIS);
1469         }
1470 }
1471
1472 static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1473 {
1474         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1475         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1476         u32 val;
1477
1478         /*
1479          * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1480          * will still keep the error set even after the reset done in the
1481          * irq_preinstall and irq_uninstall hooks.
1482          * Enabling PSR in this situation causes the screen to freeze the
1483          * first time the PSR HW tries to activate, so let's keep PSR disabled
1484          * to avoid any rendering problems.
1485          */
1486         val = intel_de_read(dev_priv, psr_iir_reg(dev_priv, cpu_transcoder));
1487         val &= psr_irq_psr_error_bit_get(intel_dp);
1488         if (val) {
1489                 intel_dp->psr.sink_not_reliable = true;
1490                 drm_dbg_kms(&dev_priv->drm,
1491                             "PSR interruption error set, not enabling PSR\n");
1492                 return false;
1493         }
1494
1495         return true;
1496 }
1497
1498 static void intel_psr_enable_locked(struct intel_dp *intel_dp,
1499                                     const struct intel_crtc_state *crtc_state)
1500 {
1501         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1502         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1503         enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
1504         struct intel_encoder *encoder = &dig_port->base;
1505         u32 val;
1506
1507         drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
1508
1509         intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
1510         intel_dp->psr.busy_frontbuffer_bits = 0;
1511         intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1512         intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
1513         /* DC5/DC6 requires at least 6 idle frames */
1514         val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
1515         intel_dp->psr.dc3co_exit_delay = val;
1516         intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
1517         intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
1518         intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1519         intel_dp->psr.req_psr2_sdp_prior_scanline =
1520                 crtc_state->req_psr2_sdp_prior_scanline;
1521
1522         if (!psr_interrupt_error_check(intel_dp))
1523                 return;
1524
1525         drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
1526                     intel_dp->psr.psr2_enabled ? "2" : "1");
1527         intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc);
1528         intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
1529         intel_psr_enable_sink(intel_dp);
1530         intel_psr_enable_source(intel_dp, crtc_state);
1531         intel_dp->psr.enabled = true;
1532         intel_dp->psr.paused = false;
1533
1534         intel_psr_activate(intel_dp);
1535 }
1536
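/*
 * Disarm PSR in the hardware: clear the PSR1 or PSR2 enable bit for the
 * transcoder (disallowing DC3CO first in the PSR2 case) and mark PSR as
 * inactive. If PSR was never activated, only sanity check that the enable
 * bits are indeed clear.
 */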
1537 static void intel_psr_exit(struct intel_dp *intel_dp)
1538 {
1539         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1540         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1541         u32 val;
1542
1543         if (!intel_dp->psr.active) {
1544                 if (transcoder_has_psr2(dev_priv, cpu_transcoder)) {
1545                         val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
1546                         drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
1547                 }
1548
1549                 val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
1550                 drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
1551
1552                 return;
1553         }
1554
1555         if (intel_dp->psr.psr2_enabled) {
1556                 tgl_disallow_dc3co_on_psr2_exit(intel_dp);
1557
1558                 val = intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
1559                                    EDP_PSR2_ENABLE, 0);
1560
1561                 drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
1562         } else {
1563                 val = intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
1564                                    EDP_PSR_ENABLE, 0);
1565
1566                 drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
1567         }
1568         intel_dp->psr.active = false;
1569 }
1570
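/*
 * Wait, with a timeout, for the PSR1/PSR2 status state machine of the
 * transcoder to report idle after PSR has been disarmed.
 */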
1571 static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
1572 {
1573         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1574         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1575         i915_reg_t psr_status;
1576         u32 psr_status_mask;
1577
1578         if (intel_dp->psr.psr2_enabled) {
1579                 psr_status = EDP_PSR2_STATUS(cpu_transcoder);
1580                 psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
1581         } else {
1582                 psr_status = psr_status_reg(dev_priv, cpu_transcoder);
1583                 psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
1584         }
1585
1586         /* Wait till PSR is idle */
1587         if (intel_de_wait_for_clear(dev_priv, psr_status,
1588                                     psr_status_mask, 2000))
1589                 drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
1590 }
1591
1592 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
1593 {
1594         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1595         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1596         enum phy phy = intel_port_to_phy(dev_priv,
1597                                          dp_to_dig_port(intel_dp)->base.port);
1598
1599         lockdep_assert_held(&intel_dp->psr.lock);
1600
1601         if (!intel_dp->psr.enabled)
1602                 return;
1603
1604         drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
1605                     intel_dp->psr.psr2_enabled ? "2" : "1");
1606
1607         intel_psr_exit(intel_dp);
1608         intel_psr_wait_exit_locked(intel_dp);
1609
1610         /*
1611          * Wa_16013835468
1612          * Wa_14015648006
1613          */
1614         if (DISPLAY_VER(dev_priv) >= 11)
1615                 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1616                              wa_16013835468_bit_get(intel_dp), 0);
1617
1618         if (intel_dp->psr.psr2_enabled) {
1619                 /* Wa_16012604467:adlp,mtl[a0,b0] */
1620                 if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1621                         intel_de_rmw(dev_priv,
1622                                      MTL_CLKGATE_DIS_TRANS(cpu_transcoder),
1623                                      MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
1624                 else if (IS_ALDERLAKE_P(dev_priv))
1625                         intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
1626                                      CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
1627         }
1628
1629         intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
1630
1631         /* Disable PSR on Sink */
1632         drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
1633
1634         if (intel_dp->psr.psr2_enabled)
1635                 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
1636
1637         intel_dp->psr.enabled = false;
1638         intel_dp->psr.psr2_enabled = false;
1639         intel_dp->psr.psr2_sel_fetch_enabled = false;
1640         intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1641 }
1642
1643 /**
1644  * intel_psr_disable - Disable PSR
1645  * @intel_dp: Intel DP
1646  * @old_crtc_state: old CRTC state
1647  *
1648  * This function needs to be called before disabling the pipe.
1649  */
1650 void intel_psr_disable(struct intel_dp *intel_dp,
1651                        const struct intel_crtc_state *old_crtc_state)
1652 {
1653         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1654
1655         if (!old_crtc_state->has_psr)
1656                 return;
1657
1658         if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
1659                 return;
1660
1661         mutex_lock(&intel_dp->psr.lock);
1662
1663         intel_psr_disable_locked(intel_dp);
1664
1665         mutex_unlock(&intel_dp->psr.lock);
1666         cancel_work_sync(&intel_dp->psr.work);
1667         cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
1668 }
1669
1670 /**
1671  * intel_psr_pause - Pause PSR
1672  * @intel_dp: Intel DP
1673  *
1674  * This function needs to be called after enabling PSR.
1675  */
1676 void intel_psr_pause(struct intel_dp *intel_dp)
1677 {
1678         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1679         struct intel_psr *psr = &intel_dp->psr;
1680
1681         if (!CAN_PSR(intel_dp))
1682                 return;
1683
1684         mutex_lock(&psr->lock);
1685
1686         if (!psr->enabled) {
1687                 mutex_unlock(&psr->lock);
1688                 return;
1689         }
1690
1691         /* If we ever hit this, we will need to add refcount to pause/resume */
1692         drm_WARN_ON(&dev_priv->drm, psr->paused);
1693
1694         intel_psr_exit(intel_dp);
1695         intel_psr_wait_exit_locked(intel_dp);
1696         psr->paused = true;
1697
1698         mutex_unlock(&psr->lock);
1699
1700         cancel_work_sync(&psr->work);
1701         cancel_delayed_work_sync(&psr->dc3co_work);
1702 }
1703
1704 /**
1705  * intel_psr_resume - Resume PSR
1706  * @intel_dp: Intel DP
1707  *
1708  * This function needs to be called after pausing PSR.
1709  */
1710 void intel_psr_resume(struct intel_dp *intel_dp)
1711 {
1712         struct intel_psr *psr = &intel_dp->psr;
1713
1714         if (!CAN_PSR(intel_dp))
1715                 return;
1716
1717         mutex_lock(&psr->lock);
1718
1719         if (!psr->paused)
1720                 goto unlock;
1721
1722         psr->paused = false;
1723         intel_psr_activate(intel_dp);
1724
1725 unlock:
1726         mutex_unlock(&psr->lock);
1727 }
1728
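/*
 * PSR2_MAN_TRK_CTL has a different bit layout on ADL-P and display version
 * 14+, so the helpers below return the per-platform bit definitions used for
 * manual tracking control.
 */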
1729 static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
1730 {
1731         return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? 0 :
1732                 PSR2_MAN_TRK_CTL_ENABLE;
1733 }
1734
1735 static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
1736 {
1737         return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1738                ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
1739                PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
1740 }
1741
1742 static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
1743 {
1744         return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1745                ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
1746                PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
1747 }
1748
1749 static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
1750 {
1751         return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1752                ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
1753                PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
1754 }
1755
1756 static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
1757 {
1758         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1759         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1760
1761         if (intel_dp->psr.psr2_sel_fetch_enabled)
1762                 intel_de_write(dev_priv,
1763                                PSR2_MAN_TRK_CTL(cpu_transcoder),
1764                                man_trk_ctl_enable_bit_get(dev_priv) |
1765                                man_trk_ctl_partial_frame_bit_get(dev_priv) |
1766                                man_trk_ctl_single_full_frame_bit_get(dev_priv) |
1767                                man_trk_ctl_continuos_full_frame(dev_priv));
1768
1769         /*
1770          * Display WA #0884: skl+
1771          * This documented WA for bxt can be safely applied
1772          * broadly so we can force HW tracking to exit PSR
1773          * instead of disabling and re-enabling.
1774          * The workaround tells us to write 0 to CUR_SURFLIVE_A,
1775          * but it makes more sense to write to the currently active
1776          * pipe.
1777          *
1778          * This workaround does not exist for platforms with display 10 or
1779          * newer, but testing proved that it works up to display 13; for
1780          * anything newer, testing will be needed.
1781          */
1782         intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
1783 }
1784
1785 void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane,
1786                                             const struct intel_crtc_state *crtc_state)
1787 {
1788         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1789         enum pipe pipe = plane->pipe;
1790
1791         if (!crtc_state->enable_psr2_sel_fetch)
1792                 return;
1793
1794         intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
1795 }
1796
1797 void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane,
1798                                             const struct intel_crtc_state *crtc_state,
1799                                             const struct intel_plane_state *plane_state)
1800 {
1801         struct drm_i915_private *i915 = to_i915(plane->base.dev);
1802         enum pipe pipe = plane->pipe;
1803
1804         if (!crtc_state->enable_psr2_sel_fetch)
1805                 return;
1806
1807         if (plane->id == PLANE_CURSOR)
1808                 intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
1809                                   plane_state->ctl);
1810         else
1811                 intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
1812                                   PLANE_SEL_FETCH_CTL_ENABLE);
1813 }
1814
1815 void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane,
1816                                               const struct intel_crtc_state *crtc_state,
1817                                               const struct intel_plane_state *plane_state,
1818                                               int color_plane)
1819 {
1820         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1821         enum pipe pipe = plane->pipe;
1822         const struct drm_rect *clip;
1823         u32 val;
1824         int x, y;
1825
1826         if (!crtc_state->enable_psr2_sel_fetch)
1827                 return;
1828
1829         if (plane->id == PLANE_CURSOR)
1830                 return;
1831
1832         clip = &plane_state->psr2_sel_fetch_area;
1833
1834         val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
1835         val |= plane_state->uapi.dst.x1;
1836         intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);
1837
1838         x = plane_state->view.color_plane[color_plane].x;
1839
1840         /*
1841          * From Bspec: UV surface Start Y Position = half of Y plane Y
1842          * start position.
1843          */
1844         if (!color_plane)
1845                 y = plane_state->view.color_plane[color_plane].y + clip->y1;
1846         else
1847                 y = plane_state->view.color_plane[color_plane].y + clip->y1 / 2;
1848
1849         val = y << 16 | x;
1850
1851         intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
1852                           val);
1853
1854         /* Sizes are 0 based */
1855         val = (drm_rect_height(clip) - 1) << 16;
1856         val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
1857         intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
1858 }
1859
1860 void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
1861 {
1862         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1863         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
1864         struct intel_encoder *encoder;
1865
1866         if (!crtc_state->enable_psr2_sel_fetch)
1867                 return;
1868
1869         for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
1870                                              crtc_state->uapi.encoder_mask) {
1871                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1872
1873                 lockdep_assert_held(&intel_dp->psr.lock);
1874                 if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
1875                         return;
1876                 break;
1877         }
1878
1879         intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
1880                        crtc_state->psr2_man_track_ctl);
1881 }
1882
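/*
 * Compute the PSR2_MAN_TRK_CTL value for this commit. Per the code below, on
 * ADL-P and display 14+ the selective update region is programmed with the
 * clip's start line and end line - 1 directly; on older platforms it is
 * programmed in blocks of 4 lines (e.g. an aligned clip of y1 = 8, y2 = 40
 * programs start address 8/4 + 1 = 3 and end address 40/4 + 1 = 11).
 * A full update instead sets the single/continuous full frame bits.
 */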
1883 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
1884                                   struct drm_rect *clip, bool full_update)
1885 {
1886         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1887         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1888         u32 val = man_trk_ctl_enable_bit_get(dev_priv);
1889
1890         /* SF partial frame enable has to be set even on full update */
1891         val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
1892
1893         if (full_update) {
1894                 val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
1895                 val |= man_trk_ctl_continuos_full_frame(dev_priv);
1896                 goto exit;
1897         }
1898
1899         if (clip->y1 == -1)
1900                 goto exit;
1901
1902         if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) {
1903                 val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1);
1904                 val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 - 1);
1905         } else {
1906                 drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
1907
1908                 val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
1909                 val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
1910         }
1911 exit:
1912         crtc_state->psr2_man_track_ctl = val;
1913 }
1914
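/*
 * Merge a plane's damage rectangle into the accumulated pipe damage area:
 * clip it against the pipe source rect, then extend the vertical range of
 * the overlap area to cover it.
 */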
1915 static void clip_area_update(struct drm_rect *overlap_damage_area,
1916                              struct drm_rect *damage_area,
1917                              struct drm_rect *pipe_src)
1918 {
1919         if (!drm_rect_intersect(damage_area, pipe_src))
1920                 return;
1921
1922         if (overlap_damage_area->y1 == -1) {
1923                 overlap_damage_area->y1 = damage_area->y1;
1924                 overlap_damage_area->y2 = damage_area->y2;
1925                 return;
1926         }
1927
1928         if (damage_area->y1 < overlap_damage_area->y1)
1929                 overlap_damage_area->y1 = damage_area->y1;
1930
1931         if (damage_area->y2 > overlap_damage_area->y2)
1932                 overlap_damage_area->y2 = damage_area->y2;
1933 }
1934
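/*
 * Align the selective update region to the sink's Y granularity (or to the
 * VDSC slice height when DSC is enabled on ADL-P/display 14+): y1 is rounded
 * down and y2 rounded up to the next multiple of the alignment, e.g. with a
 * granularity of 4 a clip of y1 = 10, y2 = 27 becomes y1 = 8, y2 = 28.
 */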
1935 static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *crtc_state,
1936                                                 struct drm_rect *pipe_clip)
1937 {
1938         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1939         const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1940         u16 y_alignment;
1941
1942         /* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
1943         if (crtc_state->dsc.compression_enable &&
1944             (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14))
1945                 y_alignment = vdsc_cfg->slice_height;
1946         else
1947                 y_alignment = crtc_state->su_y_granularity;
1948
1949         pipe_clip->y1 -= pipe_clip->y1 % y_alignment;
1950         if (pipe_clip->y2 % y_alignment)
1951                 pipe_clip->y2 = ((pipe_clip->y2 / y_alignment) + 1) * y_alignment;
1952 }
1953
1954 /*
1955  * TODO: Not clear how to handle planes with negative position;
1956  * also, planes are not updated if they have a negative X
1957  * position, so for now do a full update in these cases.
1958  *
1959  * Plane scaling and rotation are not supported by selective fetch and both
1960  * properties can change without a modeset, so they need to be checked at
1961  * every atomic commit.
1962  */
1963 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
1964 {
1965         if (plane_state->uapi.dst.y1 < 0 ||
1966             plane_state->uapi.dst.x1 < 0 ||
1967             plane_state->scaler_id >= 0 ||
1968             plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
1969                 return false;
1970
1971         return true;
1972 }
1973
1974 /*
1975  * Check for pipe properties that are not supported by selective fetch.
1976  *
1977  * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
1978  * after intel_psr_compute_config(), so for now keep PSR2 selective fetch
1979  * enabled and go through the full update path.
1980  */
1981 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
1982 {
1983         if (crtc_state->scaler_state.scaler_id >= 0)
1984                 return false;
1985
1986         return true;
1987 }
1988
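/*
 * Compute the PSR2 selective fetch state for a CRTC in two passes: first
 * accumulate the damaged area of all changed planes into a single pipe clip,
 * then intersect that clip with each visible plane to set the per-plane
 * selective fetch area. Unsupported plane or pipe state (scaling, rotation,
 * negative position, ...) falls back to a full frame update.
 */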
1989 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
1990                                 struct intel_crtc *crtc)
1991 {
1992         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1993         struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
1994         struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
1995         struct intel_plane_state *new_plane_state, *old_plane_state;
1996         struct intel_plane *plane;
1997         bool full_update = false;
1998         int i, ret;
1999
2000         if (!crtc_state->enable_psr2_sel_fetch)
2001                 return 0;
2002
2003         if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
2004                 full_update = true;
2005                 goto skip_sel_fetch_set_loop;
2006         }
2007
2008         /*
2009          * Calculate the minimal selective fetch area of each plane and the
2010          * pipe damaged area.
2011          * In the next loop the plane selective fetch area will actually be set
2012          * using the whole pipe damaged area.
2013          */
2014         for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2015                                              new_plane_state, i) {
2016                 struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
2017                                                       .x2 = INT_MAX };
2018
2019                 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2020                         continue;
2021
2022                 if (!new_plane_state->uapi.visible &&
2023                     !old_plane_state->uapi.visible)
2024                         continue;
2025
2026                 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2027                         full_update = true;
2028                         break;
2029                 }
2030
2031                 /*
2032                  * If the visibility changed or the plane moved, mark the whole
2033                  * plane area as damaged, as it needs a complete redraw in both
2034                  * the old and the new position.
2035                  */
2036                 if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
2037                     !drm_rect_equals(&new_plane_state->uapi.dst,
2038                                      &old_plane_state->uapi.dst)) {
2039                         if (old_plane_state->uapi.visible) {
2040                                 damaged_area.y1 = old_plane_state->uapi.dst.y1;
2041                                 damaged_area.y2 = old_plane_state->uapi.dst.y2;
2042                                 clip_area_update(&pipe_clip, &damaged_area,
2043                                                  &crtc_state->pipe_src);
2044                         }
2045
2046                         if (new_plane_state->uapi.visible) {
2047                                 damaged_area.y1 = new_plane_state->uapi.dst.y1;
2048                                 damaged_area.y2 = new_plane_state->uapi.dst.y2;
2049                                 clip_area_update(&pipe_clip, &damaged_area,
2050                                                  &crtc_state->pipe_src);
2051                         }
2052                         continue;
2053                 } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
2054                         /* If alpha changed mark the whole plane area as damaged */
2055                         damaged_area.y1 = new_plane_state->uapi.dst.y1;
2056                         damaged_area.y2 = new_plane_state->uapi.dst.y2;
2057                         clip_area_update(&pipe_clip, &damaged_area,
2058                                          &crtc_state->pipe_src);
2059                         continue;
2060                 }
2061
2062                 src = drm_plane_state_src(&new_plane_state->uapi);
2063                 drm_rect_fp_to_int(&src, &src);
2064
2065                 if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
2066                                                      &new_plane_state->uapi, &damaged_area))
2067                         continue;
2068
2069                 damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
2070                 damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
2071                 damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
2072                 damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
2073
2074                 clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
2075         }
2076
2077         /*
2078          * TODO: For now we are just using full update in case
2079          * selective fetch area calculation fails. To optimize this we
2080          * should identify cases where this happens and fix the area
2081          * calculation for those.
2082          */
2083         if (pipe_clip.y1 == -1) {
2084                 drm_info_once(&dev_priv->drm,
2085                               "Selective fetch area calculation failed in pipe %c\n",
2086                               pipe_name(crtc->pipe));
2087                 full_update = true;
2088         }
2089
2090         if (full_update)
2091                 goto skip_sel_fetch_set_loop;
2092
2093         /* Wa_14014971492 */
2094         if ((IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
2095              IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
2096             crtc_state->splitter.enable)
2097                 pipe_clip.y1 = 0;
2098
2099         ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
2100         if (ret)
2101                 return ret;
2102
2103         intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip);
2104
2105         /*
2106          * Now that we have the pipe damaged area, check if it intersects with
2107          * each plane; if it does, set the plane selective fetch area.
2108          */
2109         for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2110                                              new_plane_state, i) {
2111                 struct drm_rect *sel_fetch_area, inter;
2112                 struct intel_plane *linked = new_plane_state->planar_linked_plane;
2113
2114                 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
2115                     !new_plane_state->uapi.visible)
2116                         continue;
2117
2118                 inter = pipe_clip;
2119                 if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
2120                         continue;
2121
2122                 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2123                         full_update = true;
2124                         break;
2125                 }
2126
2127                 sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2128                 sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
2129                 sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
2130                 crtc_state->update_planes |= BIT(plane->id);
2131
2132                 /*
2133                  * Sel_fetch_area is calculated for the UV plane. Use the
2134                  * same area for the Y plane as well.
2135                  */
2136                 if (linked) {
2137                         struct intel_plane_state *linked_new_plane_state;
2138                         struct drm_rect *linked_sel_fetch_area;
2139
2140                         linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
2141                         if (IS_ERR(linked_new_plane_state))
2142                                 return PTR_ERR(linked_new_plane_state);
2143
2144                         linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
2145                         linked_sel_fetch_area->y1 = sel_fetch_area->y1;
2146                         linked_sel_fetch_area->y2 = sel_fetch_area->y2;
2147                         crtc_state->update_planes |= BIT(linked->id);
2148                 }
2149         }
2150
2151 skip_sel_fetch_set_loop:
2152         psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
2153         return 0;
2154 }
2155
2156 void intel_psr_pre_plane_update(struct intel_atomic_state *state,
2157                                 struct intel_crtc *crtc)
2158 {
2159         struct drm_i915_private *i915 = to_i915(state->base.dev);
2160         const struct intel_crtc_state *old_crtc_state =
2161                 intel_atomic_get_old_crtc_state(state, crtc);
2162         const struct intel_crtc_state *new_crtc_state =
2163                 intel_atomic_get_new_crtc_state(state, crtc);
2164         struct intel_encoder *encoder;
2165
2166         if (!HAS_PSR(i915))
2167                 return;
2168
2169         for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2170                                              old_crtc_state->uapi.encoder_mask) {
2171                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2172                 struct intel_psr *psr = &intel_dp->psr;
2173                 bool needs_to_disable = false;
2174
2175                 mutex_lock(&psr->lock);
2176
2177                 /*
2178                  * Reasons to disable:
2179                  * - PSR disabled in new state
2180                  * - All planes will go inactive
2181                  * - Changing between PSR versions
2182                  * - Display WA #1136: skl, bxt
2183                  */
2184                 needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
2185                 needs_to_disable |= !new_crtc_state->has_psr;
2186                 needs_to_disable |= !new_crtc_state->active_planes;
2187                 needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled;
2188                 needs_to_disable |= DISPLAY_VER(i915) < 11 &&
2189                         new_crtc_state->wm_level_disabled;
2190
2191                 if (psr->enabled && needs_to_disable)
2192                         intel_psr_disable_locked(intel_dp);
2193                 else if (psr->enabled && new_crtc_state->wm_level_disabled)
2194                         /* Wa_14015648006 */
2195                         wm_optimization_wa(intel_dp, new_crtc_state);
2196
2197                 mutex_unlock(&psr->lock);
2198         }
2199 }
2200
2201 static void _intel_psr_post_plane_update(const struct intel_atomic_state *state,
2202                                          const struct intel_crtc_state *crtc_state)
2203 {
2204         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2205         struct intel_encoder *encoder;
2206
2207         if (!crtc_state->has_psr)
2208                 return;
2209
2210         for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2211                                              crtc_state->uapi.encoder_mask) {
2212                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2213                 struct intel_psr *psr = &intel_dp->psr;
2214                 bool keep_disabled = false;
2215
2216                 mutex_lock(&psr->lock);
2217
2218                 drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
2219
2220                 keep_disabled |= psr->sink_not_reliable;
2221                 keep_disabled |= !crtc_state->active_planes;
2222
2223                 /* Display WA #1136: skl, bxt */
2224                 keep_disabled |= DISPLAY_VER(dev_priv) < 11 &&
2225                         crtc_state->wm_level_disabled;
2226
2227                 if (!psr->enabled && !keep_disabled)
2228                         intel_psr_enable_locked(intel_dp, crtc_state);
2229                 else if (psr->enabled && !crtc_state->wm_level_disabled)
2230                         /* Wa_14015648006 */
2231                         wm_optimization_wa(intel_dp, crtc_state);
2232
2233                 /* Force a PSR exit when enabling CRC to avoid CRC timeouts */
2234                 if (crtc_state->crc_enabled && psr->enabled)
2235                         psr_force_hw_tracking_exit(intel_dp);
2236
2237                 mutex_unlock(&psr->lock);
2238         }
2239 }
2240
2241 void intel_psr_post_plane_update(const struct intel_atomic_state *state)
2242 {
2243         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2244         struct intel_crtc_state *crtc_state;
2245         struct intel_crtc *crtc;
2246         int i;
2247
2248         if (!HAS_PSR(dev_priv))
2249                 return;
2250
2251         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
2252                 _intel_psr_post_plane_update(state, crtc_state);
2253 }
2254
2255 static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2256 {
2257         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2258         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2259
2260         /*
2261          * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
2262          * As all higher states have bit 4 of the PSR2 state set, we can just
2263          * wait for EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
2264          */
2265         return intel_de_wait_for_clear(dev_priv,
2266                                        EDP_PSR2_STATUS(cpu_transcoder),
2267                                        EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
2268 }
2269
2270 static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2271 {
2272         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2273         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2274
2275         /*
2276          * From bspec: Panel Self Refresh (BDW+)
2277          * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
2278          * exit training time + 1.5 ms of aux channel handshake. 50 ms is
2279          * defensive enough to cover everything.
2280          */
2281         return intel_de_wait_for_clear(dev_priv,
2282                                        psr_status_reg(dev_priv, cpu_transcoder),
2283                                        EDP_PSR_STATUS_STATE_MASK, 50);
2284 }
2285
2286 /**
2287  * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
2288  * @new_crtc_state: new CRTC state
2289  *
2290  * This function is expected to be called from pipe_update_start() where it is
2291  * not expected to race with PSR enable or disable.
2292  */
2293 void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
2294 {
2295         struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
2296         struct intel_encoder *encoder;
2297
2298         if (!new_crtc_state->has_psr)
2299                 return;
2300
2301         for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2302                                              new_crtc_state->uapi.encoder_mask) {
2303                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2304                 int ret;
2305
2306                 lockdep_assert_held(&intel_dp->psr.lock);
2307
2308                 if (!intel_dp->psr.enabled)
2309                         continue;
2310
2311                 if (intel_dp->psr.psr2_enabled)
2312                         ret = _psr2_ready_for_pipe_update_locked(intel_dp);
2313                 else
2314                         ret = _psr1_ready_for_pipe_update_locked(intel_dp);
2315
2316                 if (ret)
2317                         drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
2318         }
2319 }
2320
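/*
 * Wait for the PSR status state machine to go idle so PSR can be
 * re-activated. The psr.lock is dropped around the register wait and
 * re-acquired afterwards, so the caller must re-check the PSR state;
 * returns true only if the wait succeeded and PSR is still enabled.
 */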
2321 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
2322 {
2323         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2324         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2325         i915_reg_t reg;
2326         u32 mask;
2327         int err;
2328
2329         if (!intel_dp->psr.enabled)
2330                 return false;
2331
2332         if (intel_dp->psr.psr2_enabled) {
2333                 reg = EDP_PSR2_STATUS(cpu_transcoder);
2334                 mask = EDP_PSR2_STATUS_STATE_MASK;
2335         } else {
2336                 reg = psr_status_reg(dev_priv, cpu_transcoder);
2337                 mask = EDP_PSR_STATUS_STATE_MASK;
2338         }
2339
2340         mutex_unlock(&intel_dp->psr.lock);
2341
2342         err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
2343         if (err)
2344                 drm_err(&dev_priv->drm,
2345                         "Timed out waiting for PSR Idle for re-enable\n");
2346
2347         /* After the unlocked wait, verify that PSR is still wanted! */
2348         mutex_lock(&intel_dp->psr.lock);
2349         return err == 0 && intel_dp->psr.enabled;
2350 }
2351
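/*
 * Force a new commit on every eDP connector by marking its CRTC mode as
 * changed, so that a changed PSR debug mode gets re-evaluated through the
 * normal atomic path.
 */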
2352 static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
2353 {
2354         struct drm_connector_list_iter conn_iter;
2355         struct drm_modeset_acquire_ctx ctx;
2356         struct drm_atomic_state *state;
2357         struct drm_connector *conn;
2358         int err = 0;
2359
2360         state = drm_atomic_state_alloc(&dev_priv->drm);
2361         if (!state)
2362                 return -ENOMEM;
2363
2364         drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2365
2366         state->acquire_ctx = &ctx;
2367         to_intel_atomic_state(state)->internal = true;
2368
2369 retry:
2370         drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
2371         drm_for_each_connector_iter(conn, &conn_iter) {
2372                 struct drm_connector_state *conn_state;
2373                 struct drm_crtc_state *crtc_state;
2374
2375                 if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
2376                         continue;
2377
2378                 conn_state = drm_atomic_get_connector_state(state, conn);
2379                 if (IS_ERR(conn_state)) {
2380                         err = PTR_ERR(conn_state);
2381                         break;
2382                 }
2383
2384                 if (!conn_state->crtc)
2385                         continue;
2386
2387                 crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
2388                 if (IS_ERR(crtc_state)) {
2389                         err = PTR_ERR(crtc_state);
2390                         break;
2391                 }
2392
2393                 /* Mark mode as changed to trigger a pipe->update() */
2394                 crtc_state->mode_changed = true;
2395         }
2396         drm_connector_list_iter_end(&conn_iter);
2397
2398         if (err == 0)
2399                 err = drm_atomic_commit(state);
2400
2401         if (err == -EDEADLK) {
2402                 drm_atomic_state_clear(state);
2403                 err = drm_modeset_backoff(&ctx);
2404                 if (!err)
2405                         goto retry;
2406         }
2407
2408         drm_modeset_drop_locks(&ctx);
2409         drm_modeset_acquire_fini(&ctx);
2410         drm_atomic_state_put(state);
2411
2412         return err;
2413 }
2414
2415 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
2416 {
2417         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2418         const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
2419         u32 old_mode;
2420         int ret;
2421
2422         if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
2423             mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
2424                 drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
2425                 return -EINVAL;
2426         }
2427
2428         ret = mutex_lock_interruptible(&intel_dp->psr.lock);
2429         if (ret)
2430                 return ret;
2431
2432         old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
2433         intel_dp->psr.debug = val;
2434
2435         /*
2436          * Do it right away if it's already enabled, otherwise it will be done
2437          * when enabling the source.
2438          */
2439         if (intel_dp->psr.enabled)
2440                 psr_irq_control(intel_dp);
2441
2442         mutex_unlock(&intel_dp->psr.lock);
2443
2444         if (old_mode != mode)
2445                 ret = intel_psr_fastset_force(dev_priv);
2446
2447         return ret;
2448 }
2449
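/*
 * Handle a PSR AUX error reported by the interrupt handler: disable PSR,
 * mark the sink as not reliable and wake the sink up via DPCD.
 */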
2450 static void intel_psr_handle_irq(struct intel_dp *intel_dp)
2451 {
2452         struct intel_psr *psr = &intel_dp->psr;
2453
2454         intel_psr_disable_locked(intel_dp);
2455         psr->sink_not_reliable = true;
2456         /* let's make sure the sink is awake */
2457         drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
2458 }
2459
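/*
 * Deferred work used to re-activate PSR after a frontbuffer flush, once any
 * pending AUX error has been handled and the hardware reports idle.
 */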
2460 static void intel_psr_work(struct work_struct *work)
2461 {
2462         struct intel_dp *intel_dp =
2463                 container_of(work, typeof(*intel_dp), psr.work);
2464
2465         mutex_lock(&intel_dp->psr.lock);
2466
2467         if (!intel_dp->psr.enabled)
2468                 goto unlock;
2469
2470         if (READ_ONCE(intel_dp->psr.irq_aux_error))
2471                 intel_psr_handle_irq(intel_dp);
2472
2473         /*
2474          * We have to make sure PSR is ready for re-enable,
2475          * otherwise it stays disabled until the next full enable/disable cycle.
2476          * PSR might take some time to get fully disabled
2477          * and be ready for re-enable.
2478          */
2479         if (!__psr_wait_for_idle_locked(intel_dp))
2480                 goto unlock;
2481
2482         /*
2483          * The delayed work can race with an invalidate hence we need to
2484          * recheck. Since psr_flush first clears this and then reschedules we
2485          * won't ever miss a flush when bailing out here.
2486          */
2487         if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
2488                 goto unlock;
2489
2490         intel_psr_activate(intel_dp);
2491 unlock:
2492         mutex_unlock(&intel_dp->psr.lock);
2493 }
2494
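/*
 * Frontbuffer invalidate handling: with selective fetch, switch the hardware
 * to continuous full frame fetches until the flush arrives; without selective
 * fetch, simply exit PSR.
 */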
2495 static void _psr_invalidate_handle(struct intel_dp *intel_dp)
2496 {
2497         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2498         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2499
2500         if (intel_dp->psr.psr2_sel_fetch_enabled) {
2501                 u32 val;
2502
2503                 if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2504                         /* Send one update, otherwise lag is observed on screen */
2505                         intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2506                         return;
2507                 }
2508
2509                 val = man_trk_ctl_enable_bit_get(dev_priv) |
2510                       man_trk_ctl_partial_frame_bit_get(dev_priv) |
2511                       man_trk_ctl_continuos_full_frame(dev_priv);
2512                 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), val);
2513                 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2514                 intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
2515         } else {
2516                 intel_psr_exit(intel_dp);
2517         }
2518 }
2519
2520 /**
2521  * intel_psr_invalidate - Invalidate PSR
2522  * @dev_priv: i915 device
2523  * @frontbuffer_bits: frontbuffer plane tracking bits
2524  * @origin: which operation caused the invalidate
2525  *
2526  * Since the hardware frontbuffer tracking has gaps we need to integrate
2527  * with the software frontbuffer tracking. This function gets called every
2528  * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
2529  * disabled if the frontbuffer mask contains a buffer relevant to PSR.
2530  *
2531  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2532  */
2533 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
2534                           unsigned frontbuffer_bits, enum fb_op_origin origin)
2535 {
2536         struct intel_encoder *encoder;
2537
2538         if (origin == ORIGIN_FLIP)
2539                 return;
2540
2541         for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2542                 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2543                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2544
2545                 mutex_lock(&intel_dp->psr.lock);
2546                 if (!intel_dp->psr.enabled) {
2547                         mutex_unlock(&intel_dp->psr.lock);
2548                         continue;
2549                 }
2550
2551                 pipe_frontbuffer_bits &=
2552                         INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2553                 intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
2554
2555                 if (pipe_frontbuffer_bits)
2556                         _psr_invalidate_handle(intel_dp);
2557
2558                 mutex_unlock(&intel_dp->psr.lock);
2559         }
2560 }
2561 /*
2562  * Once we completely rely on PSR2 S/W tracking in the future,
2563  * intel_psr_flush() will also invalidate and flush PSR for the ORIGIN_FLIP
2564  * event, therefore tgl_dc3co_flush_locked() will need to be changed
2565  * accordingly in the future.
2566  */
2567 static void
2568 tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
2569                        enum fb_op_origin origin)
2570 {
2571         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2572
2573         if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
2574             !intel_dp->psr.active)
2575                 return;
2576
2577         /*
2578          * At every frontbuffer flush/flip event, modify the delay of the delayed
2579          * work; when the delayed work finally gets scheduled, the display has been idle.
2580          */
2581         if (!(frontbuffer_bits &
2582             INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
2583                 return;
2584
2585         tgl_psr2_enable_dc3co(intel_dp);
2586         mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
2587                          intel_dp->psr.dc3co_exit_delay);
2588 }
2589
2590 static void _psr_flush_handle(struct intel_dp *intel_dp)
2591 {
2592         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2593         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2594
2595         if (intel_dp->psr.psr2_sel_fetch_enabled) {
2596                 if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2597                         /* can we turn CFF off? */
2598                         if (intel_dp->psr.busy_frontbuffer_bits == 0) {
2599                                 u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
2600                                         man_trk_ctl_partial_frame_bit_get(dev_priv) |
2601                                         man_trk_ctl_single_full_frame_bit_get(dev_priv) |
2602                                         man_trk_ctl_continuos_full_frame(dev_priv);
2603
2604                                 /*
2605                                  * Set psr2_sel_fetch_cff_enabled to false to allow selective
2606                                  * updates. Still keep the CFF bit enabled, as we don't have a
2607                                  * proper SU configuration in case an update is sent for any
2608                                  * reason after the SFF bit gets cleared by the HW on the next vblank.
2609                                  */
2610                                 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
2611                                                val);
2612                                 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2613                                 intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2614                         }
2615                 } else {
2616                         /*
2617                          * continuous full frame is disabled, only a single full
2618                          * frame is required
2619                          */
2620                         psr_force_hw_tracking_exit(intel_dp);
2621                 }
2622         } else {
2623                 psr_force_hw_tracking_exit(intel_dp);
2624
2625                 if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
2626                         queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
2627         }
2628 }
2629
2630 /**
2631  * intel_psr_flush - Flush PSR
2632  * @dev_priv: i915 device
2633  * @frontbuffer_bits: frontbuffer plane tracking bits
2634  * @origin: which operation caused the flush
2635  *
2636  * Since the hardware frontbuffer tracking has gaps we need to integrate
2637  * with the software frontbuffer tracking. This function gets called every
2638  * time frontbuffer rendering has completed and flushed out to memory. PSR
2639  * can be enabled again if no other frontbuffer relevant to PSR is dirty.
2640  *
2641  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2642  */
2643 void intel_psr_flush(struct drm_i915_private *dev_priv,
2644                      unsigned frontbuffer_bits, enum fb_op_origin origin)
2645 {
2646         struct intel_encoder *encoder;
2647
2648         for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2649                 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2650                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2651
2652                 mutex_lock(&intel_dp->psr.lock);
2653                 if (!intel_dp->psr.enabled) {
2654                         mutex_unlock(&intel_dp->psr.lock);
2655                         continue;
2656                 }
2657
2658                 pipe_frontbuffer_bits &=
2659                         INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2660                 intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
2661
2662                 /*
2663                  * If the PSR is paused by an explicit intel_psr_paused() call,
2664                  * we have to ensure that the PSR is not activated until
2665                  * intel_psr_resume() is called.
2666                  */
2667                 if (intel_dp->psr.paused)
2668                         goto unlock;
2669
2670                 if (origin == ORIGIN_FLIP ||
2671                     (origin == ORIGIN_CURSOR_UPDATE &&
2672                      !intel_dp->psr.psr2_sel_fetch_enabled)) {
2673                         tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
2674                         goto unlock;
2675                 }
2676
2677                 if (pipe_frontbuffer_bits == 0)
2678                         goto unlock;
2679
2680                 /* By definition flush = invalidate + flush */
2681                 _psr_flush_handle(intel_dp);
2682 unlock:
2683                 mutex_unlock(&intel_dp->psr.lock);
2684         }
2685 }
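/*
 * Usage sketch (illustrative only, not a verbatim call site): the software
 * frontbuffer tracking is expected to call this once rendering has been
 * flushed out to memory, roughly as
 *
 *	intel_psr_flush(i915, frontbuffer_bits, ORIGIN_FLIP);
 *
 * where frontbuffer_bits covers the planes that were written, e.g.
 * INTEL_FRONTBUFFER_ALL_MASK(pipe) for a full-pipe update; the variable
 * names above are assumed for illustration.
 */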
2686
2687 /**
2688  * intel_psr_init - Init basic PSR work and mutex.
2689  * @intel_dp: Intel DP
2690  *
2691  * This function is called after the connector has been initialized
2692  * (connector initialization handles the connector capabilities)
2693  * and initializes the basic PSR state for each DP encoder.
2694  */
2695 void intel_psr_init(struct intel_dp *intel_dp)
2696 {
2697         struct intel_connector *connector = intel_dp->attached_connector;
2698         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2699         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2700
2701         if (!HAS_PSR(dev_priv))
2702                 return;
2703
2704         /*
2705          * HSW spec explicitly says PSR is tied to port A.
2706          * BDW+ platforms have an instance of PSR registers per transcoder, but
2707          * BDW, GEN9 and GEN11 are not validated by the HW team on any transcoder
2708          * other than the eDP one.
2709          * For now only one instance of PSR is supported on BDW, GEN9 and GEN11,
2710          * so let's keep it hardcoded to PORT_A on those platforms.
2711          * GEN12, however, supports an instance of PSR registers per transcoder.
2712          */
2713         if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
2714                 drm_dbg_kms(&dev_priv->drm,
2715                             "PSR condition failed: Port not supported\n");
2716                 return;
2717         }
2718
2719         intel_dp->psr.source_support = true;
2720
2721         /* Set link_standby / link_off defaults */
2722         if (DISPLAY_VER(dev_priv) < 12)
2723                 /* For platforms up to TGL, respect the VBT setting again */
2724                 intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
2725
2726         INIT_WORK(&intel_dp->psr.work, intel_psr_work);
2727         INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
2728         mutex_init(&intel_dp->psr.lock);
2729 }
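/*
 * Illustrative ordering (a sketch of the expectation stated in the kerneldoc
 * above, not a verbatim call site): the encoder setup code first initializes
 * the connector, which handles the connector capabilities, and only then
 * calls this helper:
 *
 *	... connector initialization ...
 *	intel_psr_init(intel_dp);
 */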
2730
2731 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
2732                                            u8 *status, u8 *error_status)
2733 {
2734         struct drm_dp_aux *aux = &intel_dp->aux;
2735         int ret;
2736
2737         ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
2738         if (ret != 1)
2739                 return ret;
2740
2741         ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
2742         if (ret != 1)
2743                 return ret;
2744
2745         *status = *status & DP_PSR_SINK_STATE_MASK;
2746
2747         return 0;
2748 }
2749
2750 static void psr_alpm_check(struct intel_dp *intel_dp)
2751 {
2752         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2753         struct drm_dp_aux *aux = &intel_dp->aux;
2754         struct intel_psr *psr = &intel_dp->psr;
2755         u8 val;
2756         int r;
2757
2758         if (!psr->psr2_enabled)
2759                 return;
2760
2761         r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
2762         if (r != 1) {
2763                 drm_err(&dev_priv->drm, "Error reading ALPM status\n");
2764                 return;
2765         }
2766
2767         if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
2768                 intel_psr_disable_locked(intel_dp);
2769                 psr->sink_not_reliable = true;
2770                 drm_dbg_kms(&dev_priv->drm,
2771                             "ALPM lock timeout error, disabling PSR\n");
2772
2773                 /* Clearing error */
2774                 drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
2775         }
2776 }
2777
2778 static void psr_capability_changed_check(struct intel_dp *intel_dp)
2779 {
2780         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2781         struct intel_psr *psr = &intel_dp->psr;
2782         u8 val;
2783         int r;
2784
2785         r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
2786         if (r != 1) {
2787                 drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
2788                 return;
2789         }
2790
2791         if (val & DP_PSR_CAPS_CHANGE) {
2792                 intel_psr_disable_locked(intel_dp);
2793                 psr->sink_not_reliable = true;
2794                 drm_dbg_kms(&dev_priv->drm,
2795                             "Sink PSR capability changed, disabling PSR\n");
2796
2797                 /* Clearing it */
2798                 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
2799         }
2800 }
2801
2802 void intel_psr_short_pulse(struct intel_dp *intel_dp)
2803 {
2804         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2805         struct intel_psr *psr = &intel_dp->psr;
2806         u8 status, error_status;
2807         const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
2808                           DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
2809                           DP_PSR_LINK_CRC_ERROR;
2810
2811         if (!CAN_PSR(intel_dp))
2812                 return;
2813
2814         mutex_lock(&psr->lock);
2815
2816         if (!psr->enabled)
2817                 goto exit;
2818
2819         if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
2820                 drm_err(&dev_priv->drm,
2821                         "Error reading PSR status or error status\n");
2822                 goto exit;
2823         }
2824
2825         if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
2826                 intel_psr_disable_locked(intel_dp);
2827                 psr->sink_not_reliable = true;
2828         }
2829
2830         if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
2831                 drm_dbg_kms(&dev_priv->drm,
2832                             "PSR sink internal error, disabling PSR\n");
2833         if (error_status & DP_PSR_RFB_STORAGE_ERROR)
2834                 drm_dbg_kms(&dev_priv->drm,
2835                             "PSR RFB storage error, disabling PSR\n");
2836         if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
2837                 drm_dbg_kms(&dev_priv->drm,
2838                             "PSR VSC SDP uncorrectable error, disabling PSR\n");
2839         if (error_status & DP_PSR_LINK_CRC_ERROR)
2840                 drm_dbg_kms(&dev_priv->drm,
2841                             "PSR Link CRC error, disabling PSR\n");
2842
2843         if (error_status & ~errors)
2844                 drm_err(&dev_priv->drm,
2845                         "PSR_ERROR_STATUS unhandled errors %x\n",
2846                         error_status & ~errors);
2847         /* clear status register */
2848         drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
2849
2850         psr_alpm_check(intel_dp);
2851         psr_capability_changed_check(intel_dp);
2852
2853 exit:
2854         mutex_unlock(&psr->lock);
2855 }
2856
2857 bool intel_psr_enabled(struct intel_dp *intel_dp)
2858 {
2859         bool ret;
2860
2861         if (!CAN_PSR(intel_dp))
2862                 return false;
2863
2864         mutex_lock(&intel_dp->psr.lock);
2865         ret = intel_dp->psr.enabled;
2866         mutex_unlock(&intel_dp->psr.lock);
2867
2868         return ret;
2869 }
2870
2871 /**
2872  * intel_psr_lock - grab PSR lock
2873  * @crtc_state: the crtc state
2874  *
2875  * This is initially meant to be used around the CRTC update, when
2876  * vblank-sensitive registers are updated and we need to grab the lock
2877  * beforehand to avoid vblank evasion.
2878  */
2879 void intel_psr_lock(const struct intel_crtc_state *crtc_state)
2880 {
2881         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2882         struct intel_encoder *encoder;
2883
2884         if (!crtc_state->has_psr)
2885                 return;
2886
2887         for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
2888                                              crtc_state->uapi.encoder_mask) {
2889                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2890
2891                 mutex_lock(&intel_dp->psr.lock);
2892                 break;
2893         }
2894 }
2895
2896 /**
2897  * intel_psr_unlock - release PSR lock
2898  * @crtc_state: the crtc state
2899  *
2900  * Release the PSR lock that was held during pipe update.
2901  */
2902 void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
2903 {
2904         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2905         struct intel_encoder *encoder;
2906
2907         if (!crtc_state->has_psr)
2908                 return;
2909
2910         for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
2911                                              crtc_state->uapi.encoder_mask) {
2912                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2913
2914                 mutex_unlock(&intel_dp->psr.lock);
2915                 break;
2916         }
2917 }
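/*
 * Usage sketch (illustrative): the pipe update path brackets the
 * vblank-sensitive register programming with these helpers, e.g.
 *
 *	intel_psr_lock(new_crtc_state);
 *	... program vblank-sensitive registers ...
 *	intel_psr_unlock(new_crtc_state);
 *
 * where new_crtc_state is the crtc state being committed (the name is
 * assumed here for illustration).
 */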
2918
2919 static void
2920 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
2921 {
2922         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2923         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2924         const char *status = "unknown";
2925         u32 val, status_val;
2926
2927         if (intel_dp->psr.psr2_enabled) {
2928                 static const char * const live_status[] = {
2929                         "IDLE",
2930                         "CAPTURE",
2931                         "CAPTURE_FS",
2932                         "SLEEP",
2933                         "BUFON_FW",
2934                         "ML_UP",
2935                         "SU_STANDBY",
2936                         "FAST_SLEEP",
2937                         "DEEP_SLEEP",
2938                         "BUF_ON",
2939                         "TG_ON"
2940                 };
2941                 val = intel_de_read(dev_priv, EDP_PSR2_STATUS(cpu_transcoder));
2942                 status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
2943                 if (status_val < ARRAY_SIZE(live_status))
2944                         status = live_status[status_val];
2945         } else {
2946                 static const char * const live_status[] = {
2947                         "IDLE",
2948                         "SRDONACK",
2949                         "SRDENT",
2950                         "BUFOFF",
2951                         "BUFON",
2952                         "AUXACK",
2953                         "SRDOFFACK",
2954                         "SRDENT_ON",
2955                 };
2956                 val = intel_de_read(dev_priv, psr_status_reg(dev_priv, cpu_transcoder));
2957                 status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
2958                 if (status_val < ARRAY_SIZE(live_status))
2959                         status = live_status[status_val];
2960         }
2961
2962         seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
2963 }
2964
2965 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
2966 {
2967         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2968         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2969         struct intel_psr *psr = &intel_dp->psr;
2970         intel_wakeref_t wakeref;
2971         const char *status;
2972         bool enabled;
2973         u32 val;
2974
2975         seq_printf(m, "Sink support: %s", str_yes_no(psr->sink_support));
2976         if (psr->sink_support)
2977                 seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
2978         seq_puts(m, "\n");
2979
2980         if (!psr->sink_support)
2981                 return 0;
2982
2983         wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2984         mutex_lock(&psr->lock);
2985
2986         if (psr->enabled)
2987                 status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
2988         else
2989                 status = "disabled";
2990         seq_printf(m, "PSR mode: %s\n", status);
2991
2992         if (!psr->enabled) {
2993                 seq_printf(m, "PSR sink not reliable: %s\n",
2994                            str_yes_no(psr->sink_not_reliable));
2995
2996                 goto unlock;
2997         }
2998
2999         if (psr->psr2_enabled) {
3000                 val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
3001                 enabled = val & EDP_PSR2_ENABLE;
3002         } else {
3003                 val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
3004                 enabled = val & EDP_PSR_ENABLE;
3005         }
3006         seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
3007                    str_enabled_disabled(enabled), val);
3008         psr_source_status(intel_dp, m);
3009         seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
3010                    psr->busy_frontbuffer_bits);
3011
3012         /*
3013          * SKL+ Perf counter is reset to 0 every time a DC state is entered
3014          */
3015         val = intel_de_read(dev_priv, psr_perf_cnt_reg(dev_priv, cpu_transcoder));
3016         seq_printf(m, "Performance counter: %u\n",
3017                    REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
3018
3019         if (psr->debug & I915_PSR_DEBUG_IRQ) {
3020                 seq_printf(m, "Last attempted entry at: %lld\n",
3021                            psr->last_entry_attempt);
3022                 seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
3023         }
3024
3025         if (psr->psr2_enabled) {
3026                 u32 su_frames_val[3];
3027                 int frame;
3028
3029                 /*
3030                  * Read all 3 registers beforehand to minimize the chance of crossing
3031                  * a frame boundary between the register reads.
3032                  */
3033                 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
3034                         val = intel_de_read(dev_priv, PSR2_SU_STATUS(cpu_transcoder, frame));
3035                         su_frames_val[frame / 3] = val;
3036                 }
3037
3038                 seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
3039
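                /*
                 * Each PSR2_SU_STATUS register packs the SU block count of
                 * three consecutive frames; unpack each frame with the
                 * per-frame mask and shift below.
                 */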
3040                 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
3041                         u32 su_blocks;
3042
3043                         su_blocks = su_frames_val[frame / 3] &
3044                                     PSR2_SU_STATUS_MASK(frame);
3045                         su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
3046                         seq_printf(m, "%d\t%d\n", frame, su_blocks);
3047                 }
3048
3049                 seq_printf(m, "PSR2 selective fetch: %s\n",
3050                            str_enabled_disabled(psr->psr2_sel_fetch_enabled));
3051         }
3052
3053 unlock:
3054         mutex_unlock(&psr->lock);
3055         intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3056
3057         return 0;
3058 }
3059
3060 static int i915_edp_psr_status_show(struct seq_file *m, void *data)
3061 {
3062         struct drm_i915_private *dev_priv = m->private;
3063         struct intel_dp *intel_dp = NULL;
3064         struct intel_encoder *encoder;
3065
3066         if (!HAS_PSR(dev_priv))
3067                 return -ENODEV;
3068
3069         /* Find the first eDP encoder which supports PSR */
3070         for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3071                 intel_dp = enc_to_intel_dp(encoder);
3072                 break;
3073         }
3074
3075         if (!intel_dp)
3076                 return -ENODEV;
3077
3078         return intel_psr_status(m, intel_dp);
3079 }
3080 DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
3081
3082 static int
3083 i915_edp_psr_debug_set(void *data, u64 val)
3084 {
3085         struct drm_i915_private *dev_priv = data;
3086         struct intel_encoder *encoder;
3087         intel_wakeref_t wakeref;
3088         int ret = -ENODEV;
3089
3090         if (!HAS_PSR(dev_priv))
3091                 return ret;
3092
3093         for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3094                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3095
3096                 drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
3097
3098                 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3099
3100                 // TODO: split to each transcoder's PSR debug state
3101                 ret = intel_psr_debug_set(intel_dp, val);
3102
3103                 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3104         }
3105
3106         return ret;
3107 }
3108
3109 static int
3110 i915_edp_psr_debug_get(void *data, u64 *val)
3111 {
3112         struct drm_i915_private *dev_priv = data;
3113         struct intel_encoder *encoder;
3114
3115         if (!HAS_PSR(dev_priv))
3116                 return -ENODEV;
3117
3118         for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3119                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3120
3121                 // TODO: split to each transcoder's PSR debug state
3122                 *val = READ_ONCE(intel_dp->psr.debug);
3123                 return 0;
3124         }
3125
3126         return -ENODEV;
3127 }
3128
3129 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
3130                         i915_edp_psr_debug_get, i915_edp_psr_debug_set,
3131                         "%llu\n");
3132
3133 void intel_psr_debugfs_register(struct drm_i915_private *i915)
3134 {
3135         struct drm_minor *minor = i915->drm.primary;
3136
3137         debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
3138                             i915, &i915_edp_psr_debug_fops);
3139
3140         debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
3141                             i915, &i915_edp_psr_status_fops);
3142 }
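/*
 * A quick illustration (the debugfs mount point is an assumption): once
 * registered, these files appear under the DRM minor's debugfs directory,
 * typically /sys/kernel/debug/dri/<minor>/i915_edp_psr_status (read-only)
 * and /sys/kernel/debug/dri/<minor>/i915_edp_psr_debug (read/write), and
 * can be inspected with a plain read from userspace.
 */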
3143
3144 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
3145 {
3146         struct intel_connector *connector = m->private;
3147         struct intel_dp *intel_dp = intel_attached_dp(connector);
3148         static const char * const sink_status[] = {
3149                 "inactive",
3150                 "transition to active, capture and display",
3151                 "active, display from RFB",
3152                 "active, capture and display on sink device timings",
3153                 "transition to inactive, capture and display, timing re-sync",
3154                 "reserved",
3155                 "reserved",
3156                 "sink internal error",
3157         };
3158         const char *str;
3159         int ret;
3160         u8 val;
3161
3162         if (!CAN_PSR(intel_dp)) {
3163                 seq_puts(m, "PSR Unsupported\n");
3164                 return -ENODEV;
3165         }
3166
3167         if (connector->base.status != connector_status_connected)
3168                 return -ENODEV;
3169
3170         ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
3171         if (ret != 1)
3172                 return ret < 0 ? ret : -EIO;
3173
3174         val &= DP_PSR_SINK_STATE_MASK;
3175         if (val < ARRAY_SIZE(sink_status))
3176                 str = sink_status[val];
3177         else
3178                 str = "unknown";
3179
3180         seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
3181
3182         return 0;
3183 }
3184 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
3185
3186 static int i915_psr_status_show(struct seq_file *m, void *data)
3187 {
3188         struct intel_connector *connector = m->private;
3189         struct intel_dp *intel_dp = intel_attached_dp(connector);
3190
3191         return intel_psr_status(m, intel_dp);
3192 }
3193 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
3194
3195 void intel_psr_connector_debugfs_add(struct intel_connector *connector)
3196 {
3197         struct drm_i915_private *i915 = to_i915(connector->base.dev);
3198         struct dentry *root = connector->base.debugfs_entry;
3199
3200         if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
3201                 return;
3202
3203         debugfs_create_file("i915_psr_sink_status", 0444, root,
3204                             connector, &i915_psr_sink_status_fops);
3205
3206         if (HAS_PSR(i915))
3207                 debugfs_create_file("i915_psr_status", 0444, root,
3208                                     connector, &i915_psr_status_fops);
3209 }