platform/kernel/linux-starfive.git: drivers/gpu/drm/i915/display/intel_psr.c
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23
24 #include <drm/drm_atomic_helper.h>
25 #include <drm/drm_damage_helper.h>
26
27 #include "i915_drv.h"
28 #include "i915_reg.h"
29 #include "intel_atomic.h"
30 #include "intel_crtc.h"
31 #include "intel_de.h"
32 #include "intel_display_types.h"
33 #include "intel_dp.h"
34 #include "intel_dp_aux.h"
35 #include "intel_hdmi.h"
36 #include "intel_psr.h"
37 #include "intel_psr_regs.h"
38 #include "intel_snps_phy.h"
39 #include "skl_universal_plane.h"
40
41 /**
42  * DOC: Panel Self Refresh (PSR/SRD)
43  *
44  * Since Haswell the display controller supports Panel Self-Refresh on
45  * display panels which have a remote frame buffer (RFB) implemented
46  * according to the PSR spec in eDP 1.3. PSR allows the display to go to
47  * lower standby states when the system is idle but the display is on, as
48  * it completely eliminates display refresh requests to DDR memory as long
49  * as the frame buffer for that display is unchanged.
50  *
51  * Panel Self Refresh must be supported by both Hardware (source) and
52  * Panel (sink).
53  *
54  * PSR saves power by caching the framebuffer in the panel RFB, which allows us
55  * to power down the link and memory controller. For DSI panels the same idea
56  * is called "manual mode".
57  *
58  * The implementation uses the hardware-based PSR support which automatically
59  * enters/exits self-refresh mode. The hardware takes care of sending the
60  * required DP aux message and could even retrain the link (that part isn't
61  * enabled yet though). The hardware also keeps track of any frontbuffer
62  * changes to know when to exit self-refresh mode again. Unfortunately that
63  * part doesn't work too well, which is why the i915 PSR support uses
64  * software frontbuffer tracking to make sure it doesn't miss a screen
65  * update. For this integration intel_psr_invalidate() and intel_psr_flush()
66  * get called by the frontbuffer tracking code. Note that because of locking
67  * issues the self-refresh re-enable code is done from a work queue, which
68  * must be correctly synchronized/cancelled when shutting down the pipe.
69  *
70  * DC3CO (DC3 clock off)
71  *
72  * On top of PSR2, GEN12 adds an intermediate power-saving state that turns
73  * the clock off automatically while in the PSR2 idle state.
74  * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep
75  * entry/exit allows the HW to enter a low-power state even when page flipping
76  * periodically (for instance a 30fps video playback scenario).
77  *
78  * Every time a flip occurs PSR2 gets out of deep sleep state (if it was in
79  * it), DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run after
80  * 6 frames. If no other flip occurs and that work function executes, DC3CO is
81  * disabled and PSR2 is configured to enter deep sleep again, restarting the
82  * cycle in case of another flip.
83  * Front buffer modifications intentionally do not trigger DC3CO activation,
84  * as that would bring a lot of complexity and most modern systems will only
85  * use page flips.
86  */
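/*
 * Illustrative sketch only (not part of the driver): the frontbuffer
 * tracking code is expected to bracket CPU frontbuffer writes roughly as
 * below, with intel_psr_invalidate() forcing a PSR exit before the write
 * and intel_psr_flush() scheduling the re-enable afterwards. The argument
 * list shown here is an assumption for illustration; the real prototypes
 * live in intel_psr.h.
 *
 *	intel_psr_invalidate(i915, frontbuffer_bits, ORIGIN_CPU);
 *	...CPU renders into the frontbuffer...
 *	intel_psr_flush(i915, frontbuffer_bits, ORIGIN_CPU);
 */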
87
88 /*
89  * Description of PSR mask bits:
90  *
91  * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
92  *
93  *  When unmasked (nearly) all display register writes (eg. even
94  *  SWF) trigger a PSR exit. Some registers are excluded from this
95  *  and they have a more specific mask (described below). On icl+
96  *  this bit no longer exists and is effectively always set.
97  *
98  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
99  *
100  *  When unmasked (nearly) all pipe/plane register writes
101  *  trigger a PSR exit. Some plane registers are excluded from this
102  *  and they have a more specific mask (described below).
103  *
104  * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
105  * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
106  * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
107  *
108  *  When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
109  *  SPR_SURF/CURBASE are not included in this and instead are
110  *  controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
111  *  EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
112  *
113  * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
114  * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
115  *
116  *  When unmasked PSR is blocked as long as the sprite
117  *  plane is enabled. skl+ with their universal planes no
118  *  longer have a mask bit like this, and no plane being
119  *  enabled blocks PSR.
120  *
121  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
122  * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
123  *
124  *  When unmasked CURPOS writes trigger a PSR exit. On skl+
125  *  this doesn't exist but CURPOS is included in the
126  *  PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
127  *
128  * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
129  * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
130  *
131  *  When unmasked PSR is blocked as long as vblank and/or vsync
132  *  interrupt is unmasked in IMR *and* enabled in IER.
133  *
134  * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
135  * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
136  *
137  *  Selects whether PSR exit generates an extra vblank before
138  *  the first frame is transmitted. Also note the opposite polarity
139  *  of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
140  *  unmasked==do not generate the extra vblank).
141  *
142  *  With DC states enabled the extra vblank happens after link training,
143  *  with DC states disabled it happens immediately upon PSR exit trigger.
144  *  No idea as of now why there is a difference. HSW/BDW (which don't
145  *  even have DMC) always generate it after link training. Go figure.
146  *
147  *  Unfortunately CHICKEN_TRANS itself seems to be double buffered
148  *  and thus won't latch until the first vblank. So with DC states
149  *  enabled the register effectively uses the reset value during DC5
150  *  exit+PSR exit sequence, and thus the bit does nothing until
151  *  latched by the vblank that it was trying to prevent from being
152  *  generated in the first place. So we should probably call this
153  *  one a chicken/egg bit instead on skl+.
154  *
155  *  In standby mode (as opposed to link-off) this makes no difference
156  *  as the timing generator keeps running the whole time generating
157  *  normal periodic vblanks.
158  *
159  *  WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
160  *  and doing so makes the behaviour match the skl+ reset value.
161  *
162  * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
163  * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
164  *
165  *  On BDW without this bit set no vblanks whatsoever are
166  *  generated after PSR exit. On HSW this has no apparent effect.
167  *  WaPsrDPRSUnmaskVBlankInSRD says to set this.
168  *
169  * The rest of the bits are more self-explanatory and/or
170  * irrelevant for normal operation.
171  */
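/*
 * Illustrative sketch only: programming one of the mask bits described
 * above follows the read-modify-write pattern used throughout this file,
 * e.g. masking cursor moves so they no longer trigger a PSR exit on
 * hsw/bdw might look like the call below (whether a given workaround
 * wants the bit set or cleared depends on the platform in question):
 *
 *	intel_de_rmw(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder),
 *		     0, EDP_PSR_DEBUG_MASK_CURSOR_MOVE);
 */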
172
173 static bool psr_global_enabled(struct intel_dp *intel_dp)
174 {
175         struct intel_connector *connector = intel_dp->attached_connector;
176         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
177
178         switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
179         case I915_PSR_DEBUG_DEFAULT:
180                 if (i915->params.enable_psr == -1)
181                         return connector->panel.vbt.psr.enable;
182                 return i915->params.enable_psr;
183         case I915_PSR_DEBUG_DISABLE:
184                 return false;
185         default:
186                 return true;
187         }
188 }
189
190 static bool psr2_global_enabled(struct intel_dp *intel_dp)
191 {
192         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
193
194         switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
195         case I915_PSR_DEBUG_DISABLE:
196         case I915_PSR_DEBUG_FORCE_PSR1:
197                 return false;
198         default:
199                 if (i915->params.enable_psr == 1)
200                         return false;
201                 return true;
202         }
203 }
204
205 static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
206 {
207         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
208
209         return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
210                 EDP_PSR_ERROR(intel_dp->psr.transcoder);
211 }
212
213 static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
214 {
215         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
216
217         return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
218                 EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
219 }
220
221 static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
222 {
223         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
224
225         return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
226                 EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
227 }
228
229 static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
230 {
231         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
232
233         return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
234                 EDP_PSR_MASK(intel_dp->psr.transcoder);
235 }
236
237 static i915_reg_t psr_ctl_reg(struct drm_i915_private *dev_priv,
238                               enum transcoder cpu_transcoder)
239 {
240         if (DISPLAY_VER(dev_priv) >= 8)
241                 return EDP_PSR_CTL(cpu_transcoder);
242         else
243                 return HSW_SRD_CTL;
244 }
245
246 static i915_reg_t psr_debug_reg(struct drm_i915_private *dev_priv,
247                                 enum transcoder cpu_transcoder)
248 {
249         if (DISPLAY_VER(dev_priv) >= 8)
250                 return EDP_PSR_DEBUG(cpu_transcoder);
251         else
252                 return HSW_SRD_DEBUG;
253 }
254
255 static i915_reg_t psr_perf_cnt_reg(struct drm_i915_private *dev_priv,
256                                    enum transcoder cpu_transcoder)
257 {
258         if (DISPLAY_VER(dev_priv) >= 8)
259                 return EDP_PSR_PERF_CNT(cpu_transcoder);
260         else
261                 return HSW_SRD_PERF_CNT;
262 }
263
264 static i915_reg_t psr_status_reg(struct drm_i915_private *dev_priv,
265                                  enum transcoder cpu_transcoder)
266 {
267         if (DISPLAY_VER(dev_priv) >= 8)
268                 return EDP_PSR_STATUS(cpu_transcoder);
269         else
270                 return HSW_SRD_STATUS;
271 }
272
273 static i915_reg_t psr_imr_reg(struct drm_i915_private *dev_priv,
274                               enum transcoder cpu_transcoder)
275 {
276         if (DISPLAY_VER(dev_priv) >= 12)
277                 return TRANS_PSR_IMR(cpu_transcoder);
278         else
279                 return EDP_PSR_IMR;
280 }
281
282 static i915_reg_t psr_iir_reg(struct drm_i915_private *dev_priv,
283                               enum transcoder cpu_transcoder)
284 {
285         if (DISPLAY_VER(dev_priv) >= 12)
286                 return TRANS_PSR_IIR(cpu_transcoder);
287         else
288                 return EDP_PSR_IIR;
289 }
290
291 static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
292                                   enum transcoder cpu_transcoder)
293 {
294         if (DISPLAY_VER(dev_priv) >= 8)
295                 return EDP_PSR_AUX_CTL(cpu_transcoder);
296         else
297                 return HSW_SRD_AUX_CTL;
298 }
299
300 static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
301                                    enum transcoder cpu_transcoder, int i)
302 {
303         if (DISPLAY_VER(dev_priv) >= 8)
304                 return EDP_PSR_AUX_DATA(cpu_transcoder, i);
305         else
306                 return HSW_SRD_AUX_DATA(i);
307 }
308
309 static void psr_irq_control(struct intel_dp *intel_dp)
310 {
311         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
312         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
313         u32 mask;
314
315         mask = psr_irq_psr_error_bit_get(intel_dp);
316         if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
317                 mask |= psr_irq_post_exit_bit_get(intel_dp) |
318                         psr_irq_pre_entry_bit_get(intel_dp);
319
320         intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
321                      psr_irq_mask_get(intel_dp), ~mask);
322 }
323
324 static void psr_event_print(struct drm_i915_private *i915,
325                             u32 val, bool psr2_enabled)
326 {
327         drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
328         if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
329                 drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
330         if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
331                 drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
332         if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
333                 drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
334         if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
335                 drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
336         if (val & PSR_EVENT_GRAPHICS_RESET)
337                 drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
338         if (val & PSR_EVENT_PCH_INTERRUPT)
339                 drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
340         if (val & PSR_EVENT_MEMORY_UP)
341                 drm_dbg_kms(&i915->drm, "\tMemory up\n");
342         if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
343                 drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
344         if (val & PSR_EVENT_WD_TIMER_EXPIRE)
345                 drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
346         if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
347                 drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
348         if (val & PSR_EVENT_REGISTER_UPDATE)
349                 drm_dbg_kms(&i915->drm, "\tRegister updated\n");
350         if (val & PSR_EVENT_HDCP_ENABLE)
351                 drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
352         if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
353                 drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
354         if (val & PSR_EVENT_VBI_ENABLE)
355                 drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
356         if (val & PSR_EVENT_LPSP_MODE_EXIT)
357                 drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
358         if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
359                 drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
360 }
361
362 void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
363 {
364         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
365         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
366         ktime_t time_ns =  ktime_get();
367
368         if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
369                 intel_dp->psr.last_entry_attempt = time_ns;
370                 drm_dbg_kms(&dev_priv->drm,
371                             "[transcoder %s] PSR entry attempt in 2 vblanks\n",
372                             transcoder_name(cpu_transcoder));
373         }
374
375         if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
376                 intel_dp->psr.last_exit = time_ns;
377                 drm_dbg_kms(&dev_priv->drm,
378                             "[transcoder %s] PSR exit completed\n",
379                             transcoder_name(cpu_transcoder));
380
381                 if (DISPLAY_VER(dev_priv) >= 9) {
382                         u32 val;
383
384                         val = intel_de_rmw(dev_priv, PSR_EVENT(cpu_transcoder), 0, 0);
385
386                         psr_event_print(dev_priv, val, intel_dp->psr.psr2_enabled);
387                 }
388         }
389
390         if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
391                 drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
392                          transcoder_name(cpu_transcoder));
393
394                 intel_dp->psr.irq_aux_error = true;
395
396                 /*
397                  * If this interrupt is not masked it will keep
398                  * interrupting so fast that it prevents the scheduled
399                  * work from running.
400                  * Also, after a PSR error we don't want to arm PSR
401                  * again, so we don't care about unmasking the interrupt
402                  * or unsetting irq_aux_error.
403                  */
404                 intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
405                              0, psr_irq_psr_error_bit_get(intel_dp));
406
407                 queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
408         }
409 }
410
411 static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
412 {
413         u8 alpm_caps = 0;
414
415         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
416                               &alpm_caps) != 1)
417                 return false;
418         return alpm_caps & DP_ALPM_CAP;
419 }
420
421 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
422 {
423         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
424         u8 val = 8; /* assume the worst if we can't read the value */
425
426         if (drm_dp_dpcd_readb(&intel_dp->aux,
427                               DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
428                 val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
429         else
430                 drm_dbg_kms(&i915->drm,
431                             "Unable to get sink synchronization latency, assuming 8 frames\n");
432         return val;
433 }
434
435 static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
436 {
437         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
438         ssize_t r;
439         u16 w;
440         u8 y;
441
442         /* If the sink doesn't have specific granularity requirements, set legacy ones */
443         if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
444                 /* As PSR2 HW sends full lines, we do not care about x granularity */
445                 w = 4;
446                 y = 4;
447                 goto exit;
448         }
449
450         r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2);
451         if (r != 2)
452                 drm_dbg_kms(&i915->drm,
453                             "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
454         /*
455          * Spec says that if the value read is 0 the default granularity should
456          * be used instead.
457          */
458         if (r != 2 || w == 0)
459                 w = 4;
460
461         r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
462         if (r != 1) {
463                 drm_dbg_kms(&i915->drm,
464                             "Unable to read DP_PSR2_SU_Y_GRANULARITY\n");
465                 y = 4;
466         }
467         if (y == 0)
468                 y = 1;
469
470 exit:
471         intel_dp->psr.su_w_granularity = w;
472         intel_dp->psr.su_y_granularity = y;
473 }
474
475 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
476 {
477         struct drm_i915_private *dev_priv =
478                 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
479
480         drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
481                          sizeof(intel_dp->psr_dpcd));
482
483         if (!intel_dp->psr_dpcd[0])
484                 return;
485         drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
486                     intel_dp->psr_dpcd[0]);
487
488         if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
489                 drm_dbg_kms(&dev_priv->drm,
490                             "PSR support not currently available for this panel\n");
491                 return;
492         }
493
494         if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
495                 drm_dbg_kms(&dev_priv->drm,
496                             "Panel lacks power state control, PSR cannot be enabled\n");
497                 return;
498         }
499
500         intel_dp->psr.sink_support = true;
501         intel_dp->psr.sink_sync_latency =
502                 intel_dp_get_sink_sync_latency(intel_dp);
503
504         if (DISPLAY_VER(dev_priv) >= 9 &&
505             (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
506                 bool y_req = intel_dp->psr_dpcd[1] &
507                              DP_PSR2_SU_Y_COORDINATE_REQUIRED;
508                 bool alpm = intel_dp_get_alpm_status(intel_dp);
509
510                 /*
511                  * All panels that support PSR version 03h (PSR2 +
512                  * Y-coordinate) can handle Y-coordinates in VSC, but we are
513                  * only sure that it is going to be used when required by the
514                  * panel. This way the panel is capable of doing selective
515                  * updates without an aux frame sync.
516                  *
517                  * To support PSR version 02h, and PSR version 03h panels
518                  * without the Y-coordinate requirement, we would need to
519                  * enable GTC first.
520                  */
521                 intel_dp->psr.sink_psr2_support = y_req && alpm;
522                 drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
523                             intel_dp->psr.sink_psr2_support ? "" : "not ");
524
525                 if (intel_dp->psr.sink_psr2_support) {
526                         intel_dp->psr.colorimetry_support =
527                                 intel_dp_get_colorimetry_status(intel_dp);
528                         intel_dp_get_su_granularity(intel_dp);
529                 }
530         }
531 }
532
533 static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
534 {
535         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
536         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
537         u32 aux_clock_divider, aux_ctl;
538         /* write DP_SET_POWER=D0 */
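        /*
         * Native AUX write header layout: byte 0 carries the request type
         * and address bits 19:16, bytes 1-2 the remaining address bits,
         * byte 3 the payload length minus one (hence the "1 - 1" below for
         * a single data byte), followed by the payload itself.
         */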
539         static const u8 aux_msg[] = {
540                 [0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
541                 [1] = (DP_SET_POWER >> 8) & 0xff,
542                 [2] = DP_SET_POWER & 0xff,
543                 [3] = 1 - 1,
544                 [4] = DP_SET_POWER_D0,
545         };
546         int i;
547
548         BUILD_BUG_ON(sizeof(aux_msg) > 20);
549         for (i = 0; i < sizeof(aux_msg); i += 4)
550                 intel_de_write(dev_priv,
551                                psr_aux_data_reg(dev_priv, cpu_transcoder, i >> 2),
552                                intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
553
554         aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
555
556         /* Start with bits set for DDI_AUX_CTL register */
557         aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
558                                              aux_clock_divider);
559
560         /* Select only valid bits for SRD_AUX_CTL */
561         aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
562                 EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
563                 EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
564                 EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
565
566         intel_de_write(dev_priv, psr_aux_ctl_reg(dev_priv, cpu_transcoder),
567                        aux_ctl);
568 }
569
570 static void intel_psr_enable_sink(struct intel_dp *intel_dp)
571 {
572         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
573         u8 dpcd_val = DP_PSR_ENABLE;
574
575         /* Enable ALPM at sink for psr2 */
576         if (intel_dp->psr.psr2_enabled) {
577                 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
578                                    DP_ALPM_ENABLE |
579                                    DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
580
581                 dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
582         } else {
583                 if (intel_dp->psr.link_standby)
584                         dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
585
586                 if (DISPLAY_VER(dev_priv) >= 8)
587                         dpcd_val |= DP_PSR_CRC_VERIFICATION;
588         }
589
590         if (intel_dp->psr.req_psr2_sdp_prior_scanline)
591                 dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
592
593         drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
594
595         drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
596 }
597
598 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
599 {
600         struct intel_connector *connector = intel_dp->attached_connector;
601         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
602         u32 val = 0;
603
604         if (DISPLAY_VER(dev_priv) >= 11)
605                 val |= EDP_PSR_TP4_TIME_0us;
606
607         if (dev_priv->params.psr_safest_params) {
608                 val |= EDP_PSR_TP1_TIME_2500us;
609                 val |= EDP_PSR_TP2_TP3_TIME_2500us;
610                 goto check_tp3_sel;
611         }
612
613         if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
614                 val |= EDP_PSR_TP1_TIME_0us;
615         else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
616                 val |= EDP_PSR_TP1_TIME_100us;
617         else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
618                 val |= EDP_PSR_TP1_TIME_500us;
619         else
620                 val |= EDP_PSR_TP1_TIME_2500us;
621
622         if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
623                 val |= EDP_PSR_TP2_TP3_TIME_0us;
624         else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
625                 val |= EDP_PSR_TP2_TP3_TIME_100us;
626         else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
627                 val |= EDP_PSR_TP2_TP3_TIME_500us;
628         else
629                 val |= EDP_PSR_TP2_TP3_TIME_2500us;
630
631 check_tp3_sel:
632         if (intel_dp_source_supports_tps3(dev_priv) &&
633             drm_dp_tps3_supported(intel_dp->dpcd))
634                 val |= EDP_PSR_TP_TP1_TP3;
635         else
636                 val |= EDP_PSR_TP_TP1_TP2;
637
638         return val;
639 }
640
641 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
642 {
643         struct intel_connector *connector = intel_dp->attached_connector;
644         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
645         int idle_frames;
646
647         /* Let's use 6 as the minimum to cover all known cases including the
648          * off-by-one issue that HW has in some cases.
649          */
650         idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
651         idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
652
653         if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
654                 idle_frames = 0xf;
655
656         return idle_frames;
657 }
658
659 static void hsw_activate_psr1(struct intel_dp *intel_dp)
660 {
661         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
662         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
663         u32 max_sleep_time = 0x1f;
664         u32 val = EDP_PSR_ENABLE;
665
666         val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
667
668         val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
669         if (IS_HASWELL(dev_priv))
670                 val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
671
672         if (intel_dp->psr.link_standby)
673                 val |= EDP_PSR_LINK_STANDBY;
674
675         val |= intel_psr1_get_tp_time(intel_dp);
676
677         if (DISPLAY_VER(dev_priv) >= 8)
678                 val |= EDP_PSR_CRC_ENABLE;
679
680         intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
681                      ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
682 }
683
684 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
685 {
686         struct intel_connector *connector = intel_dp->attached_connector;
687         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
688         u32 val = 0;
689
690         if (dev_priv->params.psr_safest_params)
691                 return EDP_PSR2_TP2_TIME_2500us;
692
693         if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
694             connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
695                 val |= EDP_PSR2_TP2_TIME_50us;
696         else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
697                 val |= EDP_PSR2_TP2_TIME_100us;
698         else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
699                 val |= EDP_PSR2_TP2_TIME_500us;
700         else
701                 val |= EDP_PSR2_TP2_TIME_2500us;
702
703         return val;
704 }
705
706 static int psr2_block_count_lines(struct intel_dp *intel_dp)
707 {
708         return intel_dp->psr.io_wake_lines < 9 &&
709                 intel_dp->psr.fast_wake_lines < 9 ? 8 : 12;
710 }
711
712 static int psr2_block_count(struct intel_dp *intel_dp)
713 {
714         return psr2_block_count_lines(intel_dp) / 4;
715 }
716
717 static void hsw_activate_psr2(struct intel_dp *intel_dp)
718 {
719         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
720         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
721         u32 val = EDP_PSR2_ENABLE;
722
723         val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
724
725         if (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))
726                 val |= EDP_SU_TRACK_ENABLE;
727
728         if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12)
729                 val |= EDP_Y_COORDINATE_ENABLE;
730
731         val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2));
732         val |= intel_psr2_get_tp_time(intel_dp);
733
734         if (DISPLAY_VER(dev_priv) >= 12) {
735                 if (psr2_block_count(intel_dp) > 2)
736                         val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
737                 else
738                         val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
739         }
740
741         /* Wa_22012278275:adl-p */
742         if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
743                 static const u8 map[] = {
744                         2, /* 5 lines */
745                         1, /* 6 lines */
746                         0, /* 7 lines */
747                         3, /* 8 lines */
748                         6, /* 9 lines */
749                         5, /* 10 lines */
750                         4, /* 11 lines */
751                         7, /* 12 lines */
752                 };
753                 /*
754                  * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
755                  * comments below for more information
756                  */
757                 int tmp;
758
759                 tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
760                 val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
761
762                 tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
763                 val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
764         } else if (DISPLAY_VER(dev_priv) >= 12) {
765                 val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
766                 val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
767         } else if (DISPLAY_VER(dev_priv) >= 9) {
768                 val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
769                 val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
770         }
771
772         if (intel_dp->psr.req_psr2_sdp_prior_scanline)
773                 val |= EDP_PSR2_SU_SDP_SCANLINE;
774
775         if (intel_dp->psr.psr2_sel_fetch_enabled) {
776                 u32 tmp;
777
778                 tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
779                 drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
780         } else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
781                 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), 0);
782         }
783
784         /*
785          * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
786          * recommends keeping this bit unset while PSR2 is enabled.
787          */
788         intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), 0);
789
790         intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val);
791 }
792
793 static bool
794 transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
795 {
796         if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
797                 return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
798         else if (DISPLAY_VER(dev_priv) >= 12)
799                 return cpu_transcoder == TRANSCODER_A;
800         else if (DISPLAY_VER(dev_priv) >= 9)
801                 return cpu_transcoder == TRANSCODER_EDP;
802         else
803                 return false;
804 }
805
806 static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
807 {
808         if (!cstate || !cstate->hw.active)
809                 return 0;
810
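        /* e.g. a 60 Hz mode yields DIV_ROUND_UP(1000000, 60) = 16667 us */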
811         return DIV_ROUND_UP(1000 * 1000,
812                             drm_mode_vrefresh(&cstate->hw.adjusted_mode));
813 }
814
815 static void psr2_program_idle_frames(struct intel_dp *intel_dp,
816                                      u32 idle_frames)
817 {
818         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
819         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
820
821         intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
822                      EDP_PSR2_IDLE_FRAMES_MASK,
823                      EDP_PSR2_IDLE_FRAMES(idle_frames));
824 }
825
826 static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
827 {
828         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
829
830         psr2_program_idle_frames(intel_dp, 0);
831         intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
832 }
833
834 static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
835 {
836         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
837
838         intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
839         psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
840 }
841
842 static void tgl_dc3co_disable_work(struct work_struct *work)
843 {
844         struct intel_dp *intel_dp =
845                 container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
846
847         mutex_lock(&intel_dp->psr.lock);
848         /* If delayed work is pending, it is not idle */
849         if (delayed_work_pending(&intel_dp->psr.dc3co_work))
850                 goto unlock;
851
852         tgl_psr2_disable_dc3co(intel_dp);
853 unlock:
854         mutex_unlock(&intel_dp->psr.lock);
855 }
856
857 static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
858 {
859         if (!intel_dp->psr.dc3co_exitline)
860                 return;
861
862         cancel_delayed_work(&intel_dp->psr.dc3co_work);
863         /* Before PSR2 exit disallow dc3co */
864         tgl_psr2_disable_dc3co(intel_dp);
865 }
866
867 static bool
868 dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
869                               struct intel_crtc_state *crtc_state)
870 {
871         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
872         enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
873         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
874         enum port port = dig_port->base.port;
875
876         if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
877                 return pipe <= PIPE_B && port <= PORT_B;
878         else
879                 return pipe == PIPE_A && port == PORT_A;
880 }
881
882 static void
883 tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
884                                   struct intel_crtc_state *crtc_state)
885 {
886         const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
887         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
888         struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
889         u32 exit_scanlines;
890
891         /*
892          * FIXME: the DC3CO activation/deactivation sequence has changed;
893          * keep DC3CO disabled here until the updated sequence is
894          * implemented. B.Specs:49196
895          */
896         return;
897
898         /*
899          * DMC's DC3CO exit mechanism has an issue with Selective Fetch.
900          * TODO: when the issue is addressed, this restriction should be removed.
901          */
902         if (crtc_state->enable_psr2_sel_fetch)
903                 return;
904
905         if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
906                 return;
907
908         if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
909                 return;
910
911         /* Wa_16011303918:adl-p */
912         if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
913                 return;
914
915         /*
916          * DC3CO Exit time 200us B.Spec 49196
917          * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
918          */
919         exit_scanlines =
920                 intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
921
922         if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
923                 return;
924
925         crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
926 }
927
928 static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
929                                               struct intel_crtc_state *crtc_state)
930 {
931         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
932
933         if (!dev_priv->params.enable_psr2_sel_fetch &&
934             intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
935                 drm_dbg_kms(&dev_priv->drm,
936                             "PSR2 sel fetch not enabled, disabled by parameter\n");
937                 return false;
938         }
939
940         if (crtc_state->uapi.async_flip) {
941                 drm_dbg_kms(&dev_priv->drm,
942                             "PSR2 sel fetch not enabled, async flip enabled\n");
943                 return false;
944         }
945
946         return crtc_state->enable_psr2_sel_fetch = true;
947 }
948
949 static bool psr2_granularity_check(struct intel_dp *intel_dp,
950                                    struct intel_crtc_state *crtc_state)
951 {
952         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
953         const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
954         const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
955         const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
956         u16 y_granularity = 0;
957
958         /* PSR2 HW only sends full lines so we only need to validate the width */
959         if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
960                 return false;
961
962         if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
963                 return false;
964
965         /* HW tracking is only aligned to 4 lines */
966         if (!crtc_state->enable_psr2_sel_fetch)
967                 return intel_dp->psr.su_y_granularity == 4;
968
969         /*
970          * adl_p and mtl platforms have 1 line granularity.
971          * For other platforms with SW tracking we can adjust the y coordinates
972          * to match the sink requirement if it is a multiple of 4.
973          */
974         if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
975                 y_granularity = intel_dp->psr.su_y_granularity;
976         else if (intel_dp->psr.su_y_granularity <= 2)
977                 y_granularity = 4;
978         else if ((intel_dp->psr.su_y_granularity % 4) == 0)
979                 y_granularity = intel_dp->psr.su_y_granularity;
980
981         if (y_granularity == 0 || crtc_vdisplay % y_granularity)
982                 return false;
983
984         if (crtc_state->dsc.compression_enable &&
985             vdsc_cfg->slice_height % y_granularity)
986                 return false;
987
988         crtc_state->su_y_granularity = y_granularity;
989         return true;
990 }
991
992 static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
993                                                         struct intel_crtc_state *crtc_state)
994 {
995         const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
996         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
997         u32 hblank_total, hblank_ns, req_ns;
998
999         hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
1000         hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
1001
1002         /* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
1003         req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
1004
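        /*
         * Worked example with illustrative numbers (not from any particular
         * panel): 4 lanes at a 540000 kHz port clock give
         * ((60 / 4) + 11) * 1000 / 540 = ~48 ns, so the mode only needs
         * roughly 150 ns of hblank time to clear the 100 ns margin checked
         * below.
         */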
1005         if ((hblank_ns - req_ns) > 100)
1006                 return true;
1007
1008         /* Not supported <13 / Wa_22012279113:adl-p */
1009         if (DISPLAY_VER(dev_priv) <= 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
1010                 return false;
1011
1012         crtc_state->req_psr2_sdp_prior_scanline = true;
1013         return true;
1014 }
1015
1016 static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
1017                                      struct intel_crtc_state *crtc_state)
1018 {
1019         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1020         int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
1021         u8 max_wake_lines;
1022
1023         if (DISPLAY_VER(i915) >= 12) {
1024                 io_wake_time = 42;
1025                 /*
1026                  * According to Bspec it's 42us, but based on testing
1027                  * it is not enough -> use 45 us.
1028                  */
1029                 fast_wake_time = 45;
1030                 max_wake_lines = 12;
1031         } else {
1032                 io_wake_time = 50;
1033                 fast_wake_time = 32;
1034                 max_wake_lines = 8;
1035         }
1036
1037         io_wake_lines = intel_usecs_to_scanlines(
1038                 &crtc_state->uapi.adjusted_mode, io_wake_time);
1039         fast_wake_lines = intel_usecs_to_scanlines(
1040                 &crtc_state->uapi.adjusted_mode, fast_wake_time);
1041
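        /*
         * Worked example with assumed timings: a 1080p panel with a line
         * time of about 14.8 us turns the 42 us IO wake time into roughly
         * 3 lines and the 45 us fast wake time into roughly 4 lines, both
         * comfortably below the max_wake_lines limit checked below and
         * later raised to the 7 line floor.
         */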
1042         if (io_wake_lines > max_wake_lines ||
1043             fast_wake_lines > max_wake_lines)
1044                 return false;
1045
1046         if (i915->params.psr_safest_params)
1047                 io_wake_lines = fast_wake_lines = max_wake_lines;
1048
1049         /* According to Bspec lower limit should be set as 7 lines. */
1050         intel_dp->psr.io_wake_lines = max(io_wake_lines, 7);
1051         intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7);
1052
1053         return true;
1054 }
1055
1056 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
1057                                     struct intel_crtc_state *crtc_state)
1058 {
1059         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1060         int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1061         int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1062         int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
1063
1064         if (!intel_dp->psr.sink_psr2_support)
1065                 return false;
1066
1067         /* JSL and EHL only support eDP 1.3 */
1068         if (IS_JSL_EHL(dev_priv)) {
1069                 drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
1070                 return false;
1071         }
1072
1073         /* Wa_16011181250 */
1074         if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
1075             IS_DG2(dev_priv)) {
1076                 drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
1077                 return false;
1078         }
1079
1080         if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1081                 drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
1082                 return false;
1083         }
1084
1085         if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
1086                 drm_dbg_kms(&dev_priv->drm,
1087                             "PSR2 not supported in transcoder %s\n",
1088                             transcoder_name(crtc_state->cpu_transcoder));
1089                 return false;
1090         }
1091
1092         if (!psr2_global_enabled(intel_dp)) {
1093                 drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
1094                 return false;
1095         }
1096
1097         /*
1098          * DSC and PSR2 cannot be enabled simultaneously. If a requested
1099          * resolution requires DSC to be enabled, priority is given to DSC
1100          * over PSR2.
1101          */
1102         if (crtc_state->dsc.compression_enable &&
1103             (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))) {
1104                 drm_dbg_kms(&dev_priv->drm,
1105                             "PSR2 cannot be enabled since DSC is enabled\n");
1106                 return false;
1107         }
1108
1109         if (crtc_state->crc_enabled) {
1110                 drm_dbg_kms(&dev_priv->drm,
1111                             "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
1112                 return false;
1113         }
1114
1115         if (DISPLAY_VER(dev_priv) >= 12) {
1116                 psr_max_h = 5120;
1117                 psr_max_v = 3200;
1118                 max_bpp = 30;
1119         } else if (DISPLAY_VER(dev_priv) >= 10) {
1120                 psr_max_h = 4096;
1121                 psr_max_v = 2304;
1122                 max_bpp = 24;
1123         } else if (DISPLAY_VER(dev_priv) == 9) {
1124                 psr_max_h = 3640;
1125                 psr_max_v = 2304;
1126                 max_bpp = 24;
1127         }
1128
1129         if (crtc_state->pipe_bpp > max_bpp) {
1130                 drm_dbg_kms(&dev_priv->drm,
1131                             "PSR2 not enabled, pipe bpp %d > max supported %d\n",
1132                             crtc_state->pipe_bpp, max_bpp);
1133                 return false;
1134         }
1135
1136         /* Wa_16011303918:adl-p */
1137         if (crtc_state->vrr.enable &&
1138             IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1139                 drm_dbg_kms(&dev_priv->drm,
1140                             "PSR2 not enabled, not compatible with HW stepping + VRR\n");
1141                 return false;
1142         }
1143
1144         if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
1145                 drm_dbg_kms(&dev_priv->drm,
1146                             "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
1147                 return false;
1148         }
1149
1150         if (!_compute_psr2_wake_times(intel_dp, crtc_state)) {
1151                 drm_dbg_kms(&dev_priv->drm,
1152                             "PSR2 not enabled, Unable to use long enough wake times\n");
1153                 return false;
1154         }
1155
1156         /* Vblank >= PSR2_CTL Block Count Number maximum line count */
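        /*
         * e.g. when both io_wake_lines and fast_wake_lines are below 9 the
         * block count is 8 lines (see psr2_block_count_lines()), so the mode
         * needs at least 8 lines of vblank here.
         */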
1157         if (crtc_state->hw.adjusted_mode.crtc_vblank_end -
1158             crtc_state->hw.adjusted_mode.crtc_vblank_start <
1159             psr2_block_count_lines(intel_dp)) {
1160                 drm_dbg_kms(&dev_priv->drm,
1161                             "PSR2 not enabled, too short vblank time\n");
1162                 return false;
1163         }
1164
1165         if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1166                 if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
1167                     !HAS_PSR_HW_TRACKING(dev_priv)) {
1168                         drm_dbg_kms(&dev_priv->drm,
1169                                     "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
1170                         return false;
1171                 }
1172         }
1173
1174         if (!psr2_granularity_check(intel_dp, crtc_state)) {
1175                 drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
1176                 goto unsupported;
1177         }
1178
1179         if (!crtc_state->enable_psr2_sel_fetch &&
1180             (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
1181                 drm_dbg_kms(&dev_priv->drm,
1182                             "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
1183                             crtc_hdisplay, crtc_vdisplay,
1184                             psr_max_h, psr_max_v);
1185                 goto unsupported;
1186         }
1187
1188         tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
1189         return true;
1190
1191 unsupported:
1192         crtc_state->enable_psr2_sel_fetch = false;
1193         return false;
1194 }
1195
1196 void intel_psr_compute_config(struct intel_dp *intel_dp,
1197                               struct intel_crtc_state *crtc_state,
1198                               struct drm_connector_state *conn_state)
1199 {
1200         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1201         const struct drm_display_mode *adjusted_mode =
1202                 &crtc_state->hw.adjusted_mode;
1203         int psr_setup_time;
1204
1205         /*
1206          * Current PSR panels don't work reliably with VRR enabled,
1207          * so if VRR is enabled, do not enable PSR.
1208          */
1209         if (crtc_state->vrr.enable)
1210                 return;
1211
1212         if (!CAN_PSR(intel_dp))
1213                 return;
1214
1215         if (!psr_global_enabled(intel_dp)) {
1216                 drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
1217                 return;
1218         }
1219
1220         if (intel_dp->psr.sink_not_reliable) {
1221                 drm_dbg_kms(&dev_priv->drm,
1222                             "PSR sink implementation is not reliable\n");
1223                 return;
1224         }
1225
1226         if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1227                 drm_dbg_kms(&dev_priv->drm,
1228                             "PSR condition failed: Interlaced mode enabled\n");
1229                 return;
1230         }
1231
1232         psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
1233         if (psr_setup_time < 0) {
1234                 drm_dbg_kms(&dev_priv->drm,
1235                             "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
1236                             intel_dp->psr_dpcd[1]);
1237                 return;
1238         }
1239
1240         if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
1241             adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
1242                 drm_dbg_kms(&dev_priv->drm,
1243                             "PSR condition failed: PSR setup time (%d us) too long\n",
1244                             psr_setup_time);
1245                 return;
1246         }
1247
1248         crtc_state->has_psr = true;
1249         crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
1250
1251         crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1252         intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
1253                                      &crtc_state->psr_vsc);
1254 }
1255
1256 void intel_psr_get_config(struct intel_encoder *encoder,
1257                           struct intel_crtc_state *pipe_config)
1258 {
1259         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1260         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1261         enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
1262         struct intel_dp *intel_dp;
1263         u32 val;
1264
1265         if (!dig_port)
1266                 return;
1267
1268         intel_dp = &dig_port->dp;
1269         if (!CAN_PSR(intel_dp))
1270                 return;
1271
1272         mutex_lock(&intel_dp->psr.lock);
1273         if (!intel_dp->psr.enabled)
1274                 goto unlock;
1275
1276         /*
1277          * Not possible to read back the EDP_PSR/PSR2_CTL registers here,
1278          * as PSR gets enabled/disabled by frontbuffer tracking and others.
1279          */
1280         pipe_config->has_psr = true;
1281         pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
1282         pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1283
1284         if (!intel_dp->psr.psr2_enabled)
1285                 goto unlock;
1286
1287         if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1288                 val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
1289                 if (val & PSR2_MAN_TRK_CTL_ENABLE)
1290                         pipe_config->enable_psr2_sel_fetch = true;
1291         }
1292
1293         if (DISPLAY_VER(dev_priv) >= 12) {
1294                 val = intel_de_read(dev_priv, TRANS_EXITLINE(cpu_transcoder));
1295                 pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
1296         }
1297 unlock:
1298         mutex_unlock(&intel_dp->psr.lock);
1299 }
1300
1301 static void intel_psr_activate(struct intel_dp *intel_dp)
1302 {
1303         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1304         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1305
1306         drm_WARN_ON(&dev_priv->drm,
1307                     transcoder_has_psr2(dev_priv, cpu_transcoder) &&
1308                     intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)) & EDP_PSR2_ENABLE);
1309
1310         drm_WARN_ON(&dev_priv->drm,
1311                     intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)) & EDP_PSR_ENABLE);
1312
1313         drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
1314
1315         lockdep_assert_held(&intel_dp->psr.lock);
1316
1317         /* psr1 and psr2 are mutually exclusive. */
1318         if (intel_dp->psr.psr2_enabled)
1319                 hsw_activate_psr2(intel_dp);
1320         else
1321                 hsw_activate_psr1(intel_dp);
1322
1323         intel_dp->psr.active = true;
1324 }
1325
1326 static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
1327 {
1328         switch (intel_dp->psr.pipe) {
1329         case PIPE_A:
1330                 return LATENCY_REPORTING_REMOVED_PIPE_A;
1331         case PIPE_B:
1332                 return LATENCY_REPORTING_REMOVED_PIPE_B;
1333         case PIPE_C:
1334                 return LATENCY_REPORTING_REMOVED_PIPE_C;
1335         case PIPE_D:
1336                 return LATENCY_REPORTING_REMOVED_PIPE_D;
1337         default:
1338                 MISSING_CASE(intel_dp->psr.pipe);
1339                 return 0;
1340         }
1341 }
1342
1343 /*
1344  * Wa_16013835468
1345  * Wa_14015648006
1346  */
1347 static void wm_optimization_wa(struct intel_dp *intel_dp,
1348                                const struct intel_crtc_state *crtc_state)
1349 {
1350         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1351         bool set_wa_bit = false;
1352
1353         /* Wa_14015648006 */
1354         if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
1355             IS_DISPLAY_VER(dev_priv, 11, 13))
1356                 set_wa_bit |= crtc_state->wm_level_disabled;
1357
1358         /* Wa_16013835468 */
1359         if (DISPLAY_VER(dev_priv) == 12)
1360                 set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
1361                         crtc_state->hw.adjusted_mode.crtc_vdisplay;
1362
1363         if (set_wa_bit)
1364                 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1365                              0, wa_16013835468_bit_get(intel_dp));
1366         else
1367                 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1368                              wa_16013835468_bit_get(intel_dp), 0);
1369 }
1370
1371 static void intel_psr_enable_source(struct intel_dp *intel_dp,
1372                                     const struct intel_crtc_state *crtc_state)
1373 {
1374         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1375         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1376         u32 mask;
1377
1378         /*
1379          * Only HSW and BDW have PSR AUX registers that need to be set up.
1380          * SKL+ use hardcoded values for PSR AUX transactions.
1381          */
1382         if (DISPLAY_VER(dev_priv) < 9)
1383                 hsw_psr_setup_aux(intel_dp);
1384
1385         /*
1386          * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD. Also
1387          * mask LPSP to avoid a dependency on other drivers that might block
1388          * runtime_pm, besides preventing other HW tracking issues, now that we
1389          * can rely on frontbuffer tracking.
1390          */
1391         mask = EDP_PSR_DEBUG_MASK_MEMUP |
1392                EDP_PSR_DEBUG_MASK_HPD |
1393                EDP_PSR_DEBUG_MASK_LPSP |
1394                EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1395
1396         if (DISPLAY_VER(dev_priv) < 11)
1397                 mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1398
1399         intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask);
1400
1401         psr_irq_control(intel_dp);
1402
1403         /*
1404          * TODO: if future platforms support DC3CO in more than one
1405          * transcoder, EXITLINE will need to be unset when disabling PSR
1406          */
1407         if (intel_dp->psr.dc3co_exitline)
1408                 intel_de_rmw(dev_priv, TRANS_EXITLINE(cpu_transcoder), EXITLINE_MASK,
1409                              intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
1410
1411         if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
1412                 intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1413                              intel_dp->psr.psr2_sel_fetch_enabled ?
1414                              IGNORE_PSR2_HW_TRACKING : 0);
1415
1416         /*
1417          * Wa_16013835468
1418          * Wa_14015648006
1419          */
1420         wm_optimization_wa(intel_dp, crtc_state);
1421
1422         if (intel_dp->psr.psr2_enabled) {
1423                 if (DISPLAY_VER(dev_priv) == 9)
1424                         intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1425                                      PSR2_VSC_ENABLE_PROG_HEADER |
1426                                      PSR2_ADD_VERTICAL_LINE_COUNT);
1427
1428                 /*
1429                  * Wa_16014451276:adlp,mtl[a0,b0]
1430                  * All supported adlp panels have 1-based X granularity; this may
1431                  * cause issues if unsupported panels are used.
1432                  */
1433                 if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1434                         intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder), 0,
1435                                      ADLP_1_BASED_X_GRANULARITY);
1436                 else if (IS_ALDERLAKE_P(dev_priv))
1437                         intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1438                                      ADLP_1_BASED_X_GRANULARITY);
1439
1440                 /* Wa_16012604467:adlp,mtl[a0,b0] */
1441                 if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1442                         intel_de_rmw(dev_priv,
1443                                      MTL_CLKGATE_DIS_TRANS(cpu_transcoder), 0,
1444                                      MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
1445                 else if (IS_ALDERLAKE_P(dev_priv))
1446                         intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
1447                                      CLKGATE_DIS_MISC_DMASC_GATING_DIS);
1448         }
1449 }
1450
1451 static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1452 {
1453         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1454         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1455         u32 val;
1456
1457         /*
1458          * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1459          * will still keep the error set even after the reset done in the
1460          * irq_preinstall and irq_uninstall hooks.
1461          * Enabling PSR in this situation causes the screen to freeze the
1462          * first time the PSR HW tries to activate, so let's keep PSR disabled
1463          * to avoid any rendering problems.
1464          */
1465         val = intel_de_read(dev_priv, psr_iir_reg(dev_priv, cpu_transcoder));
1466         val &= psr_irq_psr_error_bit_get(intel_dp);
1467         if (val) {
1468                 intel_dp->psr.sink_not_reliable = true;
1469                 drm_dbg_kms(&dev_priv->drm,
1470                             "PSR interruption error set, not enabling PSR\n");
1471                 return false;
1472         }
1473
1474         return true;
1475 }
1476
1477 static void intel_psr_enable_locked(struct intel_dp *intel_dp,
1478                                     const struct intel_crtc_state *crtc_state)
1479 {
1480         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1481         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1482         enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
1483         struct intel_encoder *encoder = &dig_port->base;
1484         u32 val;
1485
1486         drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
1487
1488         intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
1489         intel_dp->psr.busy_frontbuffer_bits = 0;
1490         intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1491         intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
1492         /* DC5/DC6 requires at least 6 idle frames */
1493         val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
1494         intel_dp->psr.dc3co_exit_delay = val;
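        /*
         * Illustrative numbers only: at a 60 Hz refresh rate one frame is
         * roughly 16667 us, so six idle frames correspond to about 100 ms,
         * which usecs_to_jiffies() converts into the delay used when
         * re-arming the DC3CO exit work.
         */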
1495         intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
1496         intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
1497         intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1498         intel_dp->psr.req_psr2_sdp_prior_scanline =
1499                 crtc_state->req_psr2_sdp_prior_scanline;
1500
1501         if (!psr_interrupt_error_check(intel_dp))
1502                 return;
1503
1504         drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
1505                     intel_dp->psr.psr2_enabled ? "2" : "1");
1506         intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc);
1507         intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
1508         intel_psr_enable_sink(intel_dp);
1509         intel_psr_enable_source(intel_dp, crtc_state);
1510         intel_dp->psr.enabled = true;
1511         intel_dp->psr.paused = false;
1512
1513         intel_psr_activate(intel_dp);
1514 }
1515
1516 static void intel_psr_exit(struct intel_dp *intel_dp)
1517 {
1518         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1519         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1520         u32 val;
1521
1522         if (!intel_dp->psr.active) {
1523                 if (transcoder_has_psr2(dev_priv, cpu_transcoder)) {
1524                         val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
1525                         drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
1526                 }
1527
1528                 val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
1529                 drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
1530
1531                 return;
1532         }
1533
1534         if (intel_dp->psr.psr2_enabled) {
1535                 tgl_disallow_dc3co_on_psr2_exit(intel_dp);
1536
1537                 val = intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
1538                                    EDP_PSR2_ENABLE, 0);
1539
1540                 drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
1541         } else {
1542                 val = intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
1543                                    EDP_PSR_ENABLE, 0);
1544
1545                 drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
1546         }
1547         intel_dp->psr.active = false;
1548 }
1549
1550 static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
1551 {
1552         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1553         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1554         i915_reg_t psr_status;
1555         u32 psr_status_mask;
1556
1557         if (intel_dp->psr.psr2_enabled) {
1558                 psr_status = EDP_PSR2_STATUS(cpu_transcoder);
1559                 psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
1560         } else {
1561                 psr_status = psr_status_reg(dev_priv, cpu_transcoder);
1562                 psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
1563         }
1564
1565         /* Wait till PSR is idle */
1566         if (intel_de_wait_for_clear(dev_priv, psr_status,
1567                                     psr_status_mask, 2000))
1568                 drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
1569 }
1570
1571 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
1572 {
1573         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1574         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1575         enum phy phy = intel_port_to_phy(dev_priv,
1576                                          dp_to_dig_port(intel_dp)->base.port);
1577
1578         lockdep_assert_held(&intel_dp->psr.lock);
1579
1580         if (!intel_dp->psr.enabled)
1581                 return;
1582
1583         drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
1584                     intel_dp->psr.psr2_enabled ? "2" : "1");
1585
1586         intel_psr_exit(intel_dp);
1587         intel_psr_wait_exit_locked(intel_dp);
1588
1589         /*
1590          * Wa_16013835468
1591          * Wa_14015648006
1592          */
1593         if (DISPLAY_VER(dev_priv) >= 11)
1594                 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1595                              wa_16013835468_bit_get(intel_dp), 0);
1596
1597         if (intel_dp->psr.psr2_enabled) {
1598                 /* Wa_16012604467:adlp,mtl[a0,b0] */
1599                 if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1600                         intel_de_rmw(dev_priv,
1601                                      MTL_CLKGATE_DIS_TRANS(cpu_transcoder),
1602                                      MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
1603                 else if (IS_ALDERLAKE_P(dev_priv))
1604                         intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
1605                                      CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
1606         }
1607
1608         intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
1609
1610         /* Disable PSR on Sink */
1611         drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
1612
1613         if (intel_dp->psr.psr2_enabled)
1614                 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
1615
1616         intel_dp->psr.enabled = false;
1617         intel_dp->psr.psr2_enabled = false;
1618         intel_dp->psr.psr2_sel_fetch_enabled = false;
1619         intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1620 }
1621
1622 /**
1623  * intel_psr_disable - Disable PSR
1624  * @intel_dp: Intel DP
1625  * @old_crtc_state: old CRTC state
1626  *
1627  * This function needs to be called before disabling the pipe.
1628  */
1629 void intel_psr_disable(struct intel_dp *intel_dp,
1630                        const struct intel_crtc_state *old_crtc_state)
1631 {
1632         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1633
1634         if (!old_crtc_state->has_psr)
1635                 return;
1636
1637         if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
1638                 return;
1639
1640         mutex_lock(&intel_dp->psr.lock);
1641
1642         intel_psr_disable_locked(intel_dp);
1643
1644         mutex_unlock(&intel_dp->psr.lock);
1645         cancel_work_sync(&intel_dp->psr.work);
1646         cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
1647 }
1648
1649 /**
1650  * intel_psr_pause - Pause PSR
1651  * @intel_dp: Intel DP
1652  *
1653  * This function needs to be called after enabling PSR.
1654  */
1655 void intel_psr_pause(struct intel_dp *intel_dp)
1656 {
1657         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1658         struct intel_psr *psr = &intel_dp->psr;
1659
1660         if (!CAN_PSR(intel_dp))
1661                 return;
1662
1663         mutex_lock(&psr->lock);
1664
1665         if (!psr->enabled) {
1666                 mutex_unlock(&psr->lock);
1667                 return;
1668         }
1669
1670         /* If we ever hit this, we will need to add refcount to pause/resume */
1671         drm_WARN_ON(&dev_priv->drm, psr->paused);
1672
1673         intel_psr_exit(intel_dp);
1674         intel_psr_wait_exit_locked(intel_dp);
1675         psr->paused = true;
1676
1677         mutex_unlock(&psr->lock);
1678
1679         cancel_work_sync(&psr->work);
1680         cancel_delayed_work_sync(&psr->dc3co_work);
1681 }
1682
1683 /**
1684  * intel_psr_resume - Resume PSR
1685  * @intel_dp: Intel DP
1686  *
1687  * This function needs to be called after pausing PSR.
1688  */
1689 void intel_psr_resume(struct intel_dp *intel_dp)
1690 {
1691         struct intel_psr *psr = &intel_dp->psr;
1692
1693         if (!CAN_PSR(intel_dp))
1694                 return;
1695
1696         mutex_lock(&psr->lock);
1697
1698         if (!psr->paused)
1699                 goto unlock;
1700
1701         psr->paused = false;
1702         intel_psr_activate(intel_dp);
1703
1704 unlock:
1705         mutex_unlock(&psr->lock);
1706 }
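
/*
 * A minimal usage sketch (hypothetical caller, not taken from this driver):
 * pause PSR around work that must not race with self-refresh and resume it
 * afterwards:
 *
 *	intel_psr_pause(intel_dp);
 *	... do work that requires PSR to stay inactive ...
 *	intel_psr_resume(intel_dp);
 *
 * Both calls are no-ops when the encoder cannot do PSR (!CAN_PSR()), and
 * intel_psr_resume() only re-activates PSR if it was actually paused.
 */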
1707
1708 static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
1709 {
1710         return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? 0 :
1711                 PSR2_MAN_TRK_CTL_ENABLE;
1712 }
1713
1714 static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
1715 {
1716         return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1717                ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
1718                PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
1719 }
1720
1721 static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
1722 {
1723         return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1724                ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
1725                PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
1726 }
1727
1728 static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
1729 {
1730         return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1731                ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
1732                PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
1733 }
1734
1735 static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
1736 {
1737         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1738         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1739
1740         if (intel_dp->psr.psr2_sel_fetch_enabled)
1741                 intel_de_write(dev_priv,
1742                                PSR2_MAN_TRK_CTL(cpu_transcoder),
1743                                man_trk_ctl_enable_bit_get(dev_priv) |
1744                                man_trk_ctl_partial_frame_bit_get(dev_priv) |
1745                                man_trk_ctl_single_full_frame_bit_get(dev_priv) |
1746                                man_trk_ctl_continuos_full_frame(dev_priv));
1747
1748         /*
1749          * Display WA #0884: skl+
1750          * This documented WA for bxt can be safely applied
1751          * broadly so we can force HW tracking to exit PSR
1752          * instead of disabling and re-enabling.
1753          * The workaround tells us to write 0 to CUR_SURFLIVE_A,
1754          * but it makes more sense to write to the currently
1755          * active pipe.
1756          *
1757          * This workaround is not documented for platforms with display
1758          * version 10 or newer, but testing proved that it works up to
1759          * display version 13; for anything newer, testing will be needed.
1760          */
1761         intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
1762 }
1763
1764 void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane,
1765                                             const struct intel_crtc_state *crtc_state)
1766 {
1767         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1768         enum pipe pipe = plane->pipe;
1769
1770         if (!crtc_state->enable_psr2_sel_fetch)
1771                 return;
1772
1773         intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
1774 }
1775
1776 void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane,
1777                                             const struct intel_crtc_state *crtc_state,
1778                                             const struct intel_plane_state *plane_state)
1779 {
1780         struct drm_i915_private *i915 = to_i915(plane->base.dev);
1781         enum pipe pipe = plane->pipe;
1782
1783         if (!crtc_state->enable_psr2_sel_fetch)
1784                 return;
1785
1786         if (plane->id == PLANE_CURSOR)
1787                 intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
1788                                   plane_state->ctl);
1789         else
1790                 intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
1791                                   PLANE_SEL_FETCH_CTL_ENABLE);
1792 }
1793
1794 void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane,
1795                                               const struct intel_crtc_state *crtc_state,
1796                                               const struct intel_plane_state *plane_state,
1797                                               int color_plane)
1798 {
1799         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1800         enum pipe pipe = plane->pipe;
1801         const struct drm_rect *clip;
1802         u32 val;
1803         int x, y;
1804
1805         if (!crtc_state->enable_psr2_sel_fetch)
1806                 return;
1807
1808         if (plane->id == PLANE_CURSOR)
1809                 return;
1810
1811         clip = &plane_state->psr2_sel_fetch_area;
1812
1813         val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
1814         val |= plane_state->uapi.dst.x1;
1815         intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);
1816
1817         x = plane_state->view.color_plane[color_plane].x;
1818
1819         /*
1820          * From Bspec: UV surface Start Y Position = half of Y plane Y
1821          * start position.
1822          */
1823         if (!color_plane)
1824                 y = plane_state->view.color_plane[color_plane].y + clip->y1;
1825         else
1826                 y = plane_state->view.color_plane[color_plane].y + clip->y1 / 2;
1827
1828         val = y << 16 | x;
1829
1830         intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
1831                           val);
1832
1833         /* Sizes are 0 based */
1834         val = (drm_rect_height(clip) - 1) << 16;
1835         val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
1836         intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
1837 }
1838
1839 void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
1840 {
1841         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1842         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
1843         struct intel_encoder *encoder;
1844
1845         if (!crtc_state->enable_psr2_sel_fetch)
1846                 return;
1847
1848         for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
1849                                              crtc_state->uapi.encoder_mask) {
1850                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1851
1852                 lockdep_assert_held(&intel_dp->psr.lock);
1853                 if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
1854                         return;
1855                 break;
1856         }
1857
1858         intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
1859                        crtc_state->psr2_man_track_ctl);
1860 }
1861
1862 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
1863                                   struct drm_rect *clip, bool full_update)
1864 {
1865         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1866         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1867         u32 val = man_trk_ctl_enable_bit_get(dev_priv);
1868
1869         /* SF partial frame enable has to be set even on full update */
1870         val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
1871
1872         if (full_update) {
1873                 val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
1874                 val |= man_trk_ctl_continuos_full_frame(dev_priv);
1875                 goto exit;
1876         }
1877
1878         if (clip->y1 == -1)
1879                 goto exit;
1880
1881         if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) {
1882                 val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1);
1883                 val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 - 1);
1884         } else {
1885                 drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
1886
1887                 val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
1888                 val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
1889         }
1890 exit:
1891         crtc_state->psr2_man_track_ctl = val;
1892 }
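
/*
 * Worked example (illustrative values only): for a selective update clip of
 * y1 = 0, y2 = 160, the ADLP/MTL path above programs the SU region start/end
 * addresses as raw scanlines 0 and 159, while the pre-ADLP path, which works
 * in blocks of four lines, programs start = 0 / 4 + 1 = 1 and
 * end = 160 / 4 + 1 = 41.
 */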
1893
1894 static void clip_area_update(struct drm_rect *overlap_damage_area,
1895                              struct drm_rect *damage_area,
1896                              struct drm_rect *pipe_src)
1897 {
1898         if (!drm_rect_intersect(damage_area, pipe_src))
1899                 return;
1900
1901         if (overlap_damage_area->y1 == -1) {
1902                 overlap_damage_area->y1 = damage_area->y1;
1903                 overlap_damage_area->y2 = damage_area->y2;
1904                 return;
1905         }
1906
1907         if (damage_area->y1 < overlap_damage_area->y1)
1908                 overlap_damage_area->y1 = damage_area->y1;
1909
1910         if (damage_area->y2 > overlap_damage_area->y2)
1911                 overlap_damage_area->y2 = damage_area->y2;
1912 }
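
/*
 * Example (illustrative values): with an accumulated overlap area of
 * y1 = 100, y2 = 200 and a new damaged area of y1 = 150, y2 = 300 that
 * intersects the pipe, the overlap area grows to y1 = 100, y2 = 300, i.e.
 * the union of the two vertical ranges.
 */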
1913
1914 static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *crtc_state,
1915                                                 struct drm_rect *pipe_clip)
1916 {
1917         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1918         const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1919         u16 y_alignment;
1920
1921         /* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
1922         if (crtc_state->dsc.compression_enable &&
1923             (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14))
1924                 y_alignment = vdsc_cfg->slice_height;
1925         else
1926                 y_alignment = crtc_state->su_y_granularity;
1927
1928         pipe_clip->y1 -= pipe_clip->y1 % y_alignment;
1929         if (pipe_clip->y2 % y_alignment)
1930                 pipe_clip->y2 = ((pipe_clip->y2 / y_alignment) + 1) * y_alignment;
1931 }
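
/*
 * Example of the rounding above (illustrative values only): with a
 * y_alignment of 4, a clip of y1 = 102, y2 = 158 becomes y1 = 100 (rounded
 * down to the alignment) and y2 = 160 (rounded up), so the selective update
 * region always covers whole alignment blocks.
 */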
1932
1933 /*
1934  * TODO: Not clear how to handle planes with negative position,
1935  * also planes are not updated if they have a negative X
1936  * position, so for now do a full update in these cases.
1937  *
1938  * Plane scaling and rotation are not supported by selective fetch and both
1939  * properties can change without a modeset, so they need to be checked at
1940  * every atomic commit.
1941  */
1942 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
1943 {
1944         if (plane_state->uapi.dst.y1 < 0 ||
1945             plane_state->uapi.dst.x1 < 0 ||
1946             plane_state->scaler_id >= 0 ||
1947             plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
1948                 return false;
1949
1950         return true;
1951 }
1952
1953 /*
1954  * Check for pipe properties that are not supported by selective fetch.
1955  *
1956  * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
1957  * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
1958  * enabled and going to the full update path.
1959  */
1960 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
1961 {
1962         if (crtc_state->scaler_state.scaler_id >= 0)
1963                 return false;
1964
1965         return true;
1966 }
1967
1968 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
1969                                 struct intel_crtc *crtc)
1970 {
1971         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1972         struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
1973         struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
1974         struct intel_plane_state *new_plane_state, *old_plane_state;
1975         struct intel_plane *plane;
1976         bool full_update = false;
1977         int i, ret;
1978
1979         if (!crtc_state->enable_psr2_sel_fetch)
1980                 return 0;
1981
1982         if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
1983                 full_update = true;
1984                 goto skip_sel_fetch_set_loop;
1985         }
1986
1987         /*
1988          * Calculate the minimal selective fetch area of each plane and the
1989          * pipe damaged area.
1990          * In the next loop the plane selective fetch area will actually be set
1991          * using the whole pipe damaged area.
1992          */
1993         for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
1994                                              new_plane_state, i) {
1995                 struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
1996                                                       .x2 = INT_MAX };
1997
1998                 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
1999                         continue;
2000
2001                 if (!new_plane_state->uapi.visible &&
2002                     !old_plane_state->uapi.visible)
2003                         continue;
2004
2005                 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2006                         full_update = true;
2007                         break;
2008                 }
2009
2010                 /*
2011                  * If visibility changed or the plane moved, mark the whole plane
2012                  * area as damaged as it needs to be completely redrawn in both the
2013                  * old and new positions.
2014                  */
2015                 if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
2016                     !drm_rect_equals(&new_plane_state->uapi.dst,
2017                                      &old_plane_state->uapi.dst)) {
2018                         if (old_plane_state->uapi.visible) {
2019                                 damaged_area.y1 = old_plane_state->uapi.dst.y1;
2020                                 damaged_area.y2 = old_plane_state->uapi.dst.y2;
2021                                 clip_area_update(&pipe_clip, &damaged_area,
2022                                                  &crtc_state->pipe_src);
2023                         }
2024
2025                         if (new_plane_state->uapi.visible) {
2026                                 damaged_area.y1 = new_plane_state->uapi.dst.y1;
2027                                 damaged_area.y2 = new_plane_state->uapi.dst.y2;
2028                                 clip_area_update(&pipe_clip, &damaged_area,
2029                                                  &crtc_state->pipe_src);
2030                         }
2031                         continue;
2032                 } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
2033                         /* If alpha changed mark the whole plane area as damaged */
2034                         damaged_area.y1 = new_plane_state->uapi.dst.y1;
2035                         damaged_area.y2 = new_plane_state->uapi.dst.y2;
2036                         clip_area_update(&pipe_clip, &damaged_area,
2037                                          &crtc_state->pipe_src);
2038                         continue;
2039                 }
2040
2041                 src = drm_plane_state_src(&new_plane_state->uapi);
2042                 drm_rect_fp_to_int(&src, &src);
2043
2044                 if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
2045                                                      &new_plane_state->uapi, &damaged_area))
2046                         continue;
2047
2048                 damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
2049                 damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
2050                 damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
2051                 damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
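                /*
                 * Note: the merged damage is reported in framebuffer (src)
                 * coordinates, so the dst - src deltas above translate it
                 * into pipe coordinates before it is merged into pipe_clip.
                 */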
2052
2053                 clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
2054         }
2055
2056         /*
2057          * TODO: For now we are just using full update in case
2058          * selective fetch area calculation fails. To optimize this we
2059          * should identify cases where this happens and fix the area
2060          * calculation for those.
2061          */
2062         if (pipe_clip.y1 == -1) {
2063                 drm_info_once(&dev_priv->drm,
2064                               "Selective fetch area calculation failed in pipe %c\n",
2065                               pipe_name(crtc->pipe));
2066                 full_update = true;
2067         }
2068
2069         if (full_update)
2070                 goto skip_sel_fetch_set_loop;
2071
2072         /* Wa_14014971492 */
2073         if ((IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
2074              IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
2075             crtc_state->splitter.enable)
2076                 pipe_clip.y1 = 0;
2077
2078         ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
2079         if (ret)
2080                 return ret;
2081
2082         intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip);
2083
2084         /*
2085          * Now that we have the pipe damaged area, check if it intersects with
2086          * every plane; if it does, set the plane selective fetch area.
2087          */
2088         for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2089                                              new_plane_state, i) {
2090                 struct drm_rect *sel_fetch_area, inter;
2091                 struct intel_plane *linked = new_plane_state->planar_linked_plane;
2092
2093                 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
2094                     !new_plane_state->uapi.visible)
2095                         continue;
2096
2097                 inter = pipe_clip;
2098                 if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
2099                         continue;
2100
2101                 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2102                         full_update = true;
2103                         break;
2104                 }
2105
2106                 sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2107                 sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
2108                 sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
2109                 crtc_state->update_planes |= BIT(plane->id);
2110
2111                 /*
2112                  * Sel_fetch_area is calculated for UV plane. Use
2113                  * same area for Y plane as well.
2114                  */
2115                 if (linked) {
2116                         struct intel_plane_state *linked_new_plane_state;
2117                         struct drm_rect *linked_sel_fetch_area;
2118
2119                         linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
2120                         if (IS_ERR(linked_new_plane_state))
2121                                 return PTR_ERR(linked_new_plane_state);
2122
2123                         linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
2124                         linked_sel_fetch_area->y1 = sel_fetch_area->y1;
2125                         linked_sel_fetch_area->y2 = sel_fetch_area->y2;
2126                         crtc_state->update_planes |= BIT(linked->id);
2127                 }
2128         }
2129
2130 skip_sel_fetch_set_loop:
2131         psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
2132         return 0;
2133 }
2134
2135 void intel_psr_pre_plane_update(struct intel_atomic_state *state,
2136                                 struct intel_crtc *crtc)
2137 {
2138         struct drm_i915_private *i915 = to_i915(state->base.dev);
2139         const struct intel_crtc_state *old_crtc_state =
2140                 intel_atomic_get_old_crtc_state(state, crtc);
2141         const struct intel_crtc_state *new_crtc_state =
2142                 intel_atomic_get_new_crtc_state(state, crtc);
2143         struct intel_encoder *encoder;
2144
2145         if (!HAS_PSR(i915))
2146                 return;
2147
2148         for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2149                                              old_crtc_state->uapi.encoder_mask) {
2150                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2151                 struct intel_psr *psr = &intel_dp->psr;
2152                 bool needs_to_disable = false;
2153
2154                 mutex_lock(&psr->lock);
2155
2156                 /*
2157                  * Reasons to disable:
2158                  * - PSR disabled in new state
2159                  * - All planes will go inactive
2160                  * - Changing between PSR versions
2161                  * - Display WA #1136: skl, bxt
2162                  */
2163                 needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
2164                 needs_to_disable |= !new_crtc_state->has_psr;
2165                 needs_to_disable |= !new_crtc_state->active_planes;
2166                 needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled;
2167                 needs_to_disable |= DISPLAY_VER(i915) < 11 &&
2168                         new_crtc_state->wm_level_disabled;
2169
2170                 if (psr->enabled && needs_to_disable)
2171                         intel_psr_disable_locked(intel_dp);
2172                 else if (psr->enabled && new_crtc_state->wm_level_disabled)
2173                         /* Wa_14015648006 */
2174                         wm_optimization_wa(intel_dp, new_crtc_state);
2175
2176                 mutex_unlock(&psr->lock);
2177         }
2178 }
2179
2180 static void _intel_psr_post_plane_update(const struct intel_atomic_state *state,
2181                                          const struct intel_crtc_state *crtc_state)
2182 {
2183         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2184         struct intel_encoder *encoder;
2185
2186         if (!crtc_state->has_psr)
2187                 return;
2188
2189         for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2190                                              crtc_state->uapi.encoder_mask) {
2191                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2192                 struct intel_psr *psr = &intel_dp->psr;
2193                 bool keep_disabled = false;
2194
2195                 mutex_lock(&psr->lock);
2196
2197                 drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
2198
2199                 keep_disabled |= psr->sink_not_reliable;
2200                 keep_disabled |= !crtc_state->active_planes;
2201
2202                 /* Display WA #1136: skl, bxt */
2203                 keep_disabled |= DISPLAY_VER(dev_priv) < 11 &&
2204                         crtc_state->wm_level_disabled;
2205
2206                 if (!psr->enabled && !keep_disabled)
2207                         intel_psr_enable_locked(intel_dp, crtc_state);
2208                 else if (psr->enabled && !crtc_state->wm_level_disabled)
2209                         /* Wa_14015648006 */
2210                         wm_optimization_wa(intel_dp, crtc_state);
2211
2212                 /* Force a PSR exit when enabling CRC to avoid CRC timeouts */
2213                 if (crtc_state->crc_enabled && psr->enabled)
2214                         psr_force_hw_tracking_exit(intel_dp);
2215
2216                 mutex_unlock(&psr->lock);
2217         }
2218 }
2219
2220 void intel_psr_post_plane_update(const struct intel_atomic_state *state)
2221 {
2222         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2223         struct intel_crtc_state *crtc_state;
2224         struct intel_crtc *crtc;
2225         int i;
2226
2227         if (!HAS_PSR(dev_priv))
2228                 return;
2229
2230         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
2231                 _intel_psr_post_plane_update(state, crtc_state);
2232 }
2233
2234 static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2235 {
2236         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2237         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2238
2239         /*
2240          * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
2241          * As all higher states have bit 4 of the PSR2 state set, we can just wait for
2242          * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
2243          */
2244         return intel_de_wait_for_clear(dev_priv,
2245                                        EDP_PSR2_STATUS(cpu_transcoder),
2246                                        EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
2247 }
2248
2249 static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2250 {
2251         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2252         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2253
2254         /*
2255          * From bspec: Panel Self Refresh (BDW+)
2256          * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
2257          * exit training time + 1.5 ms of aux channel handshake. 50 ms is
2258          * defensive enough to cover everything.
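         * For example (illustrative numbers only), at a 60 Hz refresh rate
         * that budget is roughly 16.7 ms + 6 ms + 1.5 ms, i.e. about 24 ms,
         * so the 50 ms timeout leaves roughly a 2x margin.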
2259          */
2260         return intel_de_wait_for_clear(dev_priv,
2261                                        psr_status_reg(dev_priv, cpu_transcoder),
2262                                        EDP_PSR_STATUS_STATE_MASK, 50);
2263 }
2264
2265 /**
2266  * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
2267  * @new_crtc_state: new CRTC state
2268  *
2269  * This function is expected to be called from pipe_update_start() where it is
2270  * not expected to race with PSR enable or disable.
2271  */
2272 void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
2273 {
2274         struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
2275         struct intel_encoder *encoder;
2276
2277         if (!new_crtc_state->has_psr)
2278                 return;
2279
2280         for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2281                                              new_crtc_state->uapi.encoder_mask) {
2282                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2283                 int ret;
2284
2285                 lockdep_assert_held(&intel_dp->psr.lock);
2286
2287                 if (!intel_dp->psr.enabled)
2288                         continue;
2289
2290                 if (intel_dp->psr.psr2_enabled)
2291                         ret = _psr2_ready_for_pipe_update_locked(intel_dp);
2292                 else
2293                         ret = _psr1_ready_for_pipe_update_locked(intel_dp);
2294
2295                 if (ret)
2296                         drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
2297         }
2298 }
2299
2300 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
2301 {
2302         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2303         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2304         i915_reg_t reg;
2305         u32 mask;
2306         int err;
2307
2308         if (!intel_dp->psr.enabled)
2309                 return false;
2310
2311         if (intel_dp->psr.psr2_enabled) {
2312                 reg = EDP_PSR2_STATUS(cpu_transcoder);
2313                 mask = EDP_PSR2_STATUS_STATE_MASK;
2314         } else {
2315                 reg = psr_status_reg(dev_priv, cpu_transcoder);
2316                 mask = EDP_PSR_STATUS_STATE_MASK;
2317         }
2318
2319         mutex_unlock(&intel_dp->psr.lock);
2320
2321         err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
2322         if (err)
2323                 drm_err(&dev_priv->drm,
2324                         "Timed out waiting for PSR Idle for re-enable\n");
2325
2326         /* After the unlocked wait, verify that PSR is still wanted! */
2327         mutex_lock(&intel_dp->psr.lock);
2328         return err == 0 && intel_dp->psr.enabled;
2329 }
2330
2331 static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
2332 {
2333         struct drm_connector_list_iter conn_iter;
2334         struct drm_modeset_acquire_ctx ctx;
2335         struct drm_atomic_state *state;
2336         struct drm_connector *conn;
2337         int err = 0;
2338
2339         state = drm_atomic_state_alloc(&dev_priv->drm);
2340         if (!state)
2341                 return -ENOMEM;
2342
2343         drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2344
2345         state->acquire_ctx = &ctx;
2346         to_intel_atomic_state(state)->internal = true;
2347
2348 retry:
2349         drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
2350         drm_for_each_connector_iter(conn, &conn_iter) {
2351                 struct drm_connector_state *conn_state;
2352                 struct drm_crtc_state *crtc_state;
2353
2354                 if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
2355                         continue;
2356
2357                 conn_state = drm_atomic_get_connector_state(state, conn);
2358                 if (IS_ERR(conn_state)) {
2359                         err = PTR_ERR(conn_state);
2360                         break;
2361                 }
2362
2363                 if (!conn_state->crtc)
2364                         continue;
2365
2366                 crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
2367                 if (IS_ERR(crtc_state)) {
2368                         err = PTR_ERR(crtc_state);
2369                         break;
2370                 }
2371
2372                 /* Mark mode as changed to trigger a pipe->update() */
2373                 crtc_state->mode_changed = true;
2374         }
2375         drm_connector_list_iter_end(&conn_iter);
2376
2377         if (err == 0)
2378                 err = drm_atomic_commit(state);
2379
2380         if (err == -EDEADLK) {
2381                 drm_atomic_state_clear(state);
2382                 err = drm_modeset_backoff(&ctx);
2383                 if (!err)
2384                         goto retry;
2385         }
2386
2387         drm_modeset_drop_locks(&ctx);
2388         drm_modeset_acquire_fini(&ctx);
2389         drm_atomic_state_put(state);
2390
2391         return err;
2392 }
2393
2394 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
2395 {
2396         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2397         const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
2398         u32 old_mode;
2399         int ret;
2400
2401         if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
2402             mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
2403                 drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
2404                 return -EINVAL;
2405         }
2406
2407         ret = mutex_lock_interruptible(&intel_dp->psr.lock);
2408         if (ret)
2409                 return ret;
2410
2411         old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
2412         intel_dp->psr.debug = val;
2413
2414         /*
2415          * Do it right away if it's already enabled, otherwise it will be done
2416          * when enabling the source.
2417          */
2418         if (intel_dp->psr.enabled)
2419                 psr_irq_control(intel_dp);
2420
2421         mutex_unlock(&intel_dp->psr.lock);
2422
2423         if (old_mode != mode)
2424                 ret = intel_psr_fastset_force(dev_priv);
2425
2426         return ret;
2427 }
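
/*
 * Usage note (assumption, not derived from this file alone): this hook is
 * typically driven from a debugfs write. The value combines one of the
 * I915_PSR_DEBUG_* mode values (validated above against
 * I915_PSR_DEBUG_ENABLE_SEL_FETCH) with the optional I915_PSR_DEBUG_IRQ bit,
 * and a mode change triggers intel_psr_fastset_force() so the new mode takes
 * effect right away.
 */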
2428
2429 static void intel_psr_handle_irq(struct intel_dp *intel_dp)
2430 {
2431         struct intel_psr *psr = &intel_dp->psr;
2432
2433         intel_psr_disable_locked(intel_dp);
2434         psr->sink_not_reliable = true;
2435         /* let's make sure the sink is awake */
2436         drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
2437 }
2438
2439 static void intel_psr_work(struct work_struct *work)
2440 {
2441         struct intel_dp *intel_dp =
2442                 container_of(work, typeof(*intel_dp), psr.work);
2443
2444         mutex_lock(&intel_dp->psr.lock);
2445
2446         if (!intel_dp->psr.enabled)
2447                 goto unlock;
2448
2449         if (READ_ONCE(intel_dp->psr.irq_aux_error))
2450                 intel_psr_handle_irq(intel_dp);
2451
2452         /*
2453          * We have to make sure PSR is ready for re-enable
2454          * otherwise it stays disabled until the next full enable/disable cycle.
2455          * PSR might take some time to get fully disabled
2456          * and be ready for re-enable.
2457          */
2458         if (!__psr_wait_for_idle_locked(intel_dp))
2459                 goto unlock;
2460
2461         /*
2462          * The delayed work can race with an invalidate hence we need to
2463          * recheck. Since psr_flush first clears this and then reschedules we
2464          * won't ever miss a flush when bailing out here.
2465          */
2466         if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
2467                 goto unlock;
2468
2469         intel_psr_activate(intel_dp);
2470 unlock:
2471         mutex_unlock(&intel_dp->psr.lock);
2472 }
2473
2474 static void _psr_invalidate_handle(struct intel_dp *intel_dp)
2475 {
2476         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2477         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2478
2479         if (intel_dp->psr.psr2_sel_fetch_enabled) {
2480                 u32 val;
2481
2482                 if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2483                         /* Send one update, otherwise lag is observed on screen */
2484                         intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2485                         return;
2486                 }
2487
2488                 val = man_trk_ctl_enable_bit_get(dev_priv) |
2489                       man_trk_ctl_partial_frame_bit_get(dev_priv) |
2490                       man_trk_ctl_continuos_full_frame(dev_priv);
2491                 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), val);
2492                 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2493                 intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
2494         } else {
2495                 intel_psr_exit(intel_dp);
2496         }
2497 }
2498
2499 /**
2500  * intel_psr_invalidate - Invalidate PSR
2501  * @dev_priv: i915 device
2502  * @frontbuffer_bits: frontbuffer plane tracking bits
2503  * @origin: which operation caused the invalidate
2504  *
2505  * Since the hardware frontbuffer tracking has gaps we need to integrate
2506  * with the software frontbuffer tracking. This function gets called every
2507  * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
2508  * disabled if the frontbuffer mask contains a buffer relevant to PSR.
2509  *
2510  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2511  */
2512 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
2513                           unsigned frontbuffer_bits, enum fb_op_origin origin)
2514 {
2515         struct intel_encoder *encoder;
2516
2517         if (origin == ORIGIN_FLIP)
2518                 return;
2519
2520         for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2521                 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2522                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2523
2524                 mutex_lock(&intel_dp->psr.lock);
2525                 if (!intel_dp->psr.enabled) {
2526                         mutex_unlock(&intel_dp->psr.lock);
2527                         continue;
2528                 }
2529
2530                 pipe_frontbuffer_bits &=
2531                         INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2532                 intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
2533
2534                 if (pipe_frontbuffer_bits)
2535                         _psr_invalidate_handle(intel_dp);
2536
2537                 mutex_unlock(&intel_dp->psr.lock);
2538         }
2539 }
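
/*
 * A minimal sketch of the expected frontbuffer tracking flow (hypothetical
 * caller, shown only to illustrate how the two hooks pair up):
 *
 *	intel_psr_invalidate(i915, frontbuffer_bits, origin);
 *	... frontbuffer rendering happens while PSR stays exited ...
 *	intel_psr_flush(i915, frontbuffer_bits, origin);
 *
 * The flush side either forces a single HW tracking exit or schedules the
 * work that re-activates PSR once the busy frontbuffer bits are clear.
 */
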
2540 /*
2541  * Once we completely rely on PSR2 S/W tracking in the future,
2542  * intel_psr_flush() will invalidate and flush the PSR for the ORIGIN_FLIP
2543  * event as well, therefore tgl_dc3co_flush_locked() will need to be changed
2544  * accordingly.
2545  */
2546 static void
2547 tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
2548                        enum fb_op_origin origin)
2549 {
2550         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2551
2552         if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
2553             !intel_dp->psr.active)
2554                 return;
2555
2556         /*
2557          * At every frontbuffer flush/flip event the delay of the delayed work is
2558          * re-armed; when the delayed work finally runs, the display has been idle.
2559          */
2560         if (!(frontbuffer_bits &
2561             INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
2562                 return;
2563
2564         tgl_psr2_enable_dc3co(intel_dp);
2565         mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
2566                          intel_dp->psr.dc3co_exit_delay);
2567 }
2568
2569 static void _psr_flush_handle(struct intel_dp *intel_dp)
2570 {
2571         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2572         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2573
2574         if (intel_dp->psr.psr2_sel_fetch_enabled) {
2575                 if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2576                         /* can we turn CFF off? */
2577                         if (intel_dp->psr.busy_frontbuffer_bits == 0) {
2578                                 u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
2579                                         man_trk_ctl_partial_frame_bit_get(dev_priv) |
2580                                         man_trk_ctl_single_full_frame_bit_get(dev_priv) |
2581                                         man_trk_ctl_continuos_full_frame(dev_priv);
2582
2583                                 /*
2584                                  * Set psr2_sel_fetch_cff_enabled to false to allow selective
2585                                  * updates. Still keep the CFF bit enabled as we don't have a proper
2586                                  * SU configuration in case an update is sent for any reason after
2587                                  * the SFF bit gets cleared by the HW on the next vblank.
2588                                  */
2589                                 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
2590                                                val);
2591                                 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2592                                 intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2593                         }
2594                 } else {
2595                         /*
2596                          * continuous full frame is disabled, only a single full
2597                          * frame is required
2598                          */
2599                         psr_force_hw_tracking_exit(intel_dp);
2600                 }
2601         } else {
2602                 psr_force_hw_tracking_exit(intel_dp);
2603
2604                 if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
2605                         queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
2606         }
2607 }
2608
2609 /**
2610  * intel_psr_flush - Flush PSR
2611  * @dev_priv: i915 device
2612  * @frontbuffer_bits: frontbuffer plane tracking bits
2613  * @origin: which operation caused the flush
2614  *
2615  * Since the hardware frontbuffer tracking has gaps we need to integrate
2616  * with the software frontbuffer tracking. This function gets called every
2617  * time frontbuffer rendering has completed and flushed out to memory. PSR
2618  * can be enabled again if no other frontbuffer relevant to PSR is dirty.
2619  *
2620  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2621  */
2622 void intel_psr_flush(struct drm_i915_private *dev_priv,
2623                      unsigned frontbuffer_bits, enum fb_op_origin origin)
2624 {
2625         struct intel_encoder *encoder;
2626
2627         for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2628                 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2629                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2630
2631                 mutex_lock(&intel_dp->psr.lock);
2632                 if (!intel_dp->psr.enabled) {
2633                         mutex_unlock(&intel_dp->psr.lock);
2634                         continue;
2635                 }
2636
2637                 pipe_frontbuffer_bits &=
2638                         INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2639                 intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
2640
2641                 /*
2642                  * If the PSR is paused by an explicit intel_psr_pause() call,
2643                  * we have to ensure that the PSR is not activated until
2644                  * intel_psr_resume() is called.
2645                  */
2646                 if (intel_dp->psr.paused)
2647                         goto unlock;
2648
2649                 if (origin == ORIGIN_FLIP ||
2650                     (origin == ORIGIN_CURSOR_UPDATE &&
2651                      !intel_dp->psr.psr2_sel_fetch_enabled)) {
2652                         tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
2653                         goto unlock;
2654                 }
2655
2656                 if (pipe_frontbuffer_bits == 0)
2657                         goto unlock;
2658
2659                 /* By definition flush = invalidate + flush */
2660                 _psr_flush_handle(intel_dp);
2661 unlock:
2662                 mutex_unlock(&intel_dp->psr.lock);
2663         }
2664 }
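
/*
 * Illustrative sketch only (hence the #if 0), not part of the driver: how a
 * frontbuffer tracking caller is expected to pair intel_psr_invalidate()
 * (defined earlier in this file, assumed to take the same arguments as
 * intel_psr_flush()) with intel_psr_flush(). The helper name
 * psr_frontbuffer_write_example() and the choice of ORIGIN_CPU are
 * assumptions made for illustration.
 */
#if 0
static void psr_frontbuffer_write_example(struct drm_i915_private *i915,
                                          enum pipe pipe)
{
        unsigned int frontbuffer_bits = INTEL_FRONTBUFFER_ALL_MASK(pipe);

        /* The CPU is about to dirty the frontbuffer: PSR must exit first. */
        intel_psr_invalidate(i915, frontbuffer_bits, ORIGIN_CPU);

        /* ... frontbuffer rendering happens and is flushed out to memory ... */

        /* Writes have landed; PSR may be re-enabled if nothing else is dirty. */
        intel_psr_flush(i915, frontbuffer_bits, ORIGIN_CPU);
}
#endif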
2665
2666 /**
2667  * intel_psr_init - Init basic PSR work and mutex.
2668  * @intel_dp: Intel DP
2669  *
 * This function is called after the connector has been initialized (connector
 * initialization takes care of the connector capabilities) and sets up the
 * basic PSR state for each DP encoder.
2673  */
2674 void intel_psr_init(struct intel_dp *intel_dp)
2675 {
2676         struct intel_connector *connector = intel_dp->attached_connector;
2677         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2678         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2679
2680         if (!HAS_PSR(dev_priv))
2681                 return;
2682
2683         /*
         * The HSW spec explicitly says PSR is tied to port A.
         * BDW+ platforms have an instance of the PSR registers per transcoder,
         * but on BDW, GEN9 and GEN11 only the eDP transcoder has been
         * validated by the HW team. For now only one instance of PSR is
         * supported on those platforms, so keep it hardcoded to PORT_A there.
         * GEN12 supports an instance of the PSR registers per transcoder.
2691          */
2692         if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
2693                 drm_dbg_kms(&dev_priv->drm,
2694                             "PSR condition failed: Port not supported\n");
2695                 return;
2696         }
2697
2698         intel_dp->psr.source_support = true;
2699
        /* Set link_standby vs. link_off defaults */
2701         if (DISPLAY_VER(dev_priv) < 12)
                /* For platforms before TGL (display ver < 12), respect the VBT again */
2703                 intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
2704
2705         INIT_WORK(&intel_dp->psr.work, intel_psr_work);
2706         INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
2707         mutex_init(&intel_dp->psr.lock);
2708 }
2709
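/*
 * Read the sink's PSR status and PSR error status DPCD registers. Returns 0
 * on success; a failed DPCD read is propagated as that read's return value,
 * which the callers below treat as an error whenever it is non-zero.
 */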
2710 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
2711                                            u8 *status, u8 *error_status)
2712 {
2713         struct drm_dp_aux *aux = &intel_dp->aux;
2714         int ret;
2715
2716         ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
2717         if (ret != 1)
2718                 return ret;
2719
2720         ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
2721         if (ret != 1)
2722                 return ret;
2723
2724         *status = *status & DP_PSR_SINK_STATE_MASK;
2725
2726         return 0;
2727 }
2728
2729 static void psr_alpm_check(struct intel_dp *intel_dp)
2730 {
2731         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2732         struct drm_dp_aux *aux = &intel_dp->aux;
2733         struct intel_psr *psr = &intel_dp->psr;
2734         u8 val;
2735         int r;
2736
2737         if (!psr->psr2_enabled)
2738                 return;
2739
2740         r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
2741         if (r != 1) {
2742                 drm_err(&dev_priv->drm, "Error reading ALPM status\n");
2743                 return;
2744         }
2745
2746         if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
2747                 intel_psr_disable_locked(intel_dp);
2748                 psr->sink_not_reliable = true;
2749                 drm_dbg_kms(&dev_priv->drm,
2750                             "ALPM lock timeout error, disabling PSR\n");
2751
2752                 /* Clearing error */
2753                 drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
2754         }
2755 }
2756
2757 static void psr_capability_changed_check(struct intel_dp *intel_dp)
2758 {
2759         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2760         struct intel_psr *psr = &intel_dp->psr;
2761         u8 val;
2762         int r;
2763
2764         r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
2765         if (r != 1) {
2766                 drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
2767                 return;
2768         }
2769
2770         if (val & DP_PSR_CAPS_CHANGE) {
2771                 intel_psr_disable_locked(intel_dp);
2772                 psr->sink_not_reliable = true;
2773                 drm_dbg_kms(&dev_priv->drm,
2774                             "Sink PSR capability changed, disabling PSR\n");
2775
                /* Clear the capability changed bit */
2777                 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
2778         }
2779 }
2780
2781 void intel_psr_short_pulse(struct intel_dp *intel_dp)
2782 {
2783         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2784         struct intel_psr *psr = &intel_dp->psr;
2785         u8 status, error_status;
2786         const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
2787                           DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
2788                           DP_PSR_LINK_CRC_ERROR;
2789
2790         if (!CAN_PSR(intel_dp))
2791                 return;
2792
2793         mutex_lock(&psr->lock);
2794
2795         if (!psr->enabled)
2796                 goto exit;
2797
2798         if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
2799                 drm_err(&dev_priv->drm,
2800                         "Error reading PSR status or error status\n");
2801                 goto exit;
2802         }
2803
2804         if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
2805                 intel_psr_disable_locked(intel_dp);
2806                 psr->sink_not_reliable = true;
2807         }
2808
2809         if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
2810                 drm_dbg_kms(&dev_priv->drm,
2811                             "PSR sink internal error, disabling PSR\n");
2812         if (error_status & DP_PSR_RFB_STORAGE_ERROR)
2813                 drm_dbg_kms(&dev_priv->drm,
2814                             "PSR RFB storage error, disabling PSR\n");
2815         if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
2816                 drm_dbg_kms(&dev_priv->drm,
2817                             "PSR VSC SDP uncorrectable error, disabling PSR\n");
2818         if (error_status & DP_PSR_LINK_CRC_ERROR)
2819                 drm_dbg_kms(&dev_priv->drm,
2820                             "PSR Link CRC error, disabling PSR\n");
2821
2822         if (error_status & ~errors)
2823                 drm_err(&dev_priv->drm,
2824                         "PSR_ERROR_STATUS unhandled errors %x\n",
2825                         error_status & ~errors);
2826         /* clear status register */
2827         drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
2828
2829         psr_alpm_check(intel_dp);
2830         psr_capability_changed_check(intel_dp);
2831
2832 exit:
2833         mutex_unlock(&psr->lock);
2834 }
2835
2836 bool intel_psr_enabled(struct intel_dp *intel_dp)
2837 {
2838         bool ret;
2839
2840         if (!CAN_PSR(intel_dp))
2841                 return false;
2842
2843         mutex_lock(&intel_dp->psr.lock);
2844         ret = intel_dp->psr.enabled;
2845         mutex_unlock(&intel_dp->psr.lock);
2846
2847         return ret;
2848 }
2849
2850 /**
2851  * intel_psr_lock - grab PSR lock
2852  * @crtc_state: the crtc state
2853  *
 * This is initially meant to be used around the CRTC update, when
 * vblank-sensitive registers are updated and we need to grab the lock
 * before that to avoid vblank evasion.
2857  */
2858 void intel_psr_lock(const struct intel_crtc_state *crtc_state)
2859 {
2860         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2861         struct intel_encoder *encoder;
2862
2863         if (!crtc_state->has_psr)
2864                 return;
2865
2866         for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
2867                                              crtc_state->uapi.encoder_mask) {
2868                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2869
2870                 mutex_lock(&intel_dp->psr.lock);
2871                 break;
2872         }
2873 }
2874
2875 /**
2876  * intel_psr_unlock - release PSR lock
2877  * @crtc_state: the crtc state
2878  *
2879  * Release the PSR lock that was held during pipe update.
2880  */
2881 void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
2882 {
2883         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2884         struct intel_encoder *encoder;
2885
2886         if (!crtc_state->has_psr)
2887                 return;
2888
2889         for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
2890                                              crtc_state->uapi.encoder_mask) {
2891                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2892
2893                 mutex_unlock(&intel_dp->psr.lock);
2894                 break;
2895         }
2896 }
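
/*
 * Illustrative sketch only (hence the #if 0), not part of the driver: the
 * intended pairing of intel_psr_lock()/intel_psr_unlock() around a
 * vblank-sensitive pipe update. The helper name psr_pipe_update_example()
 * is an assumption made for illustration.
 */
#if 0
static void psr_pipe_update_example(const struct intel_crtc_state *new_crtc_state)
{
        /* Take the PSR lock before entering the vblank evasion critical section. */
        intel_psr_lock(new_crtc_state);

        /* ... program the vblank-sensitive (double buffered) registers ... */

        /* Release the lock once the critical section is done. */
        intel_psr_unlock(new_crtc_state);
}
#endif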
2897
2898 static void
2899 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
2900 {
2901         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2902         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2903         const char *status = "unknown";
2904         u32 val, status_val;
2905
2906         if (intel_dp->psr.psr2_enabled) {
2907                 static const char * const live_status[] = {
2908                         "IDLE",
2909                         "CAPTURE",
2910                         "CAPTURE_FS",
2911                         "SLEEP",
2912                         "BUFON_FW",
2913                         "ML_UP",
2914                         "SU_STANDBY",
2915                         "FAST_SLEEP",
2916                         "DEEP_SLEEP",
2917                         "BUF_ON",
2918                         "TG_ON"
2919                 };
2920                 val = intel_de_read(dev_priv, EDP_PSR2_STATUS(cpu_transcoder));
2921                 status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
2922                 if (status_val < ARRAY_SIZE(live_status))
2923                         status = live_status[status_val];
2924         } else {
2925                 static const char * const live_status[] = {
2926                         "IDLE",
2927                         "SRDONACK",
2928                         "SRDENT",
2929                         "BUFOFF",
2930                         "BUFON",
2931                         "AUXACK",
2932                         "SRDOFFACK",
2933                         "SRDENT_ON",
2934                 };
2935                 val = intel_de_read(dev_priv, psr_status_reg(dev_priv, cpu_transcoder));
2936                 status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
2937                 if (status_val < ARRAY_SIZE(live_status))
2938                         status = live_status[status_val];
2939         }
2940
2941         seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
2942 }
2943
2944 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
2945 {
2946         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2947         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2948         struct intel_psr *psr = &intel_dp->psr;
2949         intel_wakeref_t wakeref;
2950         const char *status;
2951         bool enabled;
2952         u32 val;
2953
2954         seq_printf(m, "Sink support: %s", str_yes_no(psr->sink_support));
2955         if (psr->sink_support)
2956                 seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
2957         seq_puts(m, "\n");
2958
2959         if (!psr->sink_support)
2960                 return 0;
2961
2962         wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2963         mutex_lock(&psr->lock);
2964
2965         if (psr->enabled)
2966                 status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
2967         else
2968                 status = "disabled";
2969         seq_printf(m, "PSR mode: %s\n", status);
2970
2971         if (!psr->enabled) {
2972                 seq_printf(m, "PSR sink not reliable: %s\n",
2973                            str_yes_no(psr->sink_not_reliable));
2974
2975                 goto unlock;
2976         }
2977
2978         if (psr->psr2_enabled) {
2979                 val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
2980                 enabled = val & EDP_PSR2_ENABLE;
2981         } else {
2982                 val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
2983                 enabled = val & EDP_PSR_ENABLE;
2984         }
2985         seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
2986                    str_enabled_disabled(enabled), val);
2987         psr_source_status(intel_dp, m);
2988         seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
2989                    psr->busy_frontbuffer_bits);
2990
2991         /*
         * SKL+ Perf counter is reset to 0 every time a DC state is entered
2993          */
2994         val = intel_de_read(dev_priv, psr_perf_cnt_reg(dev_priv, cpu_transcoder));
2995         seq_printf(m, "Performance counter: %u\n",
2996                    REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
2997
2998         if (psr->debug & I915_PSR_DEBUG_IRQ) {
2999                 seq_printf(m, "Last attempted entry at: %lld\n",
3000                            psr->last_entry_attempt);
3001                 seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
3002         }
3003
3004         if (psr->psr2_enabled) {
3005                 u32 su_frames_val[3];
3006                 int frame;
3007
3008                 /*
                 * Read all 3 registers beforehand to minimize the chance of
                 * crossing a frame boundary between the register reads.
3011                  */
3012                 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
3013                         val = intel_de_read(dev_priv, PSR2_SU_STATUS(cpu_transcoder, frame));
3014                         su_frames_val[frame / 3] = val;
3015                 }
3016
3017                 seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
3018
3019                 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
3020                         u32 su_blocks;
3021
3022                         su_blocks = su_frames_val[frame / 3] &
3023                                     PSR2_SU_STATUS_MASK(frame);
3024                         su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
3025                         seq_printf(m, "%d\t%d\n", frame, su_blocks);
3026                 }
3027
3028                 seq_printf(m, "PSR2 selective fetch: %s\n",
3029                            str_enabled_disabled(psr->psr2_sel_fetch_enabled));
3030         }
3031
3032 unlock:
3033         mutex_unlock(&psr->lock);
3034         intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3035
3036         return 0;
3037 }
3038
3039 static int i915_edp_psr_status_show(struct seq_file *m, void *data)
3040 {
3041         struct drm_i915_private *dev_priv = m->private;
3042         struct intel_dp *intel_dp = NULL;
3043         struct intel_encoder *encoder;
3044
3045         if (!HAS_PSR(dev_priv))
3046                 return -ENODEV;
3047
        /* Find the first eDP encoder which supports PSR */
3049         for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3050                 intel_dp = enc_to_intel_dp(encoder);
3051                 break;
3052         }
3053
3054         if (!intel_dp)
3055                 return -ENODEV;
3056
3057         return intel_psr_status(m, intel_dp);
3058 }
3059 DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
3060
3061 static int
3062 i915_edp_psr_debug_set(void *data, u64 val)
3063 {
3064         struct drm_i915_private *dev_priv = data;
3065         struct intel_encoder *encoder;
3066         intel_wakeref_t wakeref;
3067         int ret = -ENODEV;
3068
3069         if (!HAS_PSR(dev_priv))
3070                 return ret;
3071
3072         for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3073                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3074
3075                 drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
3076
3077                 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3078
                // TODO: split into per-transcoder PSR debug state
3080                 ret = intel_psr_debug_set(intel_dp, val);
3081
3082                 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3083         }
3084
3085         return ret;
3086 }
3087
3088 static int
3089 i915_edp_psr_debug_get(void *data, u64 *val)
3090 {
3091         struct drm_i915_private *dev_priv = data;
3092         struct intel_encoder *encoder;
3093
3094         if (!HAS_PSR(dev_priv))
3095                 return -ENODEV;
3096
3097         for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3098                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3099
                // TODO: split into per-transcoder PSR debug state
3101                 *val = READ_ONCE(intel_dp->psr.debug);
3102                 return 0;
3103         }
3104
3105         return -ENODEV;
3106 }
3107
3108 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
3109                         i915_edp_psr_debug_get, i915_edp_psr_debug_set,
3110                         "%llu\n");
3111
3112 void intel_psr_debugfs_register(struct drm_i915_private *i915)
3113 {
3114         struct drm_minor *minor = i915->drm.primary;
3115
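        /*
         * These entries land under the DRM primary minor's debugfs directory,
         * typically /sys/kernel/debug/dri/<minor>/ (the exact path depends on
         * where debugfs is mounted).
         */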
3116         debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
3117                             i915, &i915_edp_psr_debug_fops);
3118
3119         debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
3120                             i915, &i915_edp_psr_status_fops);
3121 }
3122
3123 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
3124 {
3125         struct intel_connector *connector = m->private;
3126         struct intel_dp *intel_dp = intel_attached_dp(connector);
3127         static const char * const sink_status[] = {
3128                 "inactive",
3129                 "transition to active, capture and display",
3130                 "active, display from RFB",
3131                 "active, capture and display on sink device timings",
3132                 "transition to inactive, capture and display, timing re-sync",
3133                 "reserved",
3134                 "reserved",
3135                 "sink internal error",
3136         };
3137         const char *str;
3138         int ret;
3139         u8 val;
3140
3141         if (!CAN_PSR(intel_dp)) {
3142                 seq_puts(m, "PSR Unsupported\n");
3143                 return -ENODEV;
3144         }
3145
3146         if (connector->base.status != connector_status_connected)
3147                 return -ENODEV;
3148
3149         ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
3150         if (ret != 1)
3151                 return ret < 0 ? ret : -EIO;
3152
3153         val &= DP_PSR_SINK_STATE_MASK;
3154         if (val < ARRAY_SIZE(sink_status))
3155                 str = sink_status[val];
3156         else
3157                 str = "unknown";
3158
3159         seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
3160
3161         return 0;
3162 }
3163 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
3164
3165 static int i915_psr_status_show(struct seq_file *m, void *data)
3166 {
3167         struct intel_connector *connector = m->private;
3168         struct intel_dp *intel_dp = intel_attached_dp(connector);
3169
3170         return intel_psr_status(m, intel_dp);
3171 }
3172 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
3173
3174 void intel_psr_connector_debugfs_add(struct intel_connector *connector)
3175 {
3176         struct drm_i915_private *i915 = to_i915(connector->base.dev);
3177         struct dentry *root = connector->base.debugfs_entry;
3178
3179         if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
3180                 return;
3181
3182         debugfs_create_file("i915_psr_sink_status", 0444, root,
3183                             connector, &i915_psr_sink_status_fops);
3184
3185         if (HAS_PSR(i915))
3186                 debugfs_create_file("i915_psr_status", 0444, root,
3187                                     connector, &i915_psr_status_fops);
3188 }