drm/i915/wm: switch to intel_de_* register accessors in display code
drivers/gpu/drm/i915/display/skl_watermark.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include <drm/drm_blend.h>

#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "skl_watermark.h"

#include "i915_drv.h"
#include "i915_fixed.h"
#include "i915_reg.h"
#include "intel_pcode.h"
#include "intel_pm.h"

static void skl_sagv_disable(struct drm_i915_private *i915);

/* Stores plane specific WM parameters */
struct skl_wm_params {
        bool x_tiled, y_tiled;
        bool rc_surface;
        bool is_planar;
        u32 width;
        u8 cpp;
        u32 plane_pixel_rate;
        u32 y_min_scanlines;
        u32 plane_bytes_per_line;
        uint_fixed_16_16_t plane_blocks_per_line;
        uint_fixed_16_16_t y_tile_minimum;
        u32 linetime_us;
        u32 dbuf_block_size;
};

u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *i915)
{
        u8 enabled_slices = 0;
        enum dbuf_slice slice;

        for_each_dbuf_slice(i915, slice) {
                if (intel_de_read(i915, DBUF_CTL_S(slice)) & DBUF_POWER_STATE)
                        enabled_slices |= BIT(slice);
        }

        return enabled_slices;
}

/*
 * FIXME: We still don't have the proper code to detect whether we need to
 * apply the WA, so assume we'll always need it in order to avoid underruns.
 */
static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915)
{
        return DISPLAY_VER(i915) == 9;
}

static bool
intel_has_sagv(struct drm_i915_private *i915)
{
        return DISPLAY_VER(i915) >= 9 && !IS_LP(i915) &&
                i915->display.sagv.status != I915_SAGV_NOT_CONTROLLED;
}

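/*
 * Look up the SAGV block time for the platform: display version 14+
 * reads it from the MTL_LATENCY_SAGV register, display version 12/13
 * platforms query it via a pcode mailbox read, and older platforms
 * (ICL, and SKL era non-LP parts) use fixed values in microseconds.
 * Returns 0 when the block time is unknown or not applicable.
 */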
static u32
intel_sagv_block_time(struct drm_i915_private *i915)
{
        if (DISPLAY_VER(i915) >= 14) {
                u32 val;

                val = intel_de_read(i915, MTL_LATENCY_SAGV);

                return REG_FIELD_GET(MTL_LATENCY_QCLK_SAGV, val);
        } else if (DISPLAY_VER(i915) >= 12) {
                u32 val = 0;
                int ret;

                ret = snb_pcode_read(&i915->uncore,
                                     GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
                                     &val, NULL);
                if (ret) {
                        drm_dbg_kms(&i915->drm, "Couldn't read SAGV block time!\n");
                        return 0;
                }

                return val;
        } else if (DISPLAY_VER(i915) == 11) {
                return 10;
        } else if (DISPLAY_VER(i915) == 9 && !IS_LP(i915)) {
                return 30;
        } else {
                return 0;
        }
}

static void intel_sagv_init(struct drm_i915_private *i915)
{
        if (!intel_has_sagv(i915))
                i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;

        /*
         * Probe to see if we have working SAGV control.
         * For icl+ this was already determined by intel_bw_init_hw().
         */
        if (DISPLAY_VER(i915) < 11)
                skl_sagv_disable(i915);

        drm_WARN_ON(&i915->drm, i915->display.sagv.status == I915_SAGV_UNKNOWN);

        i915->display.sagv.block_time_us = intel_sagv_block_time(i915);

        drm_dbg_kms(&i915->drm, "SAGV supported: %s, original SAGV block time: %u us\n",
                    str_yes_no(intel_has_sagv(i915)), i915->display.sagv.block_time_us);

        /* avoid overflow when adding with wm0 latency/etc. */
        if (drm_WARN(&i915->drm, i915->display.sagv.block_time_us > U16_MAX,
                     "Excessive SAGV block time %u, ignoring\n",
                     i915->display.sagv.block_time_us))
                i915->display.sagv.block_time_us = 0;

        if (!intel_has_sagv(i915))
                i915->display.sagv.block_time_us = 0;
}

/*
 * SAGV dynamically adjusts the system agent voltage and clock frequencies
 * depending on power and performance requirements. The display engine access
 * to system memory is blocked during the adjustment time. Because of the
 * blocking time, having this enabled can cause full system hangs and/or pipe
 * underruns if we don't meet all of the following requirements:
 *
 *  - <= 1 pipe enabled
 *  - All planes can enable watermarks for latencies >= SAGV engine block time
 *  - We're not using an interlaced display configuration
 */
static void skl_sagv_enable(struct drm_i915_private *i915)
{
        int ret;

        if (!intel_has_sagv(i915))
                return;

        if (i915->display.sagv.status == I915_SAGV_ENABLED)
                return;

        drm_dbg_kms(&i915->drm, "Enabling SAGV\n");
        ret = snb_pcode_write(&i915->uncore, GEN9_PCODE_SAGV_CONTROL,
                              GEN9_SAGV_ENABLE);

        /* We don't need to wait for SAGV when enabling */

        /*
         * Some skl systems, pre-release machines in particular,
         * don't actually have SAGV.
         */
        if (IS_SKYLAKE(i915) && ret == -ENXIO) {
                drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n");
                i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
                return;
        } else if (ret < 0) {
                drm_err(&i915->drm, "Failed to enable SAGV\n");
                return;
        }

        i915->display.sagv.status = I915_SAGV_ENABLED;
}

static void skl_sagv_disable(struct drm_i915_private *i915)
{
        int ret;

        if (!intel_has_sagv(i915))
                return;

        if (i915->display.sagv.status == I915_SAGV_DISABLED)
                return;

        drm_dbg_kms(&i915->drm, "Disabling SAGV\n");
        /* bspec says to keep retrying for at least 1 ms */
        ret = skl_pcode_request(&i915->uncore, GEN9_PCODE_SAGV_CONTROL,
                                GEN9_SAGV_DISABLE,
                                GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
                                1);
        /*
         * Some skl systems, pre-release machines in particular,
         * don't actually have SAGV.
         */
        if (IS_SKYLAKE(i915) && ret == -ENXIO) {
                drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n");
                i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
                return;
        } else if (ret < 0) {
                drm_err(&i915->drm, "Failed to disable SAGV (%d)\n", ret);
                return;
        }

        i915->display.sagv.status = I915_SAGV_DISABLED;
}

static void skl_sagv_pre_plane_update(struct intel_atomic_state *state)
{
        struct drm_i915_private *i915 = to_i915(state->base.dev);
        const struct intel_bw_state *new_bw_state =
                intel_atomic_get_new_bw_state(state);

        if (!new_bw_state)
                return;

        if (!intel_can_enable_sagv(i915, new_bw_state))
                skl_sagv_disable(i915);
}

static void skl_sagv_post_plane_update(struct intel_atomic_state *state)
{
        struct drm_i915_private *i915 = to_i915(state->base.dev);
        const struct intel_bw_state *new_bw_state =
                intel_atomic_get_new_bw_state(state);

        if (!new_bw_state)
                return;

        if (intel_can_enable_sagv(i915, new_bw_state))
                skl_sagv_enable(i915);
}

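/*
 * The QGV point mask is updated in two phases around a plane update:
 * the pre hook restricts the union of the old and new masks, and the
 * post hook then relaxes it down to just the new mask. E.g. going from
 * qgv_points_mask 0x3 to 0x6, the pre hook restricts 0x7 and the post
 * hook relaxes to 0x6, so no point that either state rejects is ever
 * enabled mid-update.
 */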
static void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
{
        struct drm_i915_private *i915 = to_i915(state->base.dev);
        const struct intel_bw_state *old_bw_state =
                intel_atomic_get_old_bw_state(state);
        const struct intel_bw_state *new_bw_state =
                intel_atomic_get_new_bw_state(state);
        u16 old_mask, new_mask;

        if (!new_bw_state)
                return;

        old_mask = old_bw_state->qgv_points_mask;
        new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;

        if (old_mask == new_mask)
                return;

        WARN_ON(!new_bw_state->base.changed);

        drm_dbg_kms(&i915->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
                    old_mask, new_mask);

        /*
         * Restrict the required qgv points before updating the configuration.
         * According to BSpec we can't mask and unmask qgv points at the same
         * time; masking must be done before updating the configuration and
         * unmasking afterwards.
         */
        icl_pcode_restrict_qgv_points(i915, new_mask);
}

static void icl_sagv_post_plane_update(struct intel_atomic_state *state)
{
        struct drm_i915_private *i915 = to_i915(state->base.dev);
        const struct intel_bw_state *old_bw_state =
                intel_atomic_get_old_bw_state(state);
        const struct intel_bw_state *new_bw_state =
                intel_atomic_get_new_bw_state(state);
        u16 old_mask, new_mask;

        if (!new_bw_state)
                return;

        old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
        new_mask = new_bw_state->qgv_points_mask;

        if (old_mask == new_mask)
                return;

        WARN_ON(!new_bw_state->base.changed);

        drm_dbg_kms(&i915->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
                    old_mask, new_mask);

        /*
         * Allow the required qgv points after updating the configuration.
         * According to BSpec we can't mask and unmask qgv points at the same
         * time; masking must be done before updating the configuration and
         * unmasking afterwards.
         */
        icl_pcode_restrict_qgv_points(i915, new_mask);
}

void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
{
        struct drm_i915_private *i915 = to_i915(state->base.dev);

        /*
         * Just return if we can't control SAGV or don't have it.
         * This is different from the situation where we have SAGV but just
         * can't afford it due to DBuf limitations - if SAGV is completely
         * disabled in the BIOS, we are not even allowed to send a PCode
         * request, as it will throw an error. So we have to check for that
         * here.
         */
        if (!intel_has_sagv(i915))
                return;

        if (DISPLAY_VER(i915) >= 11)
                icl_sagv_pre_plane_update(state);
        else
                skl_sagv_pre_plane_update(state);
}

void intel_sagv_post_plane_update(struct intel_atomic_state *state)
{
        struct drm_i915_private *i915 = to_i915(state->base.dev);

        /*
         * Just return if we can't control SAGV or don't have it.
         * This is different from the situation where we have SAGV but just
         * can't afford it due to DBuf limitations - if SAGV is completely
         * disabled in the BIOS, we are not even allowed to send a PCode
         * request, as it will throw an error. So we have to check for that
         * here.
         */
        if (!intel_has_sagv(i915))
                return;

        if (DISPLAY_VER(i915) >= 11)
                icl_sagv_post_plane_update(state);
        else
                skl_sagv_post_plane_update(state);
}

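/*
 * pre-tgl SAGV criteria: the pipe must not be interlaced, and every
 * enabled plane must still have its watermark enabled at the highest
 * wm level common to all enabled planes, with that level able to
 * tolerate the extra SAGV block time (wm[level].can_sagv).
 */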
static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *i915 = to_i915(crtc->base.dev);
        enum plane_id plane_id;
        int max_level = INT_MAX;

        if (!intel_has_sagv(i915))
                return false;

        if (!crtc_state->hw.active)
                return true;

        if (crtc_state->hw.pipe_mode.flags & DRM_MODE_FLAG_INTERLACE)
                return false;

        for_each_plane_id_on_crtc(crtc, plane_id) {
                const struct skl_plane_wm *wm =
                        &crtc_state->wm.skl.optimal.planes[plane_id];
                int level;

                /* Skip this plane if it's not enabled */
                if (!wm->wm[0].enable)
                        continue;

                /* Find the highest enabled wm level for this plane */
                for (level = ilk_wm_max_level(i915);
                     !wm->wm[level].enable; --level)
                     { }

                /* Highest common enabled wm level for all planes */
                max_level = min(level, max_level);
        }

        /* No enabled planes? */
        if (max_level == INT_MAX)
                return true;

        for_each_plane_id_on_crtc(crtc, plane_id) {
                const struct skl_plane_wm *wm =
                        &crtc_state->wm.skl.optimal.planes[plane_id];

                /*
                 * All enabled planes must have enabled a common wm level that
                 * can tolerate memory latencies higher than sagv_block_time_us
                 */
                if (wm->wm[0].enable && !wm->wm[max_level].can_sagv)
                        return false;
        }

        return true;
}

static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        enum plane_id plane_id;

        if (!crtc_state->hw.active)
                return true;

        for_each_plane_id_on_crtc(crtc, plane_id) {
                const struct skl_plane_wm *wm =
                        &crtc_state->wm.skl.optimal.planes[plane_id];

                if (wm->wm[0].enable && !wm->sagv.wm0.enable)
                        return false;
        }

        return true;
}

static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *i915 = to_i915(crtc->base.dev);

        if (DISPLAY_VER(i915) >= 12)
                return tgl_crtc_can_enable_sagv(crtc_state);
        else
                return skl_crtc_can_enable_sagv(crtc_state);
}

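/*
 * SAGV is allowed only if no pipe has vetoed it (pipe_sagv_reject == 0).
 * Additionally, pre-icl hardware can only use SAGV with at most one
 * active pipe, hence the power-of-2 check on active_pipes.
 */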
bool intel_can_enable_sagv(struct drm_i915_private *i915,
                           const struct intel_bw_state *bw_state)
{
        if (DISPLAY_VER(i915) < 11 &&
            bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
                return false;

        return bw_state->pipe_sagv_reject == 0;
}

static int intel_compute_sagv_mask(struct intel_atomic_state *state)
{
        struct drm_i915_private *i915 = to_i915(state->base.dev);
        int ret;
        struct intel_crtc *crtc;
        struct intel_crtc_state *new_crtc_state;
        struct intel_bw_state *new_bw_state = NULL;
        const struct intel_bw_state *old_bw_state = NULL;
        int i;

        for_each_new_intel_crtc_in_state(state, crtc,
                                         new_crtc_state, i) {
                new_bw_state = intel_atomic_get_bw_state(state);
                if (IS_ERR(new_bw_state))
                        return PTR_ERR(new_bw_state);

                old_bw_state = intel_atomic_get_old_bw_state(state);

                if (intel_crtc_can_enable_sagv(new_crtc_state))
                        new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
                else
                        new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
        }

        if (!new_bw_state)
                return 0;

        new_bw_state->active_pipes =
                intel_calc_active_pipes(state, old_bw_state->active_pipes);

        if (new_bw_state->active_pipes != old_bw_state->active_pipes) {
                ret = intel_atomic_lock_global_state(&new_bw_state->base);
                if (ret)
                        return ret;
        }

        if (intel_can_enable_sagv(i915, new_bw_state) !=
            intel_can_enable_sagv(i915, old_bw_state)) {
                ret = intel_atomic_serialize_global_state(&new_bw_state->base);
                if (ret)
                        return ret;
        } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
                ret = intel_atomic_lock_global_state(&new_bw_state->base);
                if (ret)
                        return ret;
        }

        for_each_new_intel_crtc_in_state(state, crtc,
                                         new_crtc_state, i) {
                struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;

                /*
                 * We store use_sagv_wm in the crtc state rather than relying on
                 * that bw state since we have no convenient way to get at the
                 * latter from the plane commit hooks (especially in the legacy
                 * cursor case)
                 */
                pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(i915) &&
                        DISPLAY_VER(i915) >= 12 &&
                        intel_can_enable_sagv(i915, new_bw_state);
        }

        return 0;
}

static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry,
                              u16 start, u16 end)
{
        entry->start = start;
        entry->end = end;

        return end;
}

static int intel_dbuf_slice_size(struct drm_i915_private *i915)
{
        return INTEL_INFO(i915)->display.dbuf.size /
                hweight8(INTEL_INFO(i915)->display.dbuf.slice_mask);
}

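/*
 * The DDB range covered by a slice mask is derived from its first and
 * last set bits. E.g. with a 2048 block DDB split into two 1024 block
 * slices, slice_mask 0x2 (DBUF_S2 only) yields start = 1024, end = 2048,
 * while slice_mask 0x3 covers the whole 0..2048 range.
 */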
static void
skl_ddb_entry_for_slices(struct drm_i915_private *i915, u8 slice_mask,
                         struct skl_ddb_entry *ddb)
{
        int slice_size = intel_dbuf_slice_size(i915);

        if (!slice_mask) {
                ddb->start = 0;
                ddb->end = 0;
                return;
        }

        ddb->start = (ffs(slice_mask) - 1) * slice_size;
        ddb->end = fls(slice_mask) * slice_size;

        WARN_ON(ddb->start >= ddb->end);
        WARN_ON(ddb->end > INTEL_INFO(i915)->display.dbuf.size);
}

static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask)
{
        struct skl_ddb_entry ddb;

        if (slice_mask & (BIT(DBUF_S1) | BIT(DBUF_S2)))
                slice_mask = BIT(DBUF_S1);
        else if (slice_mask & (BIT(DBUF_S3) | BIT(DBUF_S4)))
                slice_mask = BIT(DBUF_S3);

        skl_ddb_entry_for_slices(i915, slice_mask, &ddb);

        return ddb.start;
}

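/*
 * Example: with a slice size of 1024 blocks, a DDB entry spanning
 * blocks 512..1536 touches slice 0 (512 / 1024 = 0) and slice 1
 * ((1536 - 1) / 1024 = 1), so the returned mask is BIT(0) | BIT(1).
 */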
u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915,
                            const struct skl_ddb_entry *entry)
{
        int slice_size = intel_dbuf_slice_size(i915);
        enum dbuf_slice start_slice, end_slice;
        u8 slice_mask = 0;

        if (!skl_ddb_entry_size(entry))
                return 0;

        start_slice = entry->start / slice_size;
        end_slice = (entry->end - 1) / slice_size;

        /*
         * A per-plane DDB entry can in the worst case span multiple slices,
         * but a single entry is always contiguous.
         */
        while (start_slice <= end_slice) {
                slice_mask |= BIT(start_slice);
                start_slice++;
        }

        return slice_mask;
}

static unsigned int intel_crtc_ddb_weight(const struct intel_crtc_state *crtc_state)
{
        const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
        int hdisplay, vdisplay;

        if (!crtc_state->hw.active)
                return 0;

        /*
         * The watermark/ddb requirement depends heavily on the width of the
         * framebuffer, so instead of allocating DDB equally among pipes,
         * distribute it based on the resolution/width of the display.
         */
        drm_mode_get_hv_timing(pipe_mode, &hdisplay, &vdisplay);

        return hdisplay;
}

static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state,
                                    enum pipe for_pipe,
                                    unsigned int *weight_start,
                                    unsigned int *weight_end,
                                    unsigned int *weight_total)
{
        struct drm_i915_private *i915 =
                to_i915(dbuf_state->base.state->base.dev);
        enum pipe pipe;

        *weight_start = 0;
        *weight_end = 0;
        *weight_total = 0;

        for_each_pipe(i915, pipe) {
                int weight = dbuf_state->weight[pipe];

                /*
                 * Do not account for pipes using other slice sets. Luckily,
                 * as of the current BSpec, slice sets do not partially
                 * intersect (pipes share either the same single slice or the
                 * same slice set, i.e. no partial intersection), so it is
                 * enough to check for equality for now.
                 */
                if (dbuf_state->slices[pipe] != dbuf_state->slices[for_pipe])
                        continue;

                *weight_total += weight;
                if (pipe < for_pipe) {
                        *weight_start += weight;
                        *weight_end += weight;
                } else if (pipe == for_pipe) {
                        *weight_end += weight;
                }
        }
}

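/*
 * Example of the weight based split below: two pipes A and B sharing a
 * 2048 block slice set with weights 1920 and 1280 (weight_total = 3200)
 * end up with ddb ranges A: [0, 2048 * 1920 / 3200) = [0, 1228) and
 * B: [1228, 2048), i.e. the range is carved up proportionally to each
 * pipe's hdisplay based weight.
 */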
static int
skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
{
        struct drm_i915_private *i915 = to_i915(crtc->base.dev);
        unsigned int weight_total, weight_start, weight_end;
        const struct intel_dbuf_state *old_dbuf_state =
                intel_atomic_get_old_dbuf_state(state);
        struct intel_dbuf_state *new_dbuf_state =
                intel_atomic_get_new_dbuf_state(state);
        struct intel_crtc_state *crtc_state;
        struct skl_ddb_entry ddb_slices;
        enum pipe pipe = crtc->pipe;
        unsigned int mbus_offset = 0;
        u32 ddb_range_size;
        u32 dbuf_slice_mask;
        u32 start, end;
        int ret;

        if (new_dbuf_state->weight[pipe] == 0) {
                skl_ddb_entry_init(&new_dbuf_state->ddb[pipe], 0, 0);
                goto out;
        }

        dbuf_slice_mask = new_dbuf_state->slices[pipe];

        skl_ddb_entry_for_slices(i915, dbuf_slice_mask, &ddb_slices);
        mbus_offset = mbus_ddb_offset(i915, dbuf_slice_mask);
        ddb_range_size = skl_ddb_entry_size(&ddb_slices);

        intel_crtc_dbuf_weights(new_dbuf_state, pipe,
                                &weight_start, &weight_end, &weight_total);

        start = ddb_range_size * weight_start / weight_total;
        end = ddb_range_size * weight_end / weight_total;

        skl_ddb_entry_init(&new_dbuf_state->ddb[pipe],
                           ddb_slices.start - mbus_offset + start,
                           ddb_slices.start - mbus_offset + end);

out:
        if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe] &&
            skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe],
                                &new_dbuf_state->ddb[pipe]))
                return 0;

        ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
        if (ret)
                return ret;

        crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
        if (IS_ERR(crtc_state))
                return PTR_ERR(crtc_state);

        /*
         * Used for checking overlaps, so we need absolute
         * offsets instead of MBUS relative offsets.
         */
        crtc_state->wm.skl.ddb.start = mbus_offset + new_dbuf_state->ddb[pipe].start;
        crtc_state->wm.skl.ddb.end = mbus_offset + new_dbuf_state->ddb[pipe].end;

        drm_dbg_kms(&i915->drm,
                    "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n",
                    crtc->base.base.id, crtc->base.name,
                    old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe],
                    old_dbuf_state->ddb[pipe].start, old_dbuf_state->ddb[pipe].end,
                    new_dbuf_state->ddb[pipe].start, new_dbuf_state->ddb[pipe].end,
                    old_dbuf_state->active_pipes, new_dbuf_state->active_pipes);

        return 0;
}

static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
                                 int width, const struct drm_format_info *format,
                                 u64 modifier, unsigned int rotation,
                                 u32 plane_pixel_rate, struct skl_wm_params *wp,
                                 int color_plane);

static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
                                 struct intel_plane *plane,
                                 int level,
                                 unsigned int latency,
                                 const struct skl_wm_params *wp,
                                 const struct skl_wm_level *result_prev,
                                 struct skl_wm_level *result /* out */);

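/*
 * Estimate a worst-case cursor DDB allocation by running the watermark
 * calculations for a hypothetical 256 pixel wide linear ARGB8888 cursor
 * at the crtc's pixel rate, clamped to a minimum of 32 blocks (8 when
 * more than one pipe is active).
 */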
static unsigned int
skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
                      int num_active)
{
        struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor);
        struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
        int level, max_level = ilk_wm_max_level(i915);
        struct skl_wm_level wm = {};
        int ret, min_ddb_alloc = 0;
        struct skl_wm_params wp;

        ret = skl_compute_wm_params(crtc_state, 256,
                                    drm_format_info(DRM_FORMAT_ARGB8888),
                                    DRM_FORMAT_MOD_LINEAR,
                                    DRM_MODE_ROTATE_0,
                                    crtc_state->pixel_rate, &wp, 0);
        drm_WARN_ON(&i915->drm, ret);

        for (level = 0; level <= max_level; level++) {
                unsigned int latency = i915->display.wm.skl_latency[level];

                skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm);
                if (wm.min_ddb_alloc == U16_MAX)
                        break;

                min_ddb_alloc = wm.min_ddb_alloc;
        }

        return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
}

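/*
 * The hardware buffer config registers store an inclusive end block,
 * so a non-zero end value is bumped by one here to match the exclusive
 * end convention of struct skl_ddb_entry.
 */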
static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
{
        skl_ddb_entry_init(entry,
                           REG_FIELD_GET(PLANE_BUF_START_MASK, reg),
                           REG_FIELD_GET(PLANE_BUF_END_MASK, reg));
        if (entry->end)
                entry->end++;
}

static void
skl_ddb_get_hw_plane_state(struct drm_i915_private *i915,
                           const enum pipe pipe,
                           const enum plane_id plane_id,
                           struct skl_ddb_entry *ddb,
                           struct skl_ddb_entry *ddb_y)
{
        u32 val;

        /* Cursor doesn't support NV12/planar, so no extra calculation needed */
        if (plane_id == PLANE_CURSOR) {
                val = intel_de_read(i915, CUR_BUF_CFG(pipe));
                skl_ddb_entry_init_from_hw(ddb, val);
                return;
        }

        val = intel_de_read(i915, PLANE_BUF_CFG(pipe, plane_id));
        skl_ddb_entry_init_from_hw(ddb, val);

        if (DISPLAY_VER(i915) >= 11)
                return;

        val = intel_de_read(i915, PLANE_NV12_BUF_CFG(pipe, plane_id));
        skl_ddb_entry_init_from_hw(ddb_y, val);
}

static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
                                      struct skl_ddb_entry *ddb,
                                      struct skl_ddb_entry *ddb_y)
{
        struct drm_i915_private *i915 = to_i915(crtc->base.dev);
        enum intel_display_power_domain power_domain;
        enum pipe pipe = crtc->pipe;
        intel_wakeref_t wakeref;
        enum plane_id plane_id;

        power_domain = POWER_DOMAIN_PIPE(pipe);
        wakeref = intel_display_power_get_if_enabled(i915, power_domain);
        if (!wakeref)
                return;

        for_each_plane_id_on_crtc(crtc, plane_id)
                skl_ddb_get_hw_plane_state(i915, pipe,
                                           plane_id,
                                           &ddb[plane_id],
                                           &ddb_y[plane_id]);

        intel_display_power_put(i915, power_domain, wakeref);
}

struct dbuf_slice_conf_entry {
        u8 active_pipes;
        u8 dbuf_mask[I915_MAX_PIPES];
        bool join_mbus;
};

/*
 * Table taken from Bspec 12716.
 * Pipes have some preferred DBuf slice affinity, plus there are some
 * hardcoded requirements on how those should be distributed for
 * multipipe scenarios. With more DBuf slices the algorithm can get even
 * messier and less readable, so we decided to use a table almost as-is
 * from the BSpec itself - that way it is at least easier to compare,
 * change and check.
 */
static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] =
/* Autogenerated with igt/tools/intel_dbuf_map tool: */
{
        {
                .active_pipes = BIT(PIPE_A),
                .dbuf_mask = {
                        [PIPE_A] = BIT(DBUF_S1),
                },
        },
        {
                .active_pipes = BIT(PIPE_B),
                .dbuf_mask = {
                        [PIPE_B] = BIT(DBUF_S1),
                },
        },
        {
                .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
                .dbuf_mask = {
                        [PIPE_A] = BIT(DBUF_S1),
                        [PIPE_B] = BIT(DBUF_S2),
                },
        },
        {
                .active_pipes = BIT(PIPE_C),
                .dbuf_mask = {
                        [PIPE_C] = BIT(DBUF_S2),
                },
        },
        {
                .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
                .dbuf_mask = {
                        [PIPE_A] = BIT(DBUF_S1),
                        [PIPE_C] = BIT(DBUF_S2),
                },
        },
        {
                .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
                .dbuf_mask = {
                        [PIPE_B] = BIT(DBUF_S1),
                        [PIPE_C] = BIT(DBUF_S2),
                },
        },
        {
                .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
                .dbuf_mask = {
                        [PIPE_A] = BIT(DBUF_S1),
                        [PIPE_B] = BIT(DBUF_S1),
                        [PIPE_C] = BIT(DBUF_S2),
                },
        },
        {}
};

/*
 * Table taken from Bspec 49255.
 * Pipes have some preferred DBuf slice affinity, plus there are some
 * hardcoded requirements on how those should be distributed for
 * multipipe scenarios. With more DBuf slices the algorithm can get even
 * messier and less readable, so we decided to use a table almost as-is
 * from the BSpec itself - that way it is at least easier to compare,
 * change and check.
 */
static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =
/* Autogenerated with igt/tools/intel_dbuf_map tool: */
{
        {
                .active_pipes = BIT(PIPE_A),
                .dbuf_mask = {
                        [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
                },
        },
        {
                .active_pipes = BIT(PIPE_B),
                .dbuf_mask = {
                        [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
                },
        },
        {
                .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
                .dbuf_mask = {
                        [PIPE_A] = BIT(DBUF_S2),
                        [PIPE_B] = BIT(DBUF_S1),
                },
        },
        {
                .active_pipes = BIT(PIPE_C),
                .dbuf_mask = {
                        [PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1),
                },
        },
        {
                .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
                .dbuf_mask = {
                        [PIPE_A] = BIT(DBUF_S1),
                        [PIPE_C] = BIT(DBUF_S2),
                },
        },
        {
                .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
                .dbuf_mask = {
                        [PIPE_B] = BIT(DBUF_S1),
                        [PIPE_C] = BIT(DBUF_S2),
                },
        },
        {
                .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
                .dbuf_mask = {
                        [PIPE_A] = BIT(DBUF_S1),
                        [PIPE_B] = BIT(DBUF_S1),
                        [PIPE_C] = BIT(DBUF_S2),
                },
        },
        {
                .active_pipes = BIT(PIPE_D),
                .dbuf_mask = {
                        [PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1),
                },
        },
        {
                .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
                .dbuf_mask = {
                        [PIPE_A] = BIT(DBUF_S1),
                        [PIPE_D] = BIT(DBUF_S2),
                },
        },
        {
                .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
                .dbuf_mask = {
                        [PIPE_B] = BIT(DBUF_S1),
                        [PIPE_D] = BIT(DBUF_S2),
                },
        },
        {
                .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
                .dbuf_mask = {
                        [PIPE_A] = BIT(DBUF_S1),
                        [PIPE_B] = BIT(DBUF_S1),
                        [PIPE_D] = BIT(DBUF_S2),
                },
        },
        {
                .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
                .dbuf_mask = {
                        [PIPE_C] = BIT(DBUF_S1),
                        [PIPE_D] = BIT(DBUF_S2),
                },
        },
        {
                .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
                .dbuf_mask = {
                        [PIPE_A] = BIT(DBUF_S1),
                        [PIPE_C] = BIT(DBUF_S2),
                        [PIPE_D] = BIT(DBUF_S2),
                },
        },
        {
                .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
                .dbuf_mask = {
                        [PIPE_B] = BIT(DBUF_S1),
                        [PIPE_C] = BIT(DBUF_S2),
                        [PIPE_D] = BIT(DBUF_S2),
                },
        },
        {
                .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
                .dbuf_mask = {
                        [PIPE_A] = BIT(DBUF_S1),
                        [PIPE_B] = BIT(DBUF_S1),
                        [PIPE_C] = BIT(DBUF_S2),
                        [PIPE_D] = BIT(DBUF_S2),
                },
        },
        {}
};

static const struct dbuf_slice_conf_entry dg2_allowed_dbufs[] = {
        {
                .active_pipes = BIT(PIPE_A),
                .dbuf_mask = {
                        [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
                },
        },
        {
                .active_pipes = BIT(PIPE_B),
                .dbuf_mask = {
                        [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
                },
        },
        {
                .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
                .dbuf_mask = {
                        [PIPE_A] = BIT(DBUF_S1),
                        [PIPE_B] = BIT(DBUF_S2),
                },
        },
        {
                .active_pipes = BIT(PIPE_C),
                .dbuf_mask = {
                        [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
                },
        },
        {
                .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
                .dbuf_mask = {
                        [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
                        [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
                },
        },
        {
                .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
                .dbuf_mask = {
                        [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
                        [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
                },
        },
        {
                .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
                .dbuf_mask = {
                        [PIPE_A] = BIT(DBUF_S1),
                        [PIPE_B] = BIT(DBUF_S2),
                        [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
                },
        },
        {
                .active_pipes = BIT(PIPE_D),
                .dbuf_mask = {
                        [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
                },
        },
        {
                .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
                .dbuf_mask = {
                        [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
                        [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
                },
        },
        {
                .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
                .dbuf_mask = {
                        [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
                        [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
                },
        },
        {
                .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
                .dbuf_mask = {
                        [PIPE_A] = BIT(DBUF_S1),
                        [PIPE_B] = BIT(DBUF_S2),
                        [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
                },
        },
        {
                .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
                .dbuf_mask = {
                        [PIPE_C] = BIT(DBUF_S3),
                        [PIPE_D] = BIT(DBUF_S4),
                },
        },
        {
                .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
                .dbuf_mask = {
                        [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
                        [PIPE_C] = BIT(DBUF_S3),
                        [PIPE_D] = BIT(DBUF_S4),
                },
        },
        {
                .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
                .dbuf_mask = {
                        [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
                        [PIPE_C] = BIT(DBUF_S3),
                        [PIPE_D] = BIT(DBUF_S4),
                },
        },
        {
                .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
                .dbuf_mask = {
                        [PIPE_A] = BIT(DBUF_S1),
                        [PIPE_B] = BIT(DBUF_S2),
                        [PIPE_C] = BIT(DBUF_S3),
                        [PIPE_D] = BIT(DBUF_S4),
                },
        },
        {}
};

static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = {
        /*
         * Keep the join_mbus cases first so check_mbus_joined()
         * will prefer them over the !join_mbus cases.
         */
        {
                .active_pipes = BIT(PIPE_A),
                .dbuf_mask = {
                        [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
                },
                .join_mbus = true,
        },
        {
                .active_pipes = BIT(PIPE_B),
                .dbuf_mask = {
                        [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
                },
                .join_mbus = true,
        },
        {
                .active_pipes = BIT(PIPE_A),
                .dbuf_mask = {
                        [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
                },
                .join_mbus = false,
        },
        {
                .active_pipes = BIT(PIPE_B),
                .dbuf_mask = {
                        [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
                },
                .join_mbus = false,
        },
        {
                .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
                .dbuf_mask = {
                        [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
                        [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
                },
        },
        {
                .active_pipes = BIT(PIPE_C),
                .dbuf_mask = {
                        [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
                },
        },
        {
                .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
                .dbuf_mask = {
                        [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
                        [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
                },
        },
        {
                .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
                .dbuf_mask = {
                        [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
                        [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
                },
        },
        {
                .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
                .dbuf_mask = {
                        [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
                        [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
                        [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
                },
        },
        {
                .active_pipes = BIT(PIPE_D),
                .dbuf_mask = {
                        [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
                },
        },
        {
                .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
                .dbuf_mask = {
                        [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
                        [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
                },
        },
        {
                .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
                .dbuf_mask = {
                        [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
                        [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
                },
        },
        {
                .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
                .dbuf_mask = {
                        [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
                        [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
                        [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
                },
        },
        {
                .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
                .dbuf_mask = {
                        [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
                        [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
                },
        },
        {
                .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
                .dbuf_mask = {
                        [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
                        [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
                        [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
                },
        },
        {
                .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
                .dbuf_mask = {
                        [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
                        [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
                        [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
                },
        },
        {
                .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
                .dbuf_mask = {
                        [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
                        [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
                        [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
                        [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
                },
        },
        {}
};

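/*
 * The tables above are scanned linearly and are terminated by an empty
 * sentinel entry (.active_pipes == 0); a lookup matches on the exact
 * active_pipes mask (and join_mbus, where applicable) and falls back to
 * "no slices"/"no mbus join" when nothing matches.
 */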
static bool check_mbus_joined(u8 active_pipes,
                              const struct dbuf_slice_conf_entry *dbuf_slices)
{
        int i;

        for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
                if (dbuf_slices[i].active_pipes == active_pipes)
                        return dbuf_slices[i].join_mbus;
        }
        return false;
}

static bool adlp_check_mbus_joined(u8 active_pipes)
{
        return check_mbus_joined(active_pipes, adlp_allowed_dbufs);
}

static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus,
                              const struct dbuf_slice_conf_entry *dbuf_slices)
{
        int i;

        for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
                if (dbuf_slices[i].active_pipes == active_pipes &&
                    dbuf_slices[i].join_mbus == join_mbus)
                        return dbuf_slices[i].dbuf_mask[pipe];
        }
        return 0;
}

/*
 * This function finds an entry with the same enabled pipe configuration and
 * returns the corresponding DBuf slice mask as stated in the BSpec for the
 * particular platform.
 */
static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
{
        /*
         * FIXME: For ICL this is still a bit unclear, as a previous BSpec
         * revision required calculating a "pipe ratio" in order to determine
         * whether one or two slices can be used for single pipe
         * configurations, as an additional constraint to the existing table.
         * However, based on recent info, it should not be "pipe ratio" but
         * rather the ratio between pixel_rate and cdclk with additional
         * constants, so for now we are using only the table until this is
         * clarified. This is also the reason why the crtc_state param is
         * still here - we will need it once those additional constraints
         * pop up.
         */
        return compute_dbuf_slices(pipe, active_pipes, join_mbus,
                                   icl_allowed_dbufs);
}

static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
{
        return compute_dbuf_slices(pipe, active_pipes, join_mbus,
                                   tgl_allowed_dbufs);
}

static u8 adlp_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
{
        return compute_dbuf_slices(pipe, active_pipes, join_mbus,
                                   adlp_allowed_dbufs);
}

static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
{
        return compute_dbuf_slices(pipe, active_pipes, join_mbus,
                                   dg2_allowed_dbufs);
}

static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus)
{
        struct drm_i915_private *i915 = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        if (IS_DG2(i915))
                return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus);
        else if (DISPLAY_VER(i915) >= 13)
                return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus);
        else if (DISPLAY_VER(i915) == 12)
                return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
        else if (DISPLAY_VER(i915) == 11)
                return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
        /*
         * For anything else just return one slice for now.
         * Should be extended for other platforms.
         */
        return active_pipes & BIT(pipe) ? BIT(DBUF_S1) : 0;
}

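/*
 * Display version 13+ restricts planes undergoing an async flip to a
 * minimal WM0-only watermark configuration; the check below gates that
 * on both the uapi async_flip flag and the plane actually supporting
 * async flips.
 */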
static bool
use_minimal_wm0_only(const struct intel_crtc_state *crtc_state,
                     struct intel_plane *plane)
{
        struct drm_i915_private *i915 = to_i915(plane->base.dev);

        return DISPLAY_VER(i915) >= 13 &&
               crtc_state->uapi.async_flip &&
               plane->async_flip;
}

static u64
skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *i915 = to_i915(crtc->base.dev);
        enum plane_id plane_id;
        u64 data_rate = 0;

        for_each_plane_id_on_crtc(crtc, plane_id) {
                if (plane_id == PLANE_CURSOR)
                        continue;

                data_rate += crtc_state->rel_data_rate[plane_id];

                if (DISPLAY_VER(i915) < 11)
                        data_rate += crtc_state->rel_data_rate_y[plane_id];
        }

        return data_rate;
}

static const struct skl_wm_level *
skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
                   enum plane_id plane_id,
                   int level)
{
        const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];

        if (level == 0 && pipe_wm->use_sagv_wm)
                return &wm->sagv.wm0;

        return &wm->wm[level];
}

static const struct skl_wm_level *
skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm,
                   enum plane_id plane_id)
{
        const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];

        if (pipe_wm->use_sagv_wm)
                return &wm->sagv.trans_wm;

        return &wm->trans_wm;
}

/*
 * We only disable the watermarks for each plane if
 * they exceed the ddb allocation of said plane. This
 * is done so that we don't end up touching cursor
 * watermarks needlessly when some other plane reduces
 * our max possible watermark level.
 *
 * Bspec has this to say about the PLANE_WM enable bit:
 * "All the watermarks at this level for all enabled
 *  planes must be enabled before the level will be used."
 * So this is actually safe to do.
 */
static void
skl_check_wm_level(struct skl_wm_level *wm, const struct skl_ddb_entry *ddb)
{
        if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb))
                memset(wm, 0, sizeof(*wm));
}

static void
skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm,
                        const struct skl_ddb_entry *ddb_y, const struct skl_ddb_entry *ddb)
{
        if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb_y) ||
            uv_wm->min_ddb_alloc > skl_ddb_entry_size(ddb)) {
                memset(wm, 0, sizeof(*wm));
                memset(uv_wm, 0, sizeof(*uv_wm));
        }
}

static bool icl_need_wm1_wa(struct drm_i915_private *i915,
                            enum plane_id plane_id)
{
        /*
         * Wa_1408961008:icl, ehl
         * Wa_14012656716:tgl, adl
         * Underruns with WM1+ disabled
         */
        return DISPLAY_VER(i915) == 11 ||
               (IS_DISPLAY_VER(i915, 12, 13) && plane_id == PLANE_CURSOR);
}

struct skl_plane_ddb_iter {
        u64 data_rate;
        u16 start, size;
};

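/*
 * Example of the proportional "extra" distribution below: with
 * iter->size = 100 leftover blocks and iter->data_rate = 60 remaining,
 * a plane with data_rate = 30 receives
 * extra = min(100, DIV64_U64_ROUND_UP(100 * 30, 60)) = 50 blocks on top
 * of its min_ddb_alloc, leaving 50 blocks and a data rate of 30 for the
 * remaining planes.
 */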
static void
skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter,
                       struct skl_ddb_entry *ddb,
                       const struct skl_wm_level *wm,
                       u64 data_rate)
{
        u16 size, extra = 0;

        if (data_rate) {
                extra = min_t(u16, iter->size,
                              DIV64_U64_ROUND_UP(iter->size * data_rate,
                                                 iter->data_rate));
                iter->size -= extra;
                iter->data_rate -= data_rate;
        }

        /*
         * Keep ddb entry of all disabled planes explicitly zeroed
         * to avoid skl_ddb_add_affected_planes() adding them to
         * the state when other planes change their allocations.
         */
        size = wm->min_ddb_alloc + extra;
        if (size)
                iter->start = skl_ddb_entry_init(ddb, iter->start,
                                                 iter->start + size);
}
1453
1454 static int
1455 skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
1456                             struct intel_crtc *crtc)
1457 {
1458         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1459         struct intel_crtc_state *crtc_state =
1460                 intel_atomic_get_new_crtc_state(state, crtc);
1461         const struct intel_dbuf_state *dbuf_state =
1462                 intel_atomic_get_new_dbuf_state(state);
1463         const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe];
1464         int num_active = hweight8(dbuf_state->active_pipes);
1465         struct skl_plane_ddb_iter iter;
1466         enum plane_id plane_id;
1467         u16 cursor_size;
1468         u32 blocks;
1469         int level;
1470
1471         /* Clear the partitioning for disabled planes. */
1472         memset(crtc_state->wm.skl.plane_ddb, 0, sizeof(crtc_state->wm.skl.plane_ddb));
1473         memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
1474
1475         if (!crtc_state->hw.active)
1476                 return 0;
1477
1478         iter.start = alloc->start;
1479         iter.size = skl_ddb_entry_size(alloc);
1480         if (iter.size == 0)
1481                 return 0;
1482
1483         /* Allocate fixed number of blocks for cursor. */
1484         cursor_size = skl_cursor_allocation(crtc_state, num_active);
1485         iter.size -= cursor_size;
1486         skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR],
1487                            alloc->end - cursor_size, alloc->end);
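             /*
              * Note: the cursor entry is carved out of the top (end) of the
              * pipe's allocation; the remaining planes are handed out blocks
              * bottom-up starting from iter.start.
              */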
1488
1489         iter.data_rate = skl_total_relative_data_rate(crtc_state);
1490
1491         /*
1492          * Find the highest watermark level for which we can satisfy the block
1493          * requirement of active planes.
1494          */
1495         for (level = ilk_wm_max_level(i915); level >= 0; level--) {
1496                 blocks = 0;
1497                 for_each_plane_id_on_crtc(crtc, plane_id) {
1498                         const struct skl_plane_wm *wm =
1499                                 &crtc_state->wm.skl.optimal.planes[plane_id];
1500
1501                         if (plane_id == PLANE_CURSOR) {
1502                                 const struct skl_ddb_entry *ddb =
1503                                         &crtc_state->wm.skl.plane_ddb[plane_id];
1504
1505                                 if (wm->wm[level].min_ddb_alloc > skl_ddb_entry_size(ddb)) {
1506                                         drm_WARN_ON(&i915->drm,
1507                                                     wm->wm[level].min_ddb_alloc != U16_MAX);
1508                                         blocks = U32_MAX;
1509                                         break;
1510                                 }
1511                                 continue;
1512                         }
1513
1514                         blocks += wm->wm[level].min_ddb_alloc;
1515                         blocks += wm->uv_wm[level].min_ddb_alloc;
1516                 }
1517
1518                 if (blocks <= iter.size) {
1519                         iter.size -= blocks;
1520                         break;
1521                 }
1522         }
1523
1524         if (level < 0) {
1525                 drm_dbg_kms(&i915->drm,
1526                             "Requested display configuration exceeds system DDB limitations\n");
1527                 drm_dbg_kms(&i915->drm, "minimum required %d/%d\n",
1528                             blocks, iter.size);
1529                 return -EINVAL;
1530         }
1531
1532         /* avoid the WARN later when we don't allocate any extra DDB */
1533         if (iter.data_rate == 0)
1534                 iter.size = 0;
1535
1536         /*
1537          * Grant each plane the blocks it requires at the highest achievable
1538          * watermark level, plus an extra share of the leftover blocks
1539          * proportional to its relative data rate.
1540          */
1541         for_each_plane_id_on_crtc(crtc, plane_id) {
1542                 struct skl_ddb_entry *ddb =
1543                         &crtc_state->wm.skl.plane_ddb[plane_id];
1544                 struct skl_ddb_entry *ddb_y =
1545                         &crtc_state->wm.skl.plane_ddb_y[plane_id];
1546                 const struct skl_plane_wm *wm =
1547                         &crtc_state->wm.skl.optimal.planes[plane_id];
1548
1549                 if (plane_id == PLANE_CURSOR)
1550                         continue;
1551
1552                 if (DISPLAY_VER(i915) < 11 &&
1553                     crtc_state->nv12_planes & BIT(plane_id)) {
1554                         skl_allocate_plane_ddb(&iter, ddb_y, &wm->wm[level],
1555                                                crtc_state->rel_data_rate_y[plane_id]);
1556                         skl_allocate_plane_ddb(&iter, ddb, &wm->uv_wm[level],
1557                                                crtc_state->rel_data_rate[plane_id]);
1558                 } else {
1559                         skl_allocate_plane_ddb(&iter, ddb, &wm->wm[level],
1560                                                crtc_state->rel_data_rate[plane_id]);
1561                 }
1562         }
1563         drm_WARN_ON(&i915->drm, iter.size != 0 || iter.data_rate != 0);
1564
1565         /*
1566          * When we calculated watermark values we didn't know how high
1567          * of a level we'd actually be able to hit, so we just marked
1568          * all levels as "enabled."  Go back now and disable the ones
1569          * that aren't actually possible.
1570          */
1571         for (level++; level <= ilk_wm_max_level(i915); level++) {
1572                 for_each_plane_id_on_crtc(crtc, plane_id) {
1573                         const struct skl_ddb_entry *ddb =
1574                                 &crtc_state->wm.skl.plane_ddb[plane_id];
1575                         const struct skl_ddb_entry *ddb_y =
1576                                 &crtc_state->wm.skl.plane_ddb_y[plane_id];
1577                         struct skl_plane_wm *wm =
1578                                 &crtc_state->wm.skl.optimal.planes[plane_id];
1579
1580                         if (DISPLAY_VER(i915) < 11 &&
1581                             crtc_state->nv12_planes & BIT(plane_id))
1582                                 skl_check_nv12_wm_level(&wm->wm[level],
1583                                                         &wm->uv_wm[level],
1584                                                         ddb_y, ddb);
1585                         else
1586                                 skl_check_wm_level(&wm->wm[level], ddb);
1587
1588                         if (icl_need_wm1_wa(i915, plane_id) &&
1589                             level == 1 && wm->wm[0].enable) {
1590                                 wm->wm[level].blocks = wm->wm[0].blocks;
1591                                 wm->wm[level].lines = wm->wm[0].lines;
1592                                 wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
1593                         }
1594                 }
1595         }
1596
1597         /*
1598          * Go back and disable the transition and SAGV watermarks
1599          * if it turns out we don't have enough DDB blocks for them.
1600          */
1601         for_each_plane_id_on_crtc(crtc, plane_id) {
1602                 const struct skl_ddb_entry *ddb =
1603                         &crtc_state->wm.skl.plane_ddb[plane_id];
1604                 const struct skl_ddb_entry *ddb_y =
1605                         &crtc_state->wm.skl.plane_ddb_y[plane_id];
1606                 struct skl_plane_wm *wm =
1607                         &crtc_state->wm.skl.optimal.planes[plane_id];
1608
1609                 if (DISPLAY_VER(i915) < 11 &&
1610                     crtc_state->nv12_planes & BIT(plane_id)) {
1611                         skl_check_wm_level(&wm->trans_wm, ddb_y);
1612                 } else {
1613                         drm_WARN_ON(&i915->drm, skl_ddb_entry_size(ddb_y));
1614
1615                         skl_check_wm_level(&wm->trans_wm, ddb);
1616                 }
1617
1618                 skl_check_wm_level(&wm->sagv.wm0, ddb);
1619                 skl_check_wm_level(&wm->sagv.trans_wm, ddb);
1620         }
1621
1622         return 0;
1623 }
1624
1625 /*
1626  * The max latency is 257 us (the max the punit can encode is 255, plus
1627  * 2 us for the read latency) and cpp is always <= 8, so the u32
1628  * intermediate value allows pixel rates up to ~2 GHz. That is sufficient
1629  * since the max 2xcdclk is 1350 MHz and the pixel rate never exceeds it.
1630  */
1631 static uint_fixed_16_16_t
1632 skl_wm_method1(const struct drm_i915_private *i915, u32 pixel_rate,
1633                u8 cpp, u32 latency, u32 dbuf_block_size)
1634 {
1635         u32 wm_intermediate_val;
1636         uint_fixed_16_16_t ret;
1637
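             /*
              * Method 1 sizes the WM to cover the memory latency at the
              * plane's pixel rate: latency [us] * pixel_rate [kHz] / 1000
              * = pixels fetched during the latency window, * cpp = bytes,
              * / dbuf_block_size = blocks. Illustrative example (numbers
              * made up): 10 us at 594000 kHz, 4 Bpp, 512 byte blocks ->
              * 10 * 594000 * 4 / (1000 * 512) ~= 46.4 blocks.
              */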
1638         if (latency == 0)
1639                 return FP_16_16_MAX;
1640
1641         wm_intermediate_val = latency * pixel_rate * cpp;
1642         ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);
1643
1644         if (DISPLAY_VER(i915) >= 10)
1645                 ret = add_fixed16_u32(ret, 1);
1646
1647         return ret;
1648 }
1649
1650 static uint_fixed_16_16_t
1651 skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
1652                uint_fixed_16_16_t plane_blocks_per_line)
1653 {
1654         u32 wm_intermediate_val;
1655         uint_fixed_16_16_t ret;
1656
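             /*
              * Method 2 expresses the latency in whole lines and converts
              * back to blocks. Illustrative example (numbers made up):
              * 10 us at 594000 kHz with htotal 4400 ->
              * DIV_ROUND_UP(5940000, 4400000) = 2 lines, i.e.
              * 2 * plane_blocks_per_line blocks.
              */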
1657         if (latency == 0)
1658                 return FP_16_16_MAX;
1659
1660         wm_intermediate_val = latency * pixel_rate;
1661         wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
1662                                            pipe_htotal * 1000);
1663         ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
1664         return ret;
1665 }
1666
1667 static uint_fixed_16_16_t
1668 intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
1669 {
1670         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1671         u32 pixel_rate;
1672         u32 crtc_htotal;
1673         uint_fixed_16_16_t linetime_us;
1674
1675         if (!crtc_state->hw.active)
1676                 return u32_to_fixed16(0);
1677
1678         pixel_rate = crtc_state->pixel_rate;
1679
1680         if (drm_WARN_ON(&i915->drm, pixel_rate == 0))
1681                 return u32_to_fixed16(0);
1682
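             /*
              * linetime = htotal * 1000 / pixel_rate, in us as fixed point.
              * E.g. (illustrative) htotal 4400 at 594000 kHz ~= 7.4 us.
              */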
1683         crtc_htotal = crtc_state->hw.pipe_mode.crtc_htotal;
1684         linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
1685
1686         return linetime_us;
1687 }
1688
1689 static int
1690 skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
1691                       int width, const struct drm_format_info *format,
1692                       u64 modifier, unsigned int rotation,
1693                       u32 plane_pixel_rate, struct skl_wm_params *wp,
1694                       int color_plane)
1695 {
1696         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1697         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1698         u32 interm_pbpl;
1699
1700         /* only planar formats have two color planes */
1701         if (color_plane == 1 &&
1702             !intel_format_info_is_yuv_semiplanar(format, modifier)) {
1703                 drm_dbg_kms(&i915->drm,
1704                             "Non-planar formats have a single plane\n");
1705                 return -EINVAL;
1706         }
1707
1708         wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
1709         wp->y_tiled = modifier != I915_FORMAT_MOD_X_TILED &&
1710                 intel_fb_is_tiled_modifier(modifier);
1711         wp->rc_surface = intel_fb_is_ccs_modifier(modifier);
1712         wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
1713
1714         wp->width = width;
1715         if (color_plane == 1 && wp->is_planar)
1716                 wp->width /= 2;
1717
1718         wp->cpp = format->cpp[color_plane];
1719         wp->plane_pixel_rate = plane_pixel_rate;
1720
1721         if (DISPLAY_VER(i915) >= 11 &&
1722             modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
1723                 wp->dbuf_block_size = 256;
1724         else
1725                 wp->dbuf_block_size = 512;
1726
1727         if (drm_rotation_90_or_270(rotation)) {
1728                 switch (wp->cpp) {
1729                 case 1:
1730                         wp->y_min_scanlines = 16;
1731                         break;
1732                 case 2:
1733                         wp->y_min_scanlines = 8;
1734                         break;
1735                 case 4:
1736                         wp->y_min_scanlines = 4;
1737                         break;
1738                 default:
1739                         MISSING_CASE(wp->cpp);
1740                         return -EINVAL;
1741                 }
1742         } else {
1743                 wp->y_min_scanlines = 4;
1744         }
1745
1746         if (skl_needs_memory_bw_wa(i915))
1747                 wp->y_min_scanlines *= 2;
1748
1749         wp->plane_bytes_per_line = wp->width * wp->cpp;
1750         if (wp->y_tiled) {
1751                 interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *
1752                                            wp->y_min_scanlines,
1753                                            wp->dbuf_block_size);
1754
1755                 if (DISPLAY_VER(i915) >= 10)
1756                         interm_pbpl++;
1757
1758                 wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
1759                                                         wp->y_min_scanlines);
1760         } else {
1761                 interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
1762                                            wp->dbuf_block_size);
1763
1764                 if (!wp->x_tiled || DISPLAY_VER(i915) >= 10)
1765                         interm_pbpl++;
1766
1767                 wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
1768         }
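             /*
              * Illustrative example (numbers made up): a 3840 pixel wide,
              * 4 Bpp, Y-tiled plane with y_min_scanlines = 4 and 512 byte
              * blocks gives interm_pbpl = DIV_ROUND_UP(3840 * 4 * 4, 512)
              * = 120 (121 on DISPLAY_VER >= 10), i.e. ~30.25 blocks per
              * line after dividing back by y_min_scanlines.
              */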
1769
1770         wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
1771                                              wp->plane_blocks_per_line);
1772
1773         wp->linetime_us = fixed16_to_u32_round_up(intel_get_linetime_us(crtc_state));
1774
1775         return 0;
1776 }
1777
1778 static int
1779 skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
1780                             const struct intel_plane_state *plane_state,
1781                             struct skl_wm_params *wp, int color_plane)
1782 {
1783         const struct drm_framebuffer *fb = plane_state->hw.fb;
1784         int width;
1785
1786         /*
1787          * Src coordinates are already rotated by 270 degrees for
1788          * the 90/270 degree plane rotation cases (to match the
1789          * GTT mapping), hence no need to account for rotation here.
1790          */
1791         width = drm_rect_width(&plane_state->uapi.src) >> 16;
1792
1793         return skl_compute_wm_params(crtc_state, width,
1794                                      fb->format, fb->modifier,
1795                                      plane_state->hw.rotation,
1796                                      intel_plane_pixel_rate(crtc_state, plane_state),
1797                                      wp, color_plane);
1798 }
1799
1800 static bool skl_wm_has_lines(struct drm_i915_private *i915, int level)
1801 {
1802         if (DISPLAY_VER(i915) >= 10)
1803                 return true;
1804
1805         /* The number of lines is ignored for the level 0 watermark. */
1806         return level > 0;
1807 }
1808
1809 static int skl_wm_max_lines(struct drm_i915_private *i915)
1810 {
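             /*
              * Assumption (not spelled out here): the cap matches the width
              * of the PLANE_WM lines field, 8 bits on DISPLAY_VER 13+ vs
              * 5 bits on older platforms.
              */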
1811         if (DISPLAY_VER(i915) >= 13)
1812                 return 255;
1813         else
1814                 return 31;
1815 }
1816
1817 static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
1818                                  struct intel_plane *plane,
1819                                  int level,
1820                                  unsigned int latency,
1821                                  const struct skl_wm_params *wp,
1822                                  const struct skl_wm_level *result_prev,
1823                                  struct skl_wm_level *result /* out */)
1824 {
1825         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1826         uint_fixed_16_16_t method1, method2;
1827         uint_fixed_16_16_t selected_result;
1828         u32 blocks, lines, min_ddb_alloc = 0;
1829
1830         if (latency == 0 ||
1831             (use_minimal_wm0_only(crtc_state, plane) && level > 0)) {
1832                 /* reject it */
1833                 result->min_ddb_alloc = U16_MAX;
1834                 return;
1835         }
1836
1837         /*
1838          * WaIncreaseLatencyIPCEnabled: kbl,cfl
1839          * Display WA #1141: kbl,cfl
1840          */
1841         if ((IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) &&
1842             skl_watermark_ipc_enabled(i915))
1843                 latency += 4;
1844
1845         if (skl_needs_memory_bw_wa(i915) && wp->x_tiled)
1846                 latency += 15;
1847
1848         method1 = skl_wm_method1(i915, wp->plane_pixel_rate,
1849                                  wp->cpp, latency, wp->dbuf_block_size);
1850         method2 = skl_wm_method2(wp->plane_pixel_rate,
1851                                  crtc_state->hw.pipe_mode.crtc_htotal,
1852                                  latency,
1853                                  wp->plane_blocks_per_line);
1854
1855         if (wp->y_tiled) {
1856                 selected_result = max_fixed16(method2, wp->y_tile_minimum);
1857         } else {
1858                 if ((wp->cpp * crtc_state->hw.pipe_mode.crtc_htotal /
1859                      wp->dbuf_block_size < 1) &&
1860                      (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
1861                         selected_result = method2;
1862                 } else if (latency >= wp->linetime_us) {
1863                         if (DISPLAY_VER(i915) == 9)
1864                                 selected_result = min_fixed16(method1, method2);
1865                         else
1866                                 selected_result = method2;
1867                 } else {
1868                         selected_result = method1;
1869                 }
1870         }
1871
1872         blocks = fixed16_to_u32_round_up(selected_result) + 1;
1873         /*
1874          * Let blocks be at minimum equivalent to plane_blocks_per_line,
1875          * as the lines configuration implies at least one line. This is
1876          * a workaround for FIFO underruns observed with resolutions like
1877          * 4k 60 Hz in single channel DRAM configurations.
1878          *
1879          * As per Bspec 49325, if the ddb allocation can hold at least
1880          * one plane_blocks_per_line, we should have selected method2 in
1881          * the logic above. Assuming that modern platforms have enough
1882          * dbuf and that method2 guarantees blocks equivalent to at least
1883          * one line, clamp blocks to plane_blocks_per_line.
1884          *
1885          * TODO: Revisit this once we better understand the impact of DRAM
1886          * channels on the level 0 memory latency and the relevant wm
1887          * calculations.
1888          */
1889         if (skl_wm_has_lines(i915, level))
1890                 blocks = max(blocks,
1891                              fixed16_to_u32_round_up(wp->plane_blocks_per_line));
1892         lines = div_round_up_fixed16(selected_result,
1893                                      wp->plane_blocks_per_line);
1894
1895         if (DISPLAY_VER(i915) == 9) {
1896                 /* Display WA #1125: skl,bxt,kbl */
1897                 if (level == 0 && wp->rc_surface)
1898                         blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
1899
1900                 /* Display WA #1126: skl,bxt,kbl */
1901                 if (level >= 1 && level <= 7) {
1902                         if (wp->y_tiled) {
1903                                 blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
1904                                 lines += wp->y_min_scanlines;
1905                         } else {
1906                                 blocks++;
1907                         }
1908
1909                         /*
1910                          * Make sure result blocks for higher latency levels are
1911                          * at least as high as the level below. The DDB algorithm
1912                          * optimization assumes this for special cases. This also
1913                          * covers Display WA #1125 for RC.
1914                          */
1915                         if (result_prev->blocks > blocks)
1916                                 blocks = result_prev->blocks;
1917                 }
1918         }
1919
1920         if (DISPLAY_VER(i915) >= 11) {
1921                 if (wp->y_tiled) {
1922                         int extra_lines;
1923
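                             /*
                              * Round lines up past the next y_min_scanlines
                              * boundary, i.e. add one extra tile row.
                              * Illustrative example: lines = 10 with
                              * y_min_scanlines = 4 -> extra_lines =
                              * 2 * 4 - 10 % 4 = 6, so the allocation is
                              * sized for 16 lines.
                              */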
1924                         if (lines % wp->y_min_scanlines == 0)
1925                                 extra_lines = wp->y_min_scanlines;
1926                         else
1927                                 extra_lines = wp->y_min_scanlines * 2 -
1928                                         lines % wp->y_min_scanlines;
1929
1930                         min_ddb_alloc = mul_round_up_u32_fixed16(lines + extra_lines,
1931                                                                  wp->plane_blocks_per_line);
1932                 } else {
1933                         min_ddb_alloc = blocks + DIV_ROUND_UP(blocks, 10);
1934                 }
1935         }
1936
1937         if (!skl_wm_has_lines(i915, level))
1938                 lines = 0;
1939
1940         if (lines > skl_wm_max_lines(i915)) {
1941                 /* reject it */
1942                 result->min_ddb_alloc = U16_MAX;
1943                 return;
1944         }
1945
1946         /*
1947          * If lines is valid, assume we can use this watermark level
1948          * for now.  We'll come back and disable it after we calculate the
1949          * DDB allocation if it turns out we don't actually have enough
1950          * blocks to satisfy it.
1951          */
1952         result->blocks = blocks;
1953         result->lines = lines;
1954         /* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */
1955         result->min_ddb_alloc = max(min_ddb_alloc, blocks) + 1;
1956         result->enable = true;
1957
1958         if (DISPLAY_VER(i915) < 12 && i915->display.sagv.block_time_us)
1959                 result->can_sagv = latency >= i915->display.sagv.block_time_us;
1960 }
1961
1962 static void
1963 skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
1964                       struct intel_plane *plane,
1965                       const struct skl_wm_params *wm_params,
1966                       struct skl_wm_level *levels)
1967 {
1968         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1969         int level, max_level = ilk_wm_max_level(i915);
1970         struct skl_wm_level *result_prev = &levels[0];
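             /*
              * result_prev feeds the DISPLAY_VER == 9 clamping in
              * skl_compute_plane_wm() (Display WA #1126), keeping blocks
              * monotonically non-decreasing across levels 1-7.
              */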
1971
1972         for (level = 0; level <= max_level; level++) {
1973                 struct skl_wm_level *result = &levels[level];
1974                 unsigned int latency = i915->display.wm.skl_latency[level];
1975
1976                 skl_compute_plane_wm(crtc_state, plane, level, latency,
1977                                      wm_params, result_prev, result);
1978
1979                 result_prev = result;
1980         }
1981 }
1982
1983 static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
1984                                 struct intel_plane *plane,
1985                                 const struct skl_wm_params *wm_params,
1986                                 struct skl_plane_wm *plane_wm)
1987 {
1988         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1989         struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0;
1990         struct skl_wm_level *levels = plane_wm->wm;
1991         unsigned int latency = 0;
1992
1993         if (i915->display.sagv.block_time_us)
1994                 latency = i915->display.sagv.block_time_us + i915->display.wm.skl_latency[0];
1995
1996         skl_compute_plane_wm(crtc_state, plane, 0, latency,
1997                              wm_params, &levels[0],
1998                              sagv_wm);
1999 }
2000
2001 static void skl_compute_transition_wm(struct drm_i915_private *i915,
2002                                       struct skl_wm_level *trans_wm,
2003                                       const struct skl_wm_level *wm0,
2004                                       const struct skl_wm_params *wp)
2005 {
2006         u16 trans_min, trans_amount, trans_y_tile_min;
2007         u16 wm0_blocks, trans_offset, blocks;
2008
2009         /* Transition WM don't make any sense if ipc is disabled */
2010         if (!skl_watermark_ipc_enabled(i915))
2011         /* Transition WMs don't make any sense if IPC is disabled */
2012
2013         /*
2014          * WaDisableTWM:skl,kbl,cfl,bxt
2015          * Transition WM are not recommended by HW team for GEN9
2016          */
2017          * Transition WMs are not recommended by the HW team for GEN9
2018                 return;
2019
2020         if (DISPLAY_VER(i915) >= 11)
2021                 trans_min = 4;
2022         else
2023                 trans_min = 14;
2024
2025         /* Display WA #1140: glk,cnl */
2026         if (DISPLAY_VER(i915) == 10)
2027                 trans_amount = 0;
2028         else
2029                 trans_amount = 10; /* This is a configurable amount */
2030
2031         trans_offset = trans_min + trans_amount;
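             /*
              * Illustrative example (numbers made up): DISPLAY_VER 11+ with
              * IPC enabled gives trans_offset = 4 + 10 = 14, so a linear
              * plane with wm0->blocks = 31 ends up with trans_wm->blocks =
              * (31 - 1) + 14 + 1 = 45.
              */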
2032
2033         /*
2034          * The spec asks for Selected Result Blocks for wm0 (the real value),
2035          * not Result Blocks (the integer value). Pay attention to the capital
2036          * letters. The value wm0->blocks is actually Result Blocks, but
2037          * since Result Blocks is the ceiling of Selected Result Blocks plus 1,
2038          * and since we later will have to get the ceiling of the sum in the
2039          * transition watermarks calculation, we can just pretend Selected
2040          * Result Blocks is Result Blocks minus 1 and it should work for the
2041          * current platforms.
2042          */
2043         wm0_blocks = wm0->blocks - 1;
2044
2045         if (wp->y_tiled) {
2046                 trans_y_tile_min =
2047                         (u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum);
2048                 blocks = max(wm0_blocks, trans_y_tile_min) + trans_offset;
2049         } else {
2050                 blocks = wm0_blocks + trans_offset;
2051         }
2052         blocks++;
2053
2054         /*
2055          * Just assume we can enable the transition watermark.  After
2056          * computing the DDB we'll come back and disable it if that
2057          * assumption turns out to be false.
2058          */
2059         trans_wm->blocks = blocks;
2060         trans_wm->min_ddb_alloc = max_t(u16, wm0->min_ddb_alloc, blocks + 1);
2061         trans_wm->enable = true;
2062 }
2063
2064 static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
2065                                      const struct intel_plane_state *plane_state,
2066                                      struct intel_plane *plane, int color_plane)
2067 {
2068         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2069         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2070         struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
2071         struct skl_wm_params wm_params;
2072         int ret;
2073
2074         ret = skl_compute_plane_wm_params(crtc_state, plane_state,
2075                                           &wm_params, color_plane);
2076         if (ret)
2077                 return ret;
2078
2079         skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->wm);
2080
2081         skl_compute_transition_wm(i915, &wm->trans_wm,
2082                                   &wm->wm[0], &wm_params);
2083
2084         if (DISPLAY_VER(i915) >= 12) {
2085                 tgl_compute_sagv_wm(crtc_state, plane, &wm_params, wm);
2086
2087                 skl_compute_transition_wm(i915, &wm->sagv.trans_wm,
2088                                           &wm->sagv.wm0, &wm_params);
2089         }
2090
2091         return 0;
2092 }
2093
2094 static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
2095                                  const struct intel_plane_state *plane_state,
2096                                  struct intel_plane *plane)
2097 {
2098         struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
2099         struct skl_wm_params wm_params;
2100         int ret;
2101
2102         wm->is_planar = true;
2103
2104         /* uv plane watermarks must also be validated for NV12/Planar */
2105         ret = skl_compute_plane_wm_params(crtc_state, plane_state,
2106                                           &wm_params, 1);
2107         if (ret)
2108                 return ret;
2109
2110         skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->uv_wm);
2111
2112         return 0;
2113 }
2114
2115 static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
2116                               const struct intel_plane_state *plane_state)
2117 {
2118         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2119         enum plane_id plane_id = plane->id;
2120         struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
2121         const struct drm_framebuffer *fb = plane_state->hw.fb;
2122         int ret;
2123
2124         memset(wm, 0, sizeof(*wm));
2125
2126         if (!intel_wm_plane_visible(crtc_state, plane_state))
2127                 return 0;
2128
2129         ret = skl_build_plane_wm_single(crtc_state, plane_state,
2130                                         plane, 0);
2131         if (ret)
2132                 return ret;
2133
2134         if (fb->format->is_yuv && fb->format->num_planes > 1) {
2135                 ret = skl_build_plane_wm_uv(crtc_state, plane_state,
2136                                             plane);
2137                 if (ret)
2138                         return ret;
2139         }
2140
2141         return 0;
2142 }
2143
2144 static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
2145                               const struct intel_plane_state *plane_state)
2146 {
2147         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2148         struct drm_i915_private *i915 = to_i915(plane->base.dev);
2149         enum plane_id plane_id = plane->id;
2150         struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
2151         int ret;
2152
2153         /* Watermarks are calculated by the planar master plane */
2154         if (plane_state->planar_slave)
2155                 return 0;
2156
2157         memset(wm, 0, sizeof(*wm));
2158
2159         if (plane_state->planar_linked_plane) {
2160                 const struct drm_framebuffer *fb = plane_state->hw.fb;
2161
2162                 drm_WARN_ON(&i915->drm,
2163                             !intel_wm_plane_visible(crtc_state, plane_state));
2164                 drm_WARN_ON(&i915->drm, !fb->format->is_yuv ||
2165                             fb->format->num_planes == 1);
2166
2167                 ret = skl_build_plane_wm_single(crtc_state, plane_state,
2168                                                 plane_state->planar_linked_plane, 0);
2169                 if (ret)
2170                         return ret;
2171
2172                 ret = skl_build_plane_wm_single(crtc_state, plane_state,
2173                                                 plane, 1);
2174                 if (ret)
2175                         return ret;
2176         } else if (intel_wm_plane_visible(crtc_state, plane_state)) {
2177                 ret = skl_build_plane_wm_single(crtc_state, plane_state,
2178                                                 plane, 0);
2179                 if (ret)
2180                         return ret;
2181         }
2182
2183         return 0;
2184 }
2185
2186 static int skl_build_pipe_wm(struct intel_atomic_state *state,
2187                              struct intel_crtc *crtc)
2188 {
2189         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2190         struct intel_crtc_state *crtc_state =
2191                 intel_atomic_get_new_crtc_state(state, crtc);
2192         const struct intel_plane_state *plane_state;
2193         struct intel_plane *plane;
2194         int ret, i;
2195
2196         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
2197                 /*
2198                  * FIXME should perhaps check {old,new}_plane_crtc->hw.crtc
2199                  * instead but we don't populate that correctly for NV12 Y
2200                  * planes, so hack around it for now.
2201                  */
2202                 if (plane->pipe != crtc->pipe)
2203                         continue;
2204
2205                 if (DISPLAY_VER(i915) >= 11)
2206                         ret = icl_build_plane_wm(crtc_state, plane_state);
2207                 else
2208                         ret = skl_build_plane_wm(crtc_state, plane_state);
2209                 if (ret)
2210                         return ret;
2211         }
2212
2213         crtc_state->wm.skl.optimal = crtc_state->wm.skl.raw;
2214
2215         return 0;
2216 }
2217
2218 static void skl_ddb_entry_write(struct drm_i915_private *i915,
2219                                 i915_reg_t reg,
2220                                 const struct skl_ddb_entry *entry)
2221 {
2222         if (entry->end)
2223                 intel_de_write_fw(i915, reg,
2224                                   PLANE_BUF_END(entry->end - 1) |
2225                                   PLANE_BUF_START(entry->start));
2226         else
2227                 intel_de_write_fw(i915, reg, 0);
2228 }
2229
2230 static void skl_write_wm_level(struct drm_i915_private *i915,
2231                                i915_reg_t reg,
2232                                const struct skl_wm_level *level)
2233 {
2234         u32 val = 0;
2235
2236         if (level->enable)
2237                 val |= PLANE_WM_EN;
2238         if (level->ignore_lines)
2239                 val |= PLANE_WM_IGNORE_LINES;
2240         val |= REG_FIELD_PREP(PLANE_WM_BLOCKS_MASK, level->blocks);
2241         val |= REG_FIELD_PREP(PLANE_WM_LINES_MASK, level->lines);
2242
2243         intel_de_write_fw(i915, reg, val);
2244 }
2245
2246 void skl_write_plane_wm(struct intel_plane *plane,
2247                         const struct intel_crtc_state *crtc_state)
2248 {
2249         struct drm_i915_private *i915 = to_i915(plane->base.dev);
2250         int level, max_level = ilk_wm_max_level(i915);
2251         enum plane_id plane_id = plane->id;
2252         enum pipe pipe = plane->pipe;
2253         const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
2254         const struct skl_ddb_entry *ddb =
2255                 &crtc_state->wm.skl.plane_ddb[plane_id];
2256         const struct skl_ddb_entry *ddb_y =
2257                 &crtc_state->wm.skl.plane_ddb_y[plane_id];
2258
2259         for (level = 0; level <= max_level; level++)
2260                 skl_write_wm_level(i915, PLANE_WM(pipe, plane_id, level),
2261                                    skl_plane_wm_level(pipe_wm, plane_id, level));
2262
2263         skl_write_wm_level(i915, PLANE_WM_TRANS(pipe, plane_id),
2264                            skl_plane_trans_wm(pipe_wm, plane_id));
2265
2266         if (HAS_HW_SAGV_WM(i915)) {
2267                 const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
2268
2269                 skl_write_wm_level(i915, PLANE_WM_SAGV(pipe, plane_id),
2270                                    &wm->sagv.wm0);
2271                 skl_write_wm_level(i915, PLANE_WM_SAGV_TRANS(pipe, plane_id),
2272                                    &wm->sagv.trans_wm);
2273         }
2274
2275         skl_ddb_entry_write(i915,
2276                             PLANE_BUF_CFG(pipe, plane_id), ddb);
2277
2278         if (DISPLAY_VER(i915) < 11)
2279                 skl_ddb_entry_write(i915,
2280                                     PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_y);
2281 }
2282
2283 void skl_write_cursor_wm(struct intel_plane *plane,
2284                          const struct intel_crtc_state *crtc_state)
2285 {
2286         struct drm_i915_private *i915 = to_i915(plane->base.dev);
2287         int level, max_level = ilk_wm_max_level(i915);
2288         enum plane_id plane_id = plane->id;
2289         enum pipe pipe = plane->pipe;
2290         const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
2291         const struct skl_ddb_entry *ddb =
2292                 &crtc_state->wm.skl.plane_ddb[plane_id];
2293
2294         for (level = 0; level <= max_level; level++)
2295                 skl_write_wm_level(i915, CUR_WM(pipe, level),
2296                                    skl_plane_wm_level(pipe_wm, plane_id, level));
2297
2298         skl_write_wm_level(i915, CUR_WM_TRANS(pipe),
2299                            skl_plane_trans_wm(pipe_wm, plane_id));
2300
2301         if (HAS_HW_SAGV_WM(i915)) {
2302                 const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
2303
2304                 skl_write_wm_level(i915, CUR_WM_SAGV(pipe),
2305                                    &wm->sagv.wm0);
2306                 skl_write_wm_level(i915, CUR_WM_SAGV_TRANS(pipe),
2307                                    &wm->sagv.trans_wm);
2308         }
2309
2310         skl_ddb_entry_write(i915, CUR_BUF_CFG(pipe), ddb);
2311 }
2312
2313 static bool skl_wm_level_equals(const struct skl_wm_level *l1,
2314                                 const struct skl_wm_level *l2)
2315 {
2316         return l1->enable == l2->enable &&
2317                 l1->ignore_lines == l2->ignore_lines &&
2318                 l1->lines == l2->lines &&
2319                 l1->blocks == l2->blocks;
2320 }
2321
2322 static bool skl_plane_wm_equals(struct drm_i915_private *i915,
2323                                 const struct skl_plane_wm *wm1,
2324                                 const struct skl_plane_wm *wm2)
2325 {
2326         int level, max_level = ilk_wm_max_level(i915);
2327
2328         for (level = 0; level <= max_level; level++) {
2329                 /*
2330                  * We don't check uv_wm as the hardware doesn't actually
2331                  * use it. It only gets used for calculating the required
2332                  * ddb allocation.
2333                  */
2334                 if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]))
2335                         return false;
2336         }
2337
2338         return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm) &&
2339                 skl_wm_level_equals(&wm1->sagv.wm0, &wm2->sagv.wm0) &&
2340                 skl_wm_level_equals(&wm1->sagv.trans_wm, &wm2->sagv.trans_wm);
2341 }
2342
2343 static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
2344                                     const struct skl_ddb_entry *b)
2345 {
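             /* [start, end) ranges overlap iff each starts before the other ends */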
2346         return a->start < b->end && b->start < a->end;
2347 }
2348
2349 static void skl_ddb_entry_union(struct skl_ddb_entry *a,
2350                                 const struct skl_ddb_entry *b)
2351 {
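             /* an entry with end == 0 is considered empty and contributes nothing */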
2352         if (a->end && b->end) {
2353                 a->start = min(a->start, b->start);
2354                 a->end = max(a->end, b->end);
2355         } else if (b->end) {
2356                 a->start = b->start;
2357                 a->end = b->end;
2358         }
2359 }
2360
2361 bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
2362                                  const struct skl_ddb_entry *entries,
2363                                  int num_entries, int ignore_idx)
2364 {
2365         int i;
2366
2367         for (i = 0; i < num_entries; i++) {
2368                 if (i != ignore_idx &&
2369                     skl_ddb_entries_overlap(ddb, &entries[i]))
2370                         return true;
2371         }
2372
2373         return false;
2374 }
2375
2376 static int
2377 skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
2378                             struct intel_crtc_state *new_crtc_state)
2379 {
2380         struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state);
2381         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
2382         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2383         struct intel_plane *plane;
2384
2385         for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
2386                 struct intel_plane_state *plane_state;
2387                 enum plane_id plane_id = plane->id;
2388
2389                 if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb[plane_id],
2390                                         &new_crtc_state->wm.skl.plane_ddb[plane_id]) &&
2391                     skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
2392                                         &new_crtc_state->wm.skl.plane_ddb_y[plane_id]))
2393                         continue;
2394
2395                 plane_state = intel_atomic_get_plane_state(state, plane);
2396                 if (IS_ERR(plane_state))
2397                         return PTR_ERR(plane_state);
2398
2399                 new_crtc_state->update_planes |= BIT(plane_id);
2400         }
2401
2402         return 0;
2403 }
2404
2405 static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
2406 {
2407         struct drm_i915_private *i915 = to_i915(dbuf_state->base.state->base.dev);
2408         u8 enabled_slices;
2409         enum pipe pipe;
2410
2411         /*
2412          * FIXME: For now we always enable slice S1 as per
2413          * the Bspec display initialization sequence.
2414          */
2415         enabled_slices = BIT(DBUF_S1);
2416
2417         for_each_pipe(i915, pipe)
2418                 enabled_slices |= dbuf_state->slices[pipe];
2419
2420         return enabled_slices;
2421 }
2422
2423 static int
2424 skl_compute_ddb(struct intel_atomic_state *state)
2425 {
2426         struct drm_i915_private *i915 = to_i915(state->base.dev);
2427         const struct intel_dbuf_state *old_dbuf_state;
2428         struct intel_dbuf_state *new_dbuf_state = NULL;
2429         const struct intel_crtc_state *old_crtc_state;
2430         struct intel_crtc_state *new_crtc_state;
2431         struct intel_crtc *crtc;
2432         int ret, i;
2433
2434         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
2435                 new_dbuf_state = intel_atomic_get_dbuf_state(state);
2436                 if (IS_ERR(new_dbuf_state))
2437                         return PTR_ERR(new_dbuf_state);
2438
2439                 old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
2440                 break;
2441         }
2442
2443         if (!new_dbuf_state)
2444                 return 0;
2445
2446         new_dbuf_state->active_pipes =
2447                 intel_calc_active_pipes(state, old_dbuf_state->active_pipes);
2448
2449         if (old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) {
2450                 ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
2451                 if (ret)
2452                         return ret;
2453         }
2454
2455         if (HAS_MBUS_JOINING(i915))
2456                 new_dbuf_state->joined_mbus =
2457                         adlp_check_mbus_joined(new_dbuf_state->active_pipes);
2458
2459         for_each_intel_crtc(&i915->drm, crtc) {
2460                 enum pipe pipe = crtc->pipe;
2461
2462                 new_dbuf_state->slices[pipe] =
2463                         skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes,
2464                                                 new_dbuf_state->joined_mbus);
2465
2466                 if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe])
2467                         continue;
2468
2469                 ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
2470                 if (ret)
2471                         return ret;
2472         }
2473
2474         new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state);
2475
2476         if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices ||
2477             old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
2478                 ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
2479                 if (ret)
2480                         return ret;
2481
2482                 if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
2483                         /* TODO: Implement vblank synchronized MBUS joining changes */
2484                         ret = intel_modeset_all_pipes(state, "MBUS joining change");
2485                         if (ret)
2486                                 return ret;
2487                 }
2488
2489                 drm_dbg_kms(&i915->drm,
2490                             "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n",
2491                             old_dbuf_state->enabled_slices,
2492                             new_dbuf_state->enabled_slices,
2493                             INTEL_INFO(i915)->display.dbuf.slice_mask,
2494                             str_yes_no(old_dbuf_state->joined_mbus),
2495                             str_yes_no(new_dbuf_state->joined_mbus));
2496         }
2497
2498         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
2499                 enum pipe pipe = crtc->pipe;
2500
2501                 new_dbuf_state->weight[pipe] = intel_crtc_ddb_weight(new_crtc_state);
2502
2503                 if (old_dbuf_state->weight[pipe] == new_dbuf_state->weight[pipe])
2504                         continue;
2505
2506                 ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
2507                 if (ret)
2508                         return ret;
2509         }
2510
2511         for_each_intel_crtc(&i915->drm, crtc) {
2512                 ret = skl_crtc_allocate_ddb(state, crtc);
2513                 if (ret)
2514                         return ret;
2515         }
2516
2517         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
2518                                             new_crtc_state, i) {
2519                 ret = skl_crtc_allocate_plane_ddb(state, crtc);
2520                 if (ret)
2521                         return ret;
2522
2523                 ret = skl_ddb_add_affected_planes(old_crtc_state,
2524                                                   new_crtc_state);
2525                 if (ret)
2526                         return ret;
2527         }
2528
2529         return 0;
2530 }
2531
2532 static char enast(bool enable)
2533 {
2534         return enable ? '*' : ' ';
2535 }
2536
2537 static void
2538 skl_print_wm_changes(struct intel_atomic_state *state)
2539 {
2540         struct drm_i915_private *i915 = to_i915(state->base.dev);
2541         const struct intel_crtc_state *old_crtc_state;
2542         const struct intel_crtc_state *new_crtc_state;
2543         struct intel_plane *plane;
2544         struct intel_crtc *crtc;
2545         int i;
2546
2547         if (!drm_debug_enabled(DRM_UT_KMS))
2548                 return;
2549
2550         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
2551                                             new_crtc_state, i) {
2552                 const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm;
2553
2554                 old_pipe_wm = &old_crtc_state->wm.skl.optimal;
2555                 new_pipe_wm = &new_crtc_state->wm.skl.optimal;
2556
2557                 for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
2558                         enum plane_id plane_id = plane->id;
2559                         const struct skl_ddb_entry *old, *new;
2560
2561                         old = &old_crtc_state->wm.skl.plane_ddb[plane_id];
2562                         new = &new_crtc_state->wm.skl.plane_ddb[plane_id];
2563
2564                         if (skl_ddb_entry_equal(old, new))
2565                                 continue;
2566
2567                         drm_dbg_kms(&i915->drm,
2568                                     "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
2569                                     plane->base.base.id, plane->base.name,
2570                                     old->start, old->end, new->start, new->end,
2571                                     skl_ddb_entry_size(old), skl_ddb_entry_size(new));
2572                 }
2573
2574                 for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
2575                         enum plane_id plane_id = plane->id;
2576                         const struct skl_plane_wm *old_wm, *new_wm;
2577
2578                         old_wm = &old_pipe_wm->planes[plane_id];
2579                         new_wm = &new_pipe_wm->planes[plane_id];
2580
2581                         if (skl_plane_wm_equals(i915, old_wm, new_wm))
2582                                 continue;
2583
2584                         drm_dbg_kms(&i915->drm,
2585                                     "[PLANE:%d:%s]   level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm"
2586                                     " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n",
2587                                     plane->base.base.id, plane->base.name,
2588                                     enast(old_wm->wm[0].enable), enast(old_wm->wm[1].enable),
2589                                     enast(old_wm->wm[2].enable), enast(old_wm->wm[3].enable),
2590                                     enast(old_wm->wm[4].enable), enast(old_wm->wm[5].enable),
2591                                     enast(old_wm->wm[6].enable), enast(old_wm->wm[7].enable),
2592                                     enast(old_wm->trans_wm.enable),
2593                                     enast(old_wm->sagv.wm0.enable),
2594                                     enast(old_wm->sagv.trans_wm.enable),
2595                                     enast(new_wm->wm[0].enable), enast(new_wm->wm[1].enable),
2596                                     enast(new_wm->wm[2].enable), enast(new_wm->wm[3].enable),
2597                                     enast(new_wm->wm[4].enable), enast(new_wm->wm[5].enable),
2598                                     enast(new_wm->wm[6].enable), enast(new_wm->wm[7].enable),
2599                                     enast(new_wm->trans_wm.enable),
2600                                     enast(new_wm->sagv.wm0.enable),
2601                                     enast(new_wm->sagv.trans_wm.enable));
2602
2603                         drm_dbg_kms(&i915->drm,
2604                                     "[PLANE:%d:%s]   lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d"
2605                                       " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n",
2606                                     plane->base.base.id, plane->base.name,
2607                                     enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].lines,
2608                                     enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].lines,
2609                                     enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].lines,
2610                                     enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].lines,
2611                                     enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].lines,
2612                                     enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].lines,
2613                                     enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].lines,
2614                                     enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].lines,
2615                                     enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.lines,
2616                                     enast(old_wm->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines,
2617                                     enast(old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm.lines,
2618                                     enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].lines,
2619                                     enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].lines,
2620                                     enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].lines,
2621                                     enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].lines,
2622                                     enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].lines,
2623                                     enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].lines,
2624                                     enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].lines,
2625                                     enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].lines,
2626                                     enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.lines,
2627                                     enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines,
2628                                     enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines);
2629
2630                         drm_dbg_kms(&i915->drm,
2631                                     "[PLANE:%d:%s]  blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
2632                                     " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
2633                                     plane->base.base.id, plane->base.name,
2634                                     old_wm->wm[0].blocks, old_wm->wm[1].blocks,
2635                                     old_wm->wm[2].blocks, old_wm->wm[3].blocks,
2636                                     old_wm->wm[4].blocks, old_wm->wm[5].blocks,
2637                                     old_wm->wm[6].blocks, old_wm->wm[7].blocks,
2638                                     old_wm->trans_wm.blocks,
2639                                     old_wm->sagv.wm0.blocks,
2640                                     old_wm->sagv.trans_wm.blocks,
2641                                     new_wm->wm[0].blocks, new_wm->wm[1].blocks,
2642                                     new_wm->wm[2].blocks, new_wm->wm[3].blocks,
2643                                     new_wm->wm[4].blocks, new_wm->wm[5].blocks,
2644                                     new_wm->wm[6].blocks, new_wm->wm[7].blocks,
2645                                     new_wm->trans_wm.blocks,
2646                                     new_wm->sagv.wm0.blocks,
2647                                     new_wm->sagv.trans_wm.blocks);
2648
2649                         drm_dbg_kms(&i915->drm,
2650                                     "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
2651                                     " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
2652                                     plane->base.base.id, plane->base.name,
2653                                     old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
2654                                     old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
2655                                     old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
2656                                     old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
2657                                     old_wm->trans_wm.min_ddb_alloc,
2658                                     old_wm->sagv.wm0.min_ddb_alloc,
2659                                     old_wm->sagv.trans_wm.min_ddb_alloc,
2660                                     new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
2661                                     new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
2662                                     new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
2663                                     new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
2664                                     new_wm->trans_wm.min_ddb_alloc,
2665                                     new_wm->sagv.wm0.min_ddb_alloc,
2666                                     new_wm->sagv.trans_wm.min_ddb_alloc);
2667                 }
2668         }
2669 }
2670
2671 static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
2672                                          const struct skl_pipe_wm *old_pipe_wm,
2673                                          const struct skl_pipe_wm *new_pipe_wm)
2674 {
2675         struct drm_i915_private *i915 = to_i915(plane->base.dev);
2676         int level, max_level = ilk_wm_max_level(i915);
2677
2678         for (level = 0; level <= max_level; level++) {
2679                 /*
2680                  * We don't check uv_wm as the hardware doesn't actually
2681                  * use it. It only gets used for calculating the required
2682                  * ddb allocation.
2683                  */
2684                 if (!skl_wm_level_equals(skl_plane_wm_level(old_pipe_wm, plane->id, level),
2685                                          skl_plane_wm_level(new_pipe_wm, plane->id, level)))
2686                         return false;
2687         }
2688
2689         if (HAS_HW_SAGV_WM(i915)) {
2690                 const struct skl_plane_wm *old_wm = &old_pipe_wm->planes[plane->id];
2691                 const struct skl_plane_wm *new_wm = &new_pipe_wm->planes[plane->id];
2692
2693                 if (!skl_wm_level_equals(&old_wm->sagv.wm0, &new_wm->sagv.wm0) ||
2694                     !skl_wm_level_equals(&old_wm->sagv.trans_wm, &new_wm->sagv.trans_wm))
2695                         return false;
2696         }
2697
2698         return skl_wm_level_equals(skl_plane_trans_wm(old_pipe_wm, plane->id),
2699                                    skl_plane_trans_wm(new_pipe_wm, plane->id));
2700 }
2701
2702 /*
2703  * To make sure the cursor watermark registers are always consistent
2704  * with our computed state, the following scenario needs special
2705  * treatment:
2706  *
2707  * 1. enable cursor
2708  * 2. move cursor entirely offscreen
2709  * 3. disable cursor
2710  *
2711  * Step 2. does call .disable_plane() but does not zero the watermarks
2712  * (since we consider an offscreen cursor still active for the purposes
2713  * of watermarks). Step 3. would not normally call .disable_plane()
2714  * because the actual plane visibility isn't changing, and we don't
2715  * deallocate the cursor ddb until the pipe gets disabled. So we must
2716  * force step 3. to call .disable_plane() to update the watermark
2717  * registers properly.
2718  *
2719  * Other planes do not suffer from this issue as their watermarks are
2720  * calculated based on the actual plane visibility. The only time this
2721  * can trigger for the other planes is during the initial readout, as the
2722  * default value of the watermark registers is not zero.
2723  */
2724 static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
2725                                       struct intel_crtc *crtc)
2726 {
2727         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2728         const struct intel_crtc_state *old_crtc_state =
2729                 intel_atomic_get_old_crtc_state(state, crtc);
2730         struct intel_crtc_state *new_crtc_state =
2731                 intel_atomic_get_new_crtc_state(state, crtc);
2732         struct intel_plane *plane;
2733
2734         for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
2735                 struct intel_plane_state *plane_state;
2736                 enum plane_id plane_id = plane->id;
2737
2738                 /*
2739                  * Force a full wm update for every plane on modeset.
2740                  * Required because the reset value of the wm registers
2741                  * is non-zero, whereas we want all disabled planes to
2742                  * have zero watermarks. So if we turn off the relevant
2743                  * power well the hardware state will go out of sync
2744                  * with the software state.
2745                  */
2746                 if (!intel_crtc_needs_modeset(new_crtc_state) &&
2747                     skl_plane_selected_wm_equals(plane,
2748                                                  &old_crtc_state->wm.skl.optimal,
2749                                                  &new_crtc_state->wm.skl.optimal))
2750                         continue;
2751
2752                 plane_state = intel_atomic_get_plane_state(state, plane);
2753                 if (IS_ERR(plane_state))
2754                         return PTR_ERR(plane_state);
2755
2756                 new_crtc_state->update_planes |= BIT(plane_id);
2757         }
2758
2759         return 0;
2760 }
2761
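     /*
      * Compute the watermarks for all pipes in the atomic state: build
      * the per-pipe watermarks, distribute the DDB, compute the SAGV
      * mask, then add any plane whose final watermarks changed.
      */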
2762 static int
2763 skl_compute_wm(struct intel_atomic_state *state)
2764 {
2765         struct intel_crtc *crtc;
2766         struct intel_crtc_state *new_crtc_state;
2767         int ret, i;
2768
2769         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
2770                 ret = skl_build_pipe_wm(state, crtc);
2771                 if (ret)
2772                         return ret;
2773         }
2774
2775         ret = skl_compute_ddb(state);
2776         if (ret)
2777                 return ret;
2778
2779         ret = intel_compute_sagv_mask(state);
2780         if (ret)
2781                 return ret;
2782
2783         /*
2784          * skl_compute_ddb() will have adjusted the final watermarks
2785          * based on how much ddb is available. Now we can actually
2786          * check if the final watermarks changed.
2787          */
2788         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
2789                 ret = skl_wm_add_affected_planes(state, crtc);
2790                 if (ret)
2791                         return ret;
2792         }
2793
2794         skl_print_wm_changes(state);
2795
2796         return 0;
2797 }
2798
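     /* Decode a single watermark level from its PLANE_WM/CUR_WM register value. */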
2799 static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level)
2800 {
2801         level->enable = val & PLANE_WM_EN;
2802         level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
2803         level->blocks = REG_FIELD_GET(PLANE_WM_BLOCKS_MASK, val);
2804         level->lines = REG_FIELD_GET(PLANE_WM_LINES_MASK, val);
2805 }
2806
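     /*
      * Read back all watermark levels (normal, transition and, where
      * supported, SAGV) for every plane on the pipe.
      */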
2807 static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
2808                                      struct skl_pipe_wm *out)
2809 {
2810         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2811         enum pipe pipe = crtc->pipe;
2812         int level, max_level;
2813         enum plane_id plane_id;
2814         u32 val;
2815
2816         max_level = ilk_wm_max_level(i915);
2817
2818         for_each_plane_id_on_crtc(crtc, plane_id) {
2819                 struct skl_plane_wm *wm = &out->planes[plane_id];
2820
2821                 for (level = 0; level <= max_level; level++) {
2822                         if (plane_id != PLANE_CURSOR)
2823                                 val = intel_de_read(i915, PLANE_WM(pipe, plane_id, level));
2824                         else
2825                                 val = intel_de_read(i915, CUR_WM(pipe, level));
2826
2827                         skl_wm_level_from_reg_val(val, &wm->wm[level]);
2828                 }
2829
2830                 if (plane_id != PLANE_CURSOR)
2831                         val = intel_de_read(i915, PLANE_WM_TRANS(pipe, plane_id));
2832                 else
2833                         val = intel_de_read(i915, CUR_WM_TRANS(pipe));
2834
2835                 skl_wm_level_from_reg_val(val, &wm->trans_wm);
2836
2837                 if (HAS_HW_SAGV_WM(i915)) {
2838                         if (plane_id != PLANE_CURSOR)
2839                                 val = intel_de_read(i915, PLANE_WM_SAGV(pipe, plane_id));
2840                         else
2841                                 val = intel_de_read(i915, CUR_WM_SAGV(pipe));
2842
2843                         skl_wm_level_from_reg_val(val, &wm->sagv.wm0);
2844
2845                         if (plane_id != PLANE_CURSOR)
2846                                 val = intel_de_read(i915, PLANE_WM_SAGV_TRANS(pipe, plane_id));
2847                         else
2848                                 val = intel_de_read(i915, CUR_WM_SAGV_TRANS(pipe));
2849
2850                         skl_wm_level_from_reg_val(val, &wm->sagv.trans_wm);
2851                 } else if (DISPLAY_VER(i915) >= 12) {
2852                         wm->sagv.wm0 = wm->wm[0];
2853                         wm->sagv.trans_wm = wm->trans_wm;
2854                 }
2855         }
2856 }
2857
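     /*
      * Read out the watermark and DDB hardware state for all pipes and
      * reconstruct the matching dbuf state from it.
      */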
2858 void skl_wm_get_hw_state(struct drm_i915_private *i915)
2859 {
2860         struct intel_dbuf_state *dbuf_state =
2861                 to_intel_dbuf_state(i915->display.dbuf.obj.state);
2862         struct intel_crtc *crtc;
2863
2864         if (HAS_MBUS_JOINING(i915))
2865                 dbuf_state->joined_mbus = intel_de_read(i915, MBUS_CTL) & MBUS_JOIN;
2866
2867         for_each_intel_crtc(&i915->drm, crtc) {
2868                 struct intel_crtc_state *crtc_state =
2869                         to_intel_crtc_state(crtc->base.state);
2870                 enum pipe pipe = crtc->pipe;
2871                 unsigned int mbus_offset;
2872                 enum plane_id plane_id;
2873                 u8 slices;
2874
2875                 memset(&crtc_state->wm.skl.optimal, 0,
2876                        sizeof(crtc_state->wm.skl.optimal));
2877                 if (crtc_state->hw.active)
2878                         skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
2879                 crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;
2880
2881                 memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe]));
2882
2883                 for_each_plane_id_on_crtc(crtc, plane_id) {
2884                         struct skl_ddb_entry *ddb =
2885                                 &crtc_state->wm.skl.plane_ddb[plane_id];
2886                         struct skl_ddb_entry *ddb_y =
2887                                 &crtc_state->wm.skl.plane_ddb_y[plane_id];
2888
2889                         if (!crtc_state->hw.active)
2890                                 continue;
2891
2892                         skl_ddb_get_hw_plane_state(i915, crtc->pipe,
2893                                                    plane_id, ddb, ddb_y);
2894
2895                         skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb);
2896                         skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_y);
2897                 }
2898
2899                 dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state);
2900
2901                 /*
2902                  * Used for checking overlaps, so we need absolute
2903                  * offsets instead of MBUS relative offsets.
2904                  */
2905                 slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
2906                                                  dbuf_state->joined_mbus);
2907                 mbus_offset = mbus_ddb_offset(i915, slices);
2908                 crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start;
2909                 crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end;
2910
2911                 /* The slices actually used by the planes on the pipe */
2912                 dbuf_state->slices[pipe] =
2913                         skl_ddb_dbuf_slice_mask(i915, &crtc_state->wm.skl.ddb);
2914
2915                 drm_dbg_kms(&i915->drm,
2916                             "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",
2917                             crtc->base.base.id, crtc->base.name,
2918                             dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start,
2919                             dbuf_state->ddb[pipe].end, dbuf_state->active_pipes,
2920                             str_yes_no(dbuf_state->joined_mbus));
2921         }
2922
2923         dbuf_state->enabled_slices = i915->display.dbuf.enabled_slices;
2924 }
2925
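     /*
      * Check for a bogus DBUF layout: a pipe using slices not assigned
      * to it, or overlapping DDB allocations between pipes.
      */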
2926 static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
2927 {
2928         const struct intel_dbuf_state *dbuf_state =
2929                 to_intel_dbuf_state(i915->display.dbuf.obj.state);
2930         struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
2931         struct intel_crtc *crtc;
2932
2933         for_each_intel_crtc(&i915->drm, crtc) {
2934                 const struct intel_crtc_state *crtc_state =
2935                         to_intel_crtc_state(crtc->base.state);
2936
2937                 entries[crtc->pipe] = crtc_state->wm.skl.ddb;
2938         }
2939
2940         for_each_intel_crtc(&i915->drm, crtc) {
2941                 const struct intel_crtc_state *crtc_state =
2942                         to_intel_crtc_state(crtc->base.state);
2943                 u8 slices;
2944
2945                 slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
2946                                                  dbuf_state->joined_mbus);
2947                 if (dbuf_state->slices[crtc->pipe] & ~slices)
2948                         return true;
2949
2950                 if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries,
2951                                                 I915_MAX_PIPES, crtc->pipe))
2952                         return true;
2953         }
2954
2955         return false;
2956 }
2957
2958 void skl_wm_sanitize(struct drm_i915_private *i915)
2959 {
2960         struct intel_crtc *crtc;
2961
2962         /*
2963          * On TGL/RKL (at least) the BIOS likes to assign the planes
2964          * to the wrong DBUF slices. This will cause an infinite loop
2965          * in skl_commit_modeset_enables() as it can't find a way to
2966          * transition between the old bogus DBUF layout to the new
2967          * proper DBUF layout without DBUF allocation overlaps between
2968          * the planes (which cannot be allowed or else the hardware
2969          * may hang). If we detect a bogus DBUF layout just turn off
2970          * all the planes so that skl_commit_modeset_enables() can
2971          * simply ignore them.
2972          */
2973         if (!skl_dbuf_is_misconfigured(i915))
2974                 return;
2975
2976         drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
2977
2978         for_each_intel_crtc(&i915->drm, crtc) {
2979                 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
2980                 const struct intel_plane_state *plane_state =
2981                         to_intel_plane_state(plane->base.state);
2982                 struct intel_crtc_state *crtc_state =
2983                         to_intel_crtc_state(crtc->base.state);
2984
2985                 if (plane_state->uapi.visible)
2986                         intel_plane_disable_noatomic(crtc, plane);
2987
2988                 drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);
2989
2990                 memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
2991         }
2992 }
2993
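     /*
      * Verify that the watermark and DDB state read back from the
      * hardware matches the software state we computed.
      */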
2994 void intel_wm_state_verify(struct intel_crtc *crtc,
2995                            struct intel_crtc_state *new_crtc_state)
2996 {
2997         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2998         struct skl_hw_state {
2999                 struct skl_ddb_entry ddb[I915_MAX_PLANES];
3000                 struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
3001                 struct skl_pipe_wm wm;
3002         } *hw;
3003         const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
3004         int level, max_level = ilk_wm_max_level(i915);
3005         struct intel_plane *plane;
3006         u8 hw_enabled_slices;
3007
3008         if (DISPLAY_VER(i915) < 9 || !new_crtc_state->hw.active)
3009                 return;
3010
3011         hw = kzalloc(sizeof(*hw), GFP_KERNEL);
3012         if (!hw)
3013                 return;
3014
3015         skl_pipe_wm_get_hw_state(crtc, &hw->wm);
3016
3017         skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y);
3018
3019         hw_enabled_slices = intel_enabled_dbuf_slices_mask(i915);
3020
3021         if (DISPLAY_VER(i915) >= 11 &&
3022             hw_enabled_slices != i915->display.dbuf.enabled_slices)
3023                 drm_err(&i915->drm,
3024                         "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
3025                         i915->display.dbuf.enabled_slices,
3026                         hw_enabled_slices);
3027
3028         for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
3029                 const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
3030                 const struct skl_wm_level *hw_wm_level, *sw_wm_level;
3031
3032                 /* Watermarks */
3033                 for (level = 0; level <= max_level; level++) {
3034                         hw_wm_level = &hw->wm.planes[plane->id].wm[level];
3035                         sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
3036
3037                         if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
3038                                 continue;
3039
3040                         drm_err(&i915->drm,
3041                                 "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
3042                                 plane->base.base.id, plane->base.name, level,
3043                                 sw_wm_level->enable,
3044                                 sw_wm_level->blocks,
3045                                 sw_wm_level->lines,
3046                                 hw_wm_level->enable,
3047                                 hw_wm_level->blocks,
3048                                 hw_wm_level->lines);
3049                 }
3050
3051                 hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
3052                 sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
3053
3054                 if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
3055                         drm_err(&i915->drm,
3056                                 "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
3057                                 plane->base.base.id, plane->base.name,
3058                                 sw_wm_level->enable,
3059                                 sw_wm_level->blocks,
3060                                 sw_wm_level->lines,
3061                                 hw_wm_level->enable,
3062                                 hw_wm_level->blocks,
3063                                 hw_wm_level->lines);
3064                 }
3065
3066                 hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
3067                 sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
3068
3069                 if (HAS_HW_SAGV_WM(i915) &&
3070                     !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
3071                         drm_err(&i915->drm,
3072                                 "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
3073                                 plane->base.base.id, plane->base.name,
3074                                 sw_wm_level->enable,
3075                                 sw_wm_level->blocks,
3076                                 sw_wm_level->lines,
3077                                 hw_wm_level->enable,
3078                                 hw_wm_level->blocks,
3079                                 hw_wm_level->lines);
3080                 }
3081
3082                 hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
3083                 sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
3084
3085                 if (HAS_HW_SAGV_WM(i915) &&
3086                     !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
3087                         drm_err(&i915->drm,
3088                                 "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
3089                                 plane->base.base.id, plane->base.name,
3090                                 sw_wm_level->enable,
3091                                 sw_wm_level->blocks,
3092                                 sw_wm_level->lines,
3093                                 hw_wm_level->enable,
3094                                 hw_wm_level->blocks,
3095                                 hw_wm_level->lines);
3096                 }
3097
3098                 /* DDB */
3099                 hw_ddb_entry = &hw->ddb[PLANE_CURSOR];
3100                 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
3101
3102                 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
3103                         drm_err(&i915->drm,
3104                                 "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
3105                                 plane->base.base.id, plane->base.name,
3106                                 sw_ddb_entry->start, sw_ddb_entry->end,
3107                                 hw_ddb_entry->start, hw_ddb_entry->end);
3108                 }
3109         }
3110
3111         kfree(hw);
3112 }
3113
3114 bool skl_watermark_ipc_enabled(struct drm_i915_private *i915)
3115 {
3116         return i915->display.wm.ipc_enabled;
3117 }
3118
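     /* Apply the current IPC enable setting to the hardware. */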
3119 void skl_watermark_ipc_update(struct drm_i915_private *i915)
3120 {
3121         if (!HAS_IPC(i915))
3122                 return;
3123
3124         intel_de_rmw(i915, DISP_ARB_CTL2, DISP_IPC_ENABLE,
3125                      skl_watermark_ipc_enabled(i915) ? DISP_IPC_ENABLE : 0);
3126 }
3127
3128 static bool skl_watermark_ipc_can_enable(struct drm_i915_private *i915)
3129 {
3130         /* Display WA #0477 WaDisableIPC: skl */
3131         if (IS_SKYLAKE(i915))
3132                 return false;
3133
3134         /* Display WA #1141: SKL:all KBL:all CFL */
3135         if (IS_KABYLAKE(i915) ||
3136             IS_COFFEELAKE(i915) ||
3137             IS_COMETLAKE(i915))
3138                 return i915->dram_info.symmetric_memory;
3139
3140         return true;
3141 }
3142
3143 void skl_watermark_ipc_init(struct drm_i915_private *i915)
3144 {
3145         if (!HAS_IPC(i915))
3146                 return;
3147
3148         i915->display.wm.ipc_enabled = skl_watermark_ipc_can_enable(i915);
3149
3150         skl_watermark_ipc_update(i915);
3151 }
3152
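     /*
      * Sanitize the latency values: disable all levels above the first
      * zero latency level, apply the WaWmMemoryReadLatency adjustment,
      * and bump level 0 when the 16GB DIMM WA is needed.
      */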
3153 static void
3154 adjust_wm_latency(struct drm_i915_private *i915,
3155                   u16 wm[], int max_level, int read_latency)
3156 {
3157         bool wm_lv_0_adjust_needed = i915->dram_info.wm_lv_0_adjust_needed;
3158         int i, level;
3159
3160         /*
3161          * If a level n (n > 0) has a 0us latency, all levels m (m >= n)
3162          * need to be disabled. We make sure to sanitize the values out
3163          * of the punit to satisfy this requirement.
3164          */
3165         for (level = 1; level <= max_level; level++) {
3166                 if (wm[level] == 0) {
3167                         for (i = level + 1; i <= max_level; i++)
3168                                 wm[i] = 0;
3169
3170                         max_level = level - 1;
3171                         break;
3172                 }
3173         }
3174
3175         /*
3176          * WaWmMemoryReadLatency
3177          *
3178          * The punit doesn't take the read latency into account, so we
3179          * need to add a proper adjustment to each valid level we
3180          * retrieve from the punit when the level 0 response data is 0us.
3181          */
3182         if (wm[0] == 0) {
3183                 for (level = 0; level <= max_level; level++)
3184                         wm[level] += read_latency;
3185         }
3186
3187         /*
3188          * WA Level-0 adjustment for 16GB DIMMs: SKL+
3189          * If we could not get the DIMM info, assume 16GB DIMMs (the
3190          * worst case) and enable this WA so that we avoid any
3191          * underruns.
3192          */
3193         if (wm_lv_0_adjust_needed)
3194                 wm[0] += 1;
3195 }
3196
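     /* MTL+ reports the latencies via dedicated registers instead of pcode. */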
3197 static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
3198 {
3199         int max_level = ilk_wm_max_level(i915);
3200         u32 val;
3201
3202         val = intel_de_read(i915, MTL_LATENCY_LP0_LP1);
3203         wm[0] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
3204         wm[1] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
3205
3206         val = intel_de_read(i915, MTL_LATENCY_LP2_LP3);
3207         wm[2] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
3208         wm[3] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
3209
3210         val = intel_de_read(i915, MTL_LATENCY_LP4_LP5);
3211         wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
3212         wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
3213
3214         adjust_wm_latency(i915, wm, max_level, 6);
3215 }
3216
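     /*
      * Read the two sets of memory latencies (levels 0-3 and 4-7) from
      * the pcode mailbox, doubling the values on DG2.
      */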
3217 static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
3218 {
3219         int max_level = ilk_wm_max_level(i915);
3220         int read_latency = DISPLAY_VER(i915) >= 12 ? 3 : 2;
3221         int mult = IS_DG2(i915) ? 2 : 1;
3222         u32 val;
3223         int ret;
3224
3225         /* read the first set of memory latencies[0:3] */
3226         val = 0; /* data0 to be programmed to 0 for first set */
3227         ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
3228         if (ret) {
3229                 drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret);
3230                 return;
3231         }
3232
3233         wm[0] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult;
3234         wm[1] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult;
3235         wm[2] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
3236         wm[3] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;
3237
3238         /* read the second set of memory latencies[4:7] */
3239         val = 1; /* data0 to be programmed to 1 for second set */
3240         ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
3241         if (ret) {
3242                 drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret);
3243                 return;
3244         }
3245
3246         wm[4] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult;
3247         wm[5] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult;
3248         wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
3249         wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;
3250
3251         adjust_wm_latency(i915, wm, max_level, read_latency);
3252 }
3253
3254 static void skl_setup_wm_latency(struct drm_i915_private *i915)
3255 {
3256         if (DISPLAY_VER(i915) >= 14)
3257                 mtl_read_wm_latency(i915, i915->display.wm.skl_latency);
3258         else
3259                 skl_read_wm_latency(i915, i915->display.wm.skl_latency);
3260
3261         intel_print_wm_latency(i915, "Gen9 Plane", i915->display.wm.skl_latency);
3262 }
3263
3264 static const struct intel_wm_funcs skl_wm_funcs = {
3265         .compute_global_watermarks = skl_compute_wm,
3266 };
3267
3268 void skl_wm_init(struct drm_i915_private *i915)
3269 {
3270         intel_sagv_init(i915);
3271
3272         skl_setup_wm_latency(i915);
3273
3274         i915->display.funcs.wm = &skl_wm_funcs;
3275 }
3276
3277 static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj)
3278 {
3279         struct intel_dbuf_state *dbuf_state;
3280
3281         dbuf_state = kmemdup(obj->state, sizeof(*dbuf_state), GFP_KERNEL);
3282         if (!dbuf_state)
3283                 return NULL;
3284
3285         return &dbuf_state->base;
3286 }
3287
3288 static void intel_dbuf_destroy_state(struct intel_global_obj *obj,
3289                                      struct intel_global_state *state)
3290 {
3291         kfree(state);
3292 }
3293
3294 static const struct intel_global_state_funcs intel_dbuf_funcs = {
3295         .atomic_duplicate_state = intel_dbuf_duplicate_state,
3296         .atomic_destroy_state = intel_dbuf_destroy_state,
3297 };
3298
3299 struct intel_dbuf_state *
3300 intel_atomic_get_dbuf_state(struct intel_atomic_state *state)
3301 {
3302         struct drm_i915_private *i915 = to_i915(state->base.dev);
3303         struct intel_global_state *dbuf_state;
3304
3305         dbuf_state = intel_atomic_get_global_obj_state(state, &i915->display.dbuf.obj);
3306         if (IS_ERR(dbuf_state))
3307                 return ERR_CAST(dbuf_state);
3308
3309         return to_intel_dbuf_state(dbuf_state);
3310 }
3311
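     /* Allocate and register the initial dbuf global state object. */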
3312 int intel_dbuf_init(struct drm_i915_private *i915)
3313 {
3314         struct intel_dbuf_state *dbuf_state;
3315
3316         dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL);
3317         if (!dbuf_state)
3318                 return -ENOMEM;
3319
3320         intel_atomic_global_obj_init(i915, &i915->display.dbuf.obj,
3321                                      &dbuf_state->base, &intel_dbuf_funcs);
3322
3323         return 0;
3324 }
3325
3326 /*
3327  * Configure MBUS_CTL and the DBUF_CTL_S of each slice to the join_mbus
3328  * state before updating the request state of all DBUF slices.
3329  */
3330 static void update_mbus_pre_enable(struct intel_atomic_state *state)
3331 {
3332         struct drm_i915_private *i915 = to_i915(state->base.dev);
3333         u32 mbus_ctl, dbuf_min_tracker_val;
3334         enum dbuf_slice slice;
3335         const struct intel_dbuf_state *dbuf_state =
3336                 intel_atomic_get_new_dbuf_state(state);
3337
3338         if (!HAS_MBUS_JOINING(i915))
3339                 return;
3340
3341         /*
3342          * TODO: Implement vblank synchronized MBUS joining changes.
3343          * Must be properly coordinated with dbuf reprogramming.
3344          */
3345         if (dbuf_state->joined_mbus) {
3346                 mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN |
3347                         MBUS_JOIN_PIPE_SELECT_NONE;
3348                 dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(3);
3349         } else {
3350                 mbus_ctl = MBUS_HASHING_MODE_2x2 |
3351                         MBUS_JOIN_PIPE_SELECT_NONE;
3352                 dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(1);
3353         }
3354
3355         intel_de_rmw(i915, MBUS_CTL,
3356                      MBUS_HASHING_MODE_MASK | MBUS_JOIN |
3357                      MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);
3358
3359         for_each_dbuf_slice(i915, slice)
3360                 intel_de_rmw(i915, DBUF_CTL_S(slice),
3361                              DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
3362                              dbuf_min_tracker_val);
3363 }
3364
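     /*
      * Enable the union of the old and new dbuf slices before the plane
      * update, so that both the old and the new DDB layout stay valid
      * while the planes are being reprogrammed.
      */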
3365 void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
3366 {
3367         struct drm_i915_private *i915 = to_i915(state->base.dev);
3368         const struct intel_dbuf_state *new_dbuf_state =
3369                 intel_atomic_get_new_dbuf_state(state);
3370         const struct intel_dbuf_state *old_dbuf_state =
3371                 intel_atomic_get_old_dbuf_state(state);
3372
3373         if (!new_dbuf_state ||
3374             (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices &&
3375              new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus))
3376                 return;
3377
3378         WARN_ON(!new_dbuf_state->base.changed);
3379
3380         update_mbus_pre_enable(state);
3381         gen9_dbuf_slices_update(i915,
3382                                 old_dbuf_state->enabled_slices |
3383                                 new_dbuf_state->enabled_slices);
3384 }
3385
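     /* After the plane update only the new dbuf slices need to stay enabled. */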
3386 void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
3387 {
3388         struct drm_i915_private *i915 = to_i915(state->base.dev);
3389         const struct intel_dbuf_state *new_dbuf_state =
3390                 intel_atomic_get_new_dbuf_state(state);
3391         const struct intel_dbuf_state *old_dbuf_state =
3392                 intel_atomic_get_old_dbuf_state(state);
3393
3394         if (!new_dbuf_state ||
3395             (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices &&
3396              new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus))
3397                 return;
3398
3399         WARN_ON(!new_dbuf_state->base.changed);
3400
3401         gen9_dbuf_slices_update(i915,
3402                                 new_dbuf_state->enabled_slices);
3403 }
3404
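     /*
      * Pipes A/D and B/C are paired per DBUF bank here; a pipe only
      * qualifies for the larger MBUS credit allocation when the other
      * pipe of its pair is inactive.
      */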
3405 static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes)
3406 {
3407         switch (pipe) {
3408         case PIPE_A:
3409                 return !(active_pipes & BIT(PIPE_D));
3410         case PIPE_D:
3411                 return !(active_pipes & BIT(PIPE_A));
3412         case PIPE_B:
3413                 return !(active_pipes & BIT(PIPE_C));
3414         case PIPE_C:
3415                 return !(active_pipes & BIT(PIPE_B));
3416         default: /* to suppress compiler warning */
3417                 MISSING_CASE(pipe);
3418                 break;
3419         }
3420
3421         return false;
3422 }
3423
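     /*
      * Program the per-pipe MBUS DBOX credits, which depend on the
      * platform and (on ADL-P and MTL+) on whether the MBUS is joined.
      */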
3424 void intel_mbus_dbox_update(struct intel_atomic_state *state)
3425 {
3426         struct drm_i915_private *i915 = to_i915(state->base.dev);
3427         const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
3428         const struct intel_crtc_state *new_crtc_state;
3429         const struct intel_crtc *crtc;
3430         u32 val = 0;
3431         int i;
3432
3433         if (DISPLAY_VER(i915) < 11)
3434                 return;
3435
3436         new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
3437         old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
3438         if (!new_dbuf_state ||
3439             (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus &&
3440              new_dbuf_state->active_pipes == old_dbuf_state->active_pipes))
3441                 return;
3442
3443         if (DISPLAY_VER(i915) >= 14)
3444                 val |= MBUS_DBOX_I_CREDIT(2);
3445
3446         if (DISPLAY_VER(i915) >= 12) {
3447                 val |= MBUS_DBOX_B2B_TRANSACTIONS_MAX(16);
3448                 val |= MBUS_DBOX_B2B_TRANSACTIONS_DELAY(1);
3449                 val |= MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN;
3450         }
3451
3452         if (DISPLAY_VER(i915) >= 14)
3453                 val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(12) :
3454                                                      MBUS_DBOX_A_CREDIT(8);
3455         else if (IS_ALDERLAKE_P(i915))
3456                 /* Wa_22010947358:adl-p */
3457                 val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(6) :
3458                                                      MBUS_DBOX_A_CREDIT(4);
3459         else
3460                 val |= MBUS_DBOX_A_CREDIT(2);
3461
3462         if (DISPLAY_VER(i915) >= 14) {
3463                 val |= MBUS_DBOX_B_CREDIT(0xA);
3464         } else if (IS_ALDERLAKE_P(i915)) {
3465                 val |= MBUS_DBOX_BW_CREDIT(2);
3466                 val |= MBUS_DBOX_B_CREDIT(8);
3467         } else if (DISPLAY_VER(i915) >= 12) {
3468                 val |= MBUS_DBOX_BW_CREDIT(2);
3469                 val |= MBUS_DBOX_B_CREDIT(12);
3470         } else {
3471                 val |= MBUS_DBOX_BW_CREDIT(1);
3472                 val |= MBUS_DBOX_B_CREDIT(8);
3473         }
3474
3475         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
3476                 u32 pipe_val = val;
3477
3478                 if (!new_crtc_state->hw.active)
3479                         continue;
3480
3481                 if (DISPLAY_VER(i915) >= 14) {
3482                         if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe,
3483                                                               new_dbuf_state->active_pipes))
3484                                 pipe_val |= MBUS_DBOX_BW_8CREDITS_MTL;
3485                         else
3486                                 pipe_val |= MBUS_DBOX_BW_4CREDITS_MTL;
3487                 }
3488
3489                 intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe), pipe_val);
3490         }
3491 }
3492
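     /* debugfs interface to query and toggle IPC at runtime. */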
3493 static int skl_watermark_ipc_status_show(struct seq_file *m, void *data)
3494 {
3495         struct drm_i915_private *i915 = m->private;
3496
3497         seq_printf(m, "Isochronous Priority Control: %s\n",
3498                    str_yes_no(skl_watermark_ipc_enabled(i915)));
3499         return 0;
3500 }
3501
3502 static int skl_watermark_ipc_status_open(struct inode *inode, struct file *file)
3503 {
3504         struct drm_i915_private *i915 = inode->i_private;
3505
3506         return single_open(file, skl_watermark_ipc_status_show, i915);
3507 }
3508
3509 static ssize_t skl_watermark_ipc_status_write(struct file *file,
3510                                               const char __user *ubuf,
3511                                               size_t len, loff_t *offp)
3512 {
3513         struct seq_file *m = file->private_data;
3514         struct drm_i915_private *i915 = m->private;
3515         intel_wakeref_t wakeref;
3516         bool enable;
3517         int ret;
3518
3519         ret = kstrtobool_from_user(ubuf, len, &enable);
3520         if (ret < 0)
3521                 return ret;
3522
3523         with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
3524                 if (!skl_watermark_ipc_enabled(i915) && enable)
3525                         drm_info(&i915->drm,
3526                                  "Enabling IPC: WM will be proper only after next commit\n");
3527                 i915->display.wm.ipc_enabled = enable;
3528                 skl_watermark_ipc_update(i915);
3529         }
3530
3531         return len;
3532 }
3533
3534 static const struct file_operations skl_watermark_ipc_status_fops = {
3535         .owner = THIS_MODULE,
3536         .open = skl_watermark_ipc_status_open,
3537         .read = seq_read,
3538         .llseek = seq_lseek,
3539         .release = single_release,
3540         .write = skl_watermark_ipc_status_write
3541 };
3542
3543 void skl_watermark_ipc_debugfs_register(struct drm_i915_private *i915)
3544 {
3545         struct drm_minor *minor = i915->drm.primary;
3546
3547         if (!HAS_IPC(i915))
3548                 return;
3549
3550         debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915,
3551                             &skl_watermark_ipc_status_fops);
3552 }