1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5
6 #include <drm/drm_blend.h>
7
8 #include "intel_atomic.h"
9 #include "intel_atomic_plane.h"
10 #include "intel_bw.h"
11 #include "intel_de.h"
12 #include "intel_display.h"
13 #include "intel_display_power.h"
14 #include "intel_display_types.h"
15 #include "intel_fb.h"
16 #include "skl_watermark.h"
17
18 #include "i915_drv.h"
19 #include "i915_fixed.h"
20 #include "i915_reg.h"
21 #include "intel_pcode.h"
22 #include "intel_pm.h"
23
24 static void skl_sagv_disable(struct drm_i915_private *i915);
25
26 /* Stores plane specific WM parameters */
27 struct skl_wm_params {
28         bool x_tiled, y_tiled;
29         bool rc_surface;
30         bool is_planar;
31         u32 width;
32         u8 cpp;
33         u32 plane_pixel_rate;
34         u32 y_min_scanlines;
35         u32 plane_bytes_per_line;
36         uint_fixed_16_16_t plane_blocks_per_line;
37         uint_fixed_16_16_t y_tile_minimum;
38         u32 linetime_us;
39         u32 dbuf_block_size;
40 };
41
42 u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *i915)
43 {
44         u8 enabled_slices = 0;
45         enum dbuf_slice slice;
46
47         for_each_dbuf_slice(i915, slice) {
48                 if (intel_de_read(i915, DBUF_CTL_S(slice)) & DBUF_POWER_STATE)
49                         enabled_slices |= BIT(slice);
50         }
51
52         return enabled_slices;
53 }
54
55 /*
56  * FIXME: We still don't have the proper code to detect whether we need to apply the WA,
57  * so assume we'll always need it in order to avoid underruns.
58  */
59 static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915)
60 {
61         return DISPLAY_VER(i915) == 9;
62 }
63
64 static bool
65 intel_has_sagv(struct drm_i915_private *i915)
66 {
67         return DISPLAY_VER(i915) >= 9 && !IS_LP(i915) &&
68                 i915->display.sagv.status != I915_SAGV_NOT_CONTROLLED;
69 }
70
71 static u32
72 intel_sagv_block_time(struct drm_i915_private *i915)
73 {
74         if (DISPLAY_VER(i915) >= 14) {
75                 u32 val;
76
77                 val = intel_de_read(i915, MTL_LATENCY_SAGV);
78
79                 return REG_FIELD_GET(MTL_LATENCY_QCLK_SAGV, val);
80         } else if (DISPLAY_VER(i915) >= 12) {
81                 u32 val = 0;
82                 int ret;
83
84                 ret = snb_pcode_read(&i915->uncore,
85                                      GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
86                                      &val, NULL);
87                 if (ret) {
88                         drm_dbg_kms(&i915->drm, "Couldn't read SAGV block time!\n");
89                         return 0;
90                 }
91
92                 return val;
93         } else if (DISPLAY_VER(i915) == 11) {
94                 return 10;
95         } else if (DISPLAY_VER(i915) == 9 && !IS_LP(i915)) {
96                 return 30;
97         } else {
98                 return 0;
99         }
100 }
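/*
 * Summary of the lookup above (values in microseconds): on
 * DISPLAY_VER >= 14 the block time comes from the MTL_LATENCY_SAGV
 * register, on DISPLAY_VER >= 12 from a pcode query, both of which
 * vary by system; DISPLAY_VER == 11 uses a fixed 10 us, non-LP
 * DISPLAY_VER == 9 a fixed 30 us, and everything else has no SAGV
 * block time (0).
 */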
101
102 static void intel_sagv_init(struct drm_i915_private *i915)
103 {
104         if (!intel_has_sagv(i915))
105                 i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
106
107         /*
108          * Probe to see if we have working SAGV control.
109          * For icl+ this was already determined by intel_bw_init_hw().
110          */
111         if (DISPLAY_VER(i915) < 11)
112                 skl_sagv_disable(i915);
113
114         drm_WARN_ON(&i915->drm, i915->display.sagv.status == I915_SAGV_UNKNOWN);
115
116         i915->display.sagv.block_time_us = intel_sagv_block_time(i915);
117
118         drm_dbg_kms(&i915->drm, "SAGV supported: %s, original SAGV block time: %u us\n",
119                     str_yes_no(intel_has_sagv(i915)), i915->display.sagv.block_time_us);
120
121         /* avoid overflow when adding with wm0 latency/etc. */
122         if (drm_WARN(&i915->drm, i915->display.sagv.block_time_us > U16_MAX,
123                      "Excessive SAGV block time %u, ignoring\n",
124                      i915->display.sagv.block_time_us))
125                 i915->display.sagv.block_time_us = 0;
126
127         if (!intel_has_sagv(i915))
128                 i915->display.sagv.block_time_us = 0;
129 }
130
131 /*
132  * SAGV dynamically adjusts the system agent voltage and clock frequencies
133  * depending on power and performance requirements. The display engine access
134  * to system memory is blocked during the adjustment time. Because of the
135  * blocking time, having this enabled can cause full system hangs and/or pipe
136  * underruns if we don't meet all of the following requirements:
137  *
138  *  - <= 1 pipe enabled
139  *  - All planes can enable watermarks for latencies >= SAGV engine block time
140  *  - We're not using an interlaced display configuration
141  */
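/*
 * For illustration (hypothetical configuration): two active pipes on a
 * pre-icl platform, or any plane whose highest common enabled watermark
 * level cannot tolerate the SAGV block time, will make
 * intel_can_enable_sagv() below report false, and the pre plane update
 * hook will then keep SAGV disabled across the update.
 */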
142 static void skl_sagv_enable(struct drm_i915_private *i915)
143 {
144         int ret;
145
146         if (!intel_has_sagv(i915))
147                 return;
148
149         if (i915->display.sagv.status == I915_SAGV_ENABLED)
150                 return;
151
152         drm_dbg_kms(&i915->drm, "Enabling SAGV\n");
153         ret = snb_pcode_write(&i915->uncore, GEN9_PCODE_SAGV_CONTROL,
154                               GEN9_SAGV_ENABLE);
155
156         /* We don't need to wait for SAGV when enabling */
157
158         /*
159          * Some skl systems, pre-release machines in particular,
160          * don't actually have SAGV.
161          */
162         if (IS_SKYLAKE(i915) && ret == -ENXIO) {
163                 drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n");
164                 i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
165                 return;
166         } else if (ret < 0) {
167                 drm_err(&i915->drm, "Failed to enable SAGV\n");
168                 return;
169         }
170
171         i915->display.sagv.status = I915_SAGV_ENABLED;
172 }
173
174 static void skl_sagv_disable(struct drm_i915_private *i915)
175 {
176         int ret;
177
178         if (!intel_has_sagv(i915))
179                 return;
180
181         if (i915->display.sagv.status == I915_SAGV_DISABLED)
182                 return;
183
184         drm_dbg_kms(&i915->drm, "Disabling SAGV\n");
185         /* bspec says to keep retrying for at least 1 ms */
186         ret = skl_pcode_request(&i915->uncore, GEN9_PCODE_SAGV_CONTROL,
187                                 GEN9_SAGV_DISABLE,
188                                 GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
189                                 1);
190         /*
191          * Some skl systems, pre-release machines in particular,
192          * don't actually have SAGV.
193          */
194         if (IS_SKYLAKE(i915) && ret == -ENXIO) {
195                 drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n");
196                 i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
197                 return;
198         } else if (ret < 0) {
199                 drm_err(&i915->drm, "Failed to disable SAGV (%d)\n", ret);
200                 return;
201         }
202
203         i915->display.sagv.status = I915_SAGV_DISABLED;
204 }
205
206 static void skl_sagv_pre_plane_update(struct intel_atomic_state *state)
207 {
208         struct drm_i915_private *i915 = to_i915(state->base.dev);
209         const struct intel_bw_state *new_bw_state =
210                 intel_atomic_get_new_bw_state(state);
211
212         if (!new_bw_state)
213                 return;
214
215         if (!intel_can_enable_sagv(i915, new_bw_state))
216                 skl_sagv_disable(i915);
217 }
218
219 static void skl_sagv_post_plane_update(struct intel_atomic_state *state)
220 {
221         struct drm_i915_private *i915 = to_i915(state->base.dev);
222         const struct intel_bw_state *new_bw_state =
223                 intel_atomic_get_new_bw_state(state);
224
225         if (!new_bw_state)
226                 return;
227
228         if (intel_can_enable_sagv(i915, new_bw_state))
229                 skl_sagv_enable(i915);
230 }
231
232 static void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
233 {
234         struct drm_i915_private *i915 = to_i915(state->base.dev);
235         const struct intel_bw_state *old_bw_state =
236                 intel_atomic_get_old_bw_state(state);
237         const struct intel_bw_state *new_bw_state =
238                 intel_atomic_get_new_bw_state(state);
239         u16 old_mask, new_mask;
240
241         if (!new_bw_state)
242                 return;
243
244         old_mask = old_bw_state->qgv_points_mask;
245         new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
246
247         if (old_mask == new_mask)
248                 return;
249
250         WARN_ON(!new_bw_state->base.changed);
251
252         drm_dbg_kms(&i915->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
253                     old_mask, new_mask);
254
255         /*
256          * Restrict required qgv points before updating the configuration.
257          * According to BSpec we can't mask and unmask qgv points at the same
258          * time. Also masking should be done before updating the configuration
259          * and unmasking afterwards.
260          */
261         icl_pcode_restrict_qgv_points(i915, new_mask);
262 }
263
264 static void icl_sagv_post_plane_update(struct intel_atomic_state *state)
265 {
266         struct drm_i915_private *i915 = to_i915(state->base.dev);
267         const struct intel_bw_state *old_bw_state =
268                 intel_atomic_get_old_bw_state(state);
269         const struct intel_bw_state *new_bw_state =
270                 intel_atomic_get_new_bw_state(state);
271         u16 old_mask, new_mask;
272
273         if (!new_bw_state)
274                 return;
275
276         old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
277         new_mask = new_bw_state->qgv_points_mask;
278
279         if (old_mask == new_mask)
280                 return;
281
282         WARN_ON(!new_bw_state->base.changed);
283
284         drm_dbg_kms(&i915->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
285                     old_mask, new_mask);
286
287         /*
288          * Allow required qgv points after updating the configuration.
289          * According to BSpec we can't mask and unmask qgv points at the same
290          * time. Also masking should be done before updating the configuration
291          * and unmasking afterwards.
292          */
293         icl_pcode_restrict_qgv_points(i915, new_mask);
294 }
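/*
 * Worked example of the masking order (hypothetical values): with
 * old qgv_points_mask == 0x3 and new qgv_points_mask == 0x6, the pre
 * plane update hook restricts to the union 0x7 so both the old and the
 * new configuration are satisfied, and only after the update does the
 * post plane update hook relax the restriction to 0x6. Masking is thus
 * always done before the update and unmasking after it, per BSpec.
 */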
295
296 void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
297 {
298         struct drm_i915_private *i915 = to_i915(state->base.dev);
299
300         /*
301          * Just return if we can't control SAGV or don't have it.
302          * This is different from the situation where we have SAGV but just
303          * can't afford it due to DBuf limitations - if SAGV is completely
304          * disabled in the BIOS, we are not even allowed to send a PCode
305          * request, as it would throw an error. So we have to check it here.
306          */
307         if (!intel_has_sagv(i915))
308                 return;
309
310         if (DISPLAY_VER(i915) >= 11)
311                 icl_sagv_pre_plane_update(state);
312         else
313                 skl_sagv_pre_plane_update(state);
314 }
315
316 void intel_sagv_post_plane_update(struct intel_atomic_state *state)
317 {
318         struct drm_i915_private *i915 = to_i915(state->base.dev);
319
320         /*
321          * Just return if we can't control SAGV or don't have it.
322          * This is different from the situation where we have SAGV but just
323          * can't afford it due to DBuf limitations - if SAGV is completely
324          * disabled in the BIOS, we are not even allowed to send a PCode
325          * request, as it would throw an error. So we have to check it here.
326          */
327         if (!intel_has_sagv(i915))
328                 return;
329
330         if (DISPLAY_VER(i915) >= 11)
331                 icl_sagv_post_plane_update(state);
332         else
333                 skl_sagv_post_plane_update(state);
334 }
335
336 static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
337 {
338         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
339         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
340         enum plane_id plane_id;
341         int max_level = INT_MAX;
342
343         if (!intel_has_sagv(i915))
344                 return false;
345
346         if (!crtc_state->hw.active)
347                 return true;
348
349         if (crtc_state->hw.pipe_mode.flags & DRM_MODE_FLAG_INTERLACE)
350                 return false;
351
352         for_each_plane_id_on_crtc(crtc, plane_id) {
353                 const struct skl_plane_wm *wm =
354                         &crtc_state->wm.skl.optimal.planes[plane_id];
355                 int level;
356
357                 /* Skip this plane if it's not enabled */
358                 if (!wm->wm[0].enable)
359                         continue;
360
361                 /* Find the highest enabled wm level for this plane */
362                 for (level = ilk_wm_max_level(i915);
363                      !wm->wm[level].enable; --level)
364                      { }
365
366                 /* Highest common enabled wm level for all planes */
367                 max_level = min(level, max_level);
368         }
369
370         /* No enabled planes? */
371         if (max_level == INT_MAX)
372                 return true;
373
374         for_each_plane_id_on_crtc(crtc, plane_id) {
375                 const struct skl_plane_wm *wm =
376                         &crtc_state->wm.skl.optimal.planes[plane_id];
377
378                 /*
379                  * All enabled planes must have enabled a common wm level that
380                  * can tolerate memory latencies higher than sagv_block_time_us
381                  */
382                 if (wm->wm[0].enable && !wm->wm[max_level].can_sagv)
383                         return false;
384         }
385
386         return true;
387 }
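/*
 * Worked example for the level search above (hypothetical watermarks):
 * if plane 1 has levels 0-5 enabled and plane 2 only levels 0-3, the
 * highest common enabled level is 3, and SAGV is only allowed if every
 * enabled plane has wm[3].can_sagv set, i.e. level 3 covers memory
 * latencies of at least the SAGV block time.
 */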
388
389 static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
390 {
391         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
392         enum plane_id plane_id;
393
394         if (!crtc_state->hw.active)
395                 return true;
396
397         for_each_plane_id_on_crtc(crtc, plane_id) {
398                 const struct skl_plane_wm *wm =
399                         &crtc_state->wm.skl.optimal.planes[plane_id];
400
401                 if (wm->wm[0].enable && !wm->sagv.wm0.enable)
402                         return false;
403         }
404
405         return true;
406 }
407
408 static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
409 {
410         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
411         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
412
413         if (DISPLAY_VER(i915) >= 12)
414                 return tgl_crtc_can_enable_sagv(crtc_state);
415         else
416                 return skl_crtc_can_enable_sagv(crtc_state);
417 }
418
419 bool intel_can_enable_sagv(struct drm_i915_private *i915,
420                            const struct intel_bw_state *bw_state)
421 {
422         if (DISPLAY_VER(i915) < 11 &&
423             bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
424                 return false;
425
426         return bw_state->pipe_sagv_reject == 0;
427 }
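/*
 * Note the pre-icl single pipe requirement encoded above: e.g.
 * active_pipes == 0x5 (two pipes) fails the is_power_of_2() check and
 * SAGV is rejected outright, while 0x4 (one pipe) passes it and SAGV
 * then only depends on pipe_sagv_reject being empty.
 */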
428
429 static int intel_compute_sagv_mask(struct intel_atomic_state *state)
430 {
431         struct drm_i915_private *i915 = to_i915(state->base.dev);
432         int ret;
433         struct intel_crtc *crtc;
434         struct intel_crtc_state *new_crtc_state;
435         struct intel_bw_state *new_bw_state = NULL;
436         const struct intel_bw_state *old_bw_state = NULL;
437         int i;
438
439         for_each_new_intel_crtc_in_state(state, crtc,
440                                          new_crtc_state, i) {
441                 new_bw_state = intel_atomic_get_bw_state(state);
442                 if (IS_ERR(new_bw_state))
443                         return PTR_ERR(new_bw_state);
444
445                 old_bw_state = intel_atomic_get_old_bw_state(state);
446
447                 if (intel_crtc_can_enable_sagv(new_crtc_state))
448                         new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
449                 else
450                         new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
451         }
452
453         if (!new_bw_state)
454                 return 0;
455
456         new_bw_state->active_pipes =
457                 intel_calc_active_pipes(state, old_bw_state->active_pipes);
458
459         if (new_bw_state->active_pipes != old_bw_state->active_pipes) {
460                 ret = intel_atomic_lock_global_state(&new_bw_state->base);
461                 if (ret)
462                         return ret;
463         }
464
465         if (intel_can_enable_sagv(i915, new_bw_state) !=
466             intel_can_enable_sagv(i915, old_bw_state)) {
467                 ret = intel_atomic_serialize_global_state(&new_bw_state->base);
468                 if (ret)
469                         return ret;
470         } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
471                 ret = intel_atomic_lock_global_state(&new_bw_state->base);
472                 if (ret)
473                         return ret;
474         }
475
476         for_each_new_intel_crtc_in_state(state, crtc,
477                                          new_crtc_state, i) {
478                 struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
479
480                 /*
481                  * We store use_sagv_wm in the crtc state rather than relying on
482                  * that bw state since we have no convenient way to get at the
483                  * latter from the plane commit hooks (especially in the legacy
484                  * cursor case)
485                  */
486                 pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(i915) &&
487                         DISPLAY_VER(i915) >= 12 &&
488                         intel_can_enable_sagv(i915, new_bw_state);
489         }
490
491         return 0;
492 }
493
494 static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry,
495                               u16 start, u16 end)
496 {
497         entry->start = start;
498         entry->end = end;
499
500         return end;
501 }
502
503 static int intel_dbuf_slice_size(struct drm_i915_private *i915)
504 {
505         return INTEL_INFO(i915)->display.dbuf.size /
506                 hweight8(INTEL_INFO(i915)->display.dbuf.slice_mask);
507 }
508
509 static void
510 skl_ddb_entry_for_slices(struct drm_i915_private *i915, u8 slice_mask,
511                          struct skl_ddb_entry *ddb)
512 {
513         int slice_size = intel_dbuf_slice_size(i915);
514
515         if (!slice_mask) {
516                 ddb->start = 0;
517                 ddb->end = 0;
518                 return;
519         }
520
521         ddb->start = (ffs(slice_mask) - 1) * slice_size;
522         ddb->end = fls(slice_mask) * slice_size;
523
524         WARN_ON(ddb->start >= ddb->end);
525         WARN_ON(ddb->end > INTEL_INFO(i915)->display.dbuf.size);
526 }
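/*
 * Worked example (hypothetical 4 slice DBuf of 2048 blocks total,
 * i.e. slice_size == 512): slice_mask == BIT(DBUF_S2) | BIT(DBUF_S3)
 * == 0b0110 yields ddb->start == (ffs(0b0110) - 1) * 512 == 512 and
 * ddb->end == fls(0b0110) * 512 == 1536, the contiguous block range
 * covering slices S2 and S3.
 */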
527
528 static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask)
529 {
530         struct skl_ddb_entry ddb;
531
532         if (slice_mask & (BIT(DBUF_S1) | BIT(DBUF_S2)))
533                 slice_mask = BIT(DBUF_S1);
534         else if (slice_mask & (BIT(DBUF_S3) | BIT(DBUF_S4)))
535                 slice_mask = BIT(DBUF_S3);
536
537         skl_ddb_entry_for_slices(i915, slice_mask, &ddb);
538
539         return ddb.start;
540 }
541
542 u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915,
543                             const struct skl_ddb_entry *entry)
544 {
545         int slice_size = intel_dbuf_slice_size(i915);
546         enum dbuf_slice start_slice, end_slice;
547         u8 slice_mask = 0;
548
549         if (!skl_ddb_entry_size(entry))
550                 return 0;
551
552         start_slice = entry->start / slice_size;
553         end_slice = (entry->end - 1) / slice_size;
554
555         /*
556          * A per-plane DDB entry can in the worst case span multiple slices,
557          * but a single entry is always contiguous.
558          */
559         while (start_slice <= end_slice) {
560                 slice_mask |= BIT(start_slice);
561                 start_slice++;
562         }
563
564         return slice_mask;
565 }
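/*
 * Worked example of the inverse mapping (same hypothetical 512 block
 * slices as above): an entry covering blocks [600, 900) has
 * start_slice == 600 / 512 == 1 and end_slice == 899 / 512 == 1, so
 * the returned mask is BIT(1), i.e. the entry sits entirely within
 * the second slice.
 */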
566
567 static unsigned int intel_crtc_ddb_weight(const struct intel_crtc_state *crtc_state)
568 {
569         const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
570         int hdisplay, vdisplay;
571
572         if (!crtc_state->hw.active)
573                 return 0;
574
575         /*
576          * The watermark/ddb requirement depends heavily on the width of the
577          * framebuffer, so instead of allocating DDB equally among pipes,
578          * distribute DDB based on the resolution/width of the display.
579          */
580         drm_mode_get_hv_timing(pipe_mode, &hdisplay, &vdisplay);
581
582         return hdisplay;
583 }
584
585 static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state,
586                                     enum pipe for_pipe,
587                                     unsigned int *weight_start,
588                                     unsigned int *weight_end,
589                                     unsigned int *weight_total)
590 {
591         struct drm_i915_private *i915 =
592                 to_i915(dbuf_state->base.state->base.dev);
593         enum pipe pipe;
594
595         *weight_start = 0;
596         *weight_end = 0;
597         *weight_total = 0;
598
599         for_each_pipe(i915, pipe) {
600                 int weight = dbuf_state->weight[pipe];
601
602                 /*
603                  * Do not account for pipes using other slice sets.
604                  * Luckily, as of the current BSpec, slice sets do not
605                  * partially intersect (pipes either share the same slice or
606                  * the same slice set, i.e. no partial intersection), so
607                  * checking for equality is enough for now.
608                  */
609                 if (dbuf_state->slices[pipe] != dbuf_state->slices[for_pipe])
610                         continue;
611
612                 *weight_total += weight;
613                 if (pipe < for_pipe) {
614                         *weight_start += weight;
615                         *weight_end += weight;
616                 } else if (pipe == for_pipe) {
617                         *weight_end += weight;
618                 }
619         }
620 }
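/*
 * Worked example (hypothetical hdisplay derived weights): pipes A/B/C
 * share one slice set with weights 1920/1280/800. For for_pipe == B
 * this gives weight_start == 1920, weight_end == 1920 + 1280 == 3200
 * and weight_total == 4000, i.e. pipe B owns the [1920, 3200) share
 * of the total weight.
 */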
621
622 static int
623 skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
624 {
625         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
626         unsigned int weight_total, weight_start, weight_end;
627         const struct intel_dbuf_state *old_dbuf_state =
628                 intel_atomic_get_old_dbuf_state(state);
629         struct intel_dbuf_state *new_dbuf_state =
630                 intel_atomic_get_new_dbuf_state(state);
631         struct intel_crtc_state *crtc_state;
632         struct skl_ddb_entry ddb_slices;
633         enum pipe pipe = crtc->pipe;
634         unsigned int mbus_offset = 0;
635         u32 ddb_range_size;
636         u32 dbuf_slice_mask;
637         u32 start, end;
638         int ret;
639
640         if (new_dbuf_state->weight[pipe] == 0) {
641                 skl_ddb_entry_init(&new_dbuf_state->ddb[pipe], 0, 0);
642                 goto out;
643         }
644
645         dbuf_slice_mask = new_dbuf_state->slices[pipe];
646
647         skl_ddb_entry_for_slices(i915, dbuf_slice_mask, &ddb_slices);
648         mbus_offset = mbus_ddb_offset(i915, dbuf_slice_mask);
649         ddb_range_size = skl_ddb_entry_size(&ddb_slices);
650
651         intel_crtc_dbuf_weights(new_dbuf_state, pipe,
652                                 &weight_start, &weight_end, &weight_total);
653
654         start = ddb_range_size * weight_start / weight_total;
655         end = ddb_range_size * weight_end / weight_total;
656
657         skl_ddb_entry_init(&new_dbuf_state->ddb[pipe],
658                            ddb_slices.start - mbus_offset + start,
659                            ddb_slices.start - mbus_offset + end);
660
661 out:
662         if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe] &&
663             skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe],
664                                 &new_dbuf_state->ddb[pipe]))
665                 return 0;
666
667         ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
668         if (ret)
669                 return ret;
670
671         crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
672         if (IS_ERR(crtc_state))
673                 return PTR_ERR(crtc_state);
674
675         /*
676          * Used for checking overlaps, so we need absolute
677          * offsets instead of MBUS relative offsets.
678          */
679         crtc_state->wm.skl.ddb.start = mbus_offset + new_dbuf_state->ddb[pipe].start;
680         crtc_state->wm.skl.ddb.end = mbus_offset + new_dbuf_state->ddb[pipe].end;
681
682         drm_dbg_kms(&i915->drm,
683                     "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n",
684                     crtc->base.base.id, crtc->base.name,
685                     old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe],
686                     old_dbuf_state->ddb[pipe].start, old_dbuf_state->ddb[pipe].end,
687                     new_dbuf_state->ddb[pipe].start, new_dbuf_state->ddb[pipe].end,
688                     old_dbuf_state->active_pipes, new_dbuf_state->active_pipes);
689
690         return 0;
691 }
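/*
 * Continuing the weight example above with a hypothetical
 * ddb_range_size of 1024 blocks: pipe B gets
 * start == 1024 * 1920 / 4000 == 491 and
 * end == 1024 * 3200 / 4000 == 819, so its DDB entry covers blocks
 * [491, 819) of the slice range (before the mbus offset adjustment).
 */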
692
693 static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
694                                  int width, const struct drm_format_info *format,
695                                  u64 modifier, unsigned int rotation,
696                                  u32 plane_pixel_rate, struct skl_wm_params *wp,
697                                  int color_plane);
698
699 static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
700                                  struct intel_plane *plane,
701                                  int level,
702                                  unsigned int latency,
703                                  const struct skl_wm_params *wp,
704                                  const struct skl_wm_level *result_prev,
705                                  struct skl_wm_level *result /* out */);
706
707 static unsigned int
708 skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
709                       int num_active)
710 {
711         struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor);
712         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
713         int level, max_level = ilk_wm_max_level(i915);
714         struct skl_wm_level wm = {};
715         int ret, min_ddb_alloc = 0;
716         struct skl_wm_params wp;
717
718         ret = skl_compute_wm_params(crtc_state, 256,
719                                     drm_format_info(DRM_FORMAT_ARGB8888),
720                                     DRM_FORMAT_MOD_LINEAR,
721                                     DRM_MODE_ROTATE_0,
722                                     crtc_state->pixel_rate, &wp, 0);
723         drm_WARN_ON(&i915->drm, ret);
724
725         for (level = 0; level <= max_level; level++) {
726                 unsigned int latency = i915->display.wm.skl_latency[level];
727
728                 skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm);
729                 if (wm.min_ddb_alloc == U16_MAX)
730                         break;
731
732                 min_ddb_alloc = wm.min_ddb_alloc;
733         }
734
735         return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
736 }
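/*
 * The cursor is modelled above as a worst case 256 pixel wide linear
 * ARGB8888 plane: the highest watermark level still yielding a valid
 * allocation determines min_ddb_alloc, with a floor of 32 blocks when
 * a single pipe is active and 8 blocks otherwise.
 */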
737
738 static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
739 {
740         skl_ddb_entry_init(entry,
741                            REG_FIELD_GET(PLANE_BUF_START_MASK, reg),
742                            REG_FIELD_GET(PLANE_BUF_END_MASK, reg));
743         if (entry->end)
744                 entry->end++;
745 }
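/*
 * The hardware register encodes an inclusive end block while
 * skl_ddb_entry uses an exclusive one: e.g. register fields
 * start == 0, end == 159 become the entry [0, 160), and an all-zero
 * register (disabled plane) stays the empty entry [0, 0).
 */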
746
747 static void
748 skl_ddb_get_hw_plane_state(struct drm_i915_private *i915,
749                            const enum pipe pipe,
750                            const enum plane_id plane_id,
751                            struct skl_ddb_entry *ddb,
752                            struct skl_ddb_entry *ddb_y)
753 {
754         u32 val;
755
756         /* Cursor doesn't support NV12/planar, so no extra calculation needed */
757         if (plane_id == PLANE_CURSOR) {
758                 val = intel_de_read(i915, CUR_BUF_CFG(pipe));
759                 skl_ddb_entry_init_from_hw(ddb, val);
760                 return;
761         }
762
763         val = intel_de_read(i915, PLANE_BUF_CFG(pipe, plane_id));
764         skl_ddb_entry_init_from_hw(ddb, val);
765
766         if (DISPLAY_VER(i915) >= 11)
767                 return;
768
769         val = intel_de_read(i915, PLANE_NV12_BUF_CFG(pipe, plane_id));
770         skl_ddb_entry_init_from_hw(ddb_y, val);
771 }
772
773 static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
774                                       struct skl_ddb_entry *ddb,
775                                       struct skl_ddb_entry *ddb_y)
776 {
777         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
778         enum intel_display_power_domain power_domain;
779         enum pipe pipe = crtc->pipe;
780         intel_wakeref_t wakeref;
781         enum plane_id plane_id;
782
783         power_domain = POWER_DOMAIN_PIPE(pipe);
784         wakeref = intel_display_power_get_if_enabled(i915, power_domain);
785         if (!wakeref)
786                 return;
787
788         for_each_plane_id_on_crtc(crtc, plane_id)
789                 skl_ddb_get_hw_plane_state(i915, pipe,
790                                            plane_id,
791                                            &ddb[plane_id],
792                                            &ddb_y[plane_id]);
793
794         intel_display_power_put(i915, power_domain, wakeref);
795 }
796
797 struct dbuf_slice_conf_entry {
798         u8 active_pipes;
799         u8 dbuf_mask[I915_MAX_PIPES];
800         bool join_mbus;
801 };
802
803 /*
804  * Table taken from Bspec 12716.
805  * Pipes have some preferred DBuf slice affinity,
806  * plus there are some hardcoded requirements on how
807  * those should be distributed for multipipe scenarios.
808  * With more DBuf slices the algorithm would get even messier
809  * and less readable, so we decided to use the table almost
810  * as-is from BSpec itself - that way it is at least easier
811  * to compare, change and check.
812  */
813 static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] =
814 /* Autogenerated with igt/tools/intel_dbuf_map tool: */
815 {
816         {
817                 .active_pipes = BIT(PIPE_A),
818                 .dbuf_mask = {
819                         [PIPE_A] = BIT(DBUF_S1),
820                 },
821         },
822         {
823                 .active_pipes = BIT(PIPE_B),
824                 .dbuf_mask = {
825                         [PIPE_B] = BIT(DBUF_S1),
826                 },
827         },
828         {
829                 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
830                 .dbuf_mask = {
831                         [PIPE_A] = BIT(DBUF_S1),
832                         [PIPE_B] = BIT(DBUF_S2),
833                 },
834         },
835         {
836                 .active_pipes = BIT(PIPE_C),
837                 .dbuf_mask = {
838                         [PIPE_C] = BIT(DBUF_S2),
839                 },
840         },
841         {
842                 .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
843                 .dbuf_mask = {
844                         [PIPE_A] = BIT(DBUF_S1),
845                         [PIPE_C] = BIT(DBUF_S2),
846                 },
847         },
848         {
849                 .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
850                 .dbuf_mask = {
851                         [PIPE_B] = BIT(DBUF_S1),
852                         [PIPE_C] = BIT(DBUF_S2),
853                 },
854         },
855         {
856                 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
857                 .dbuf_mask = {
858                         [PIPE_A] = BIT(DBUF_S1),
859                         [PIPE_B] = BIT(DBUF_S1),
860                         [PIPE_C] = BIT(DBUF_S2),
861                 },
862         },
863         {}
864 };
865
866 /*
867  * Table taken from Bspec 49255.
868  * Pipes have some preferred DBuf slice affinity,
869  * plus there are some hardcoded requirements on how
870  * those should be distributed for multipipe scenarios.
871  * With more DBuf slices the algorithm would get even messier
872  * and less readable, so we decided to use the table almost
873  * as-is from BSpec itself - that way it is at least easier
874  * to compare, change and check.
875  */
876 static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =
877 /* Autogenerated with igt/tools/intel_dbuf_map tool: */
878 {
879         {
880                 .active_pipes = BIT(PIPE_A),
881                 .dbuf_mask = {
882                         [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
883                 },
884         },
885         {
886                 .active_pipes = BIT(PIPE_B),
887                 .dbuf_mask = {
888                         [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
889                 },
890         },
891         {
892                 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
893                 .dbuf_mask = {
894                         [PIPE_A] = BIT(DBUF_S2),
895                         [PIPE_B] = BIT(DBUF_S1),
896                 },
897         },
898         {
899                 .active_pipes = BIT(PIPE_C),
900                 .dbuf_mask = {
901                         [PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1),
902                 },
903         },
904         {
905                 .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
906                 .dbuf_mask = {
907                         [PIPE_A] = BIT(DBUF_S1),
908                         [PIPE_C] = BIT(DBUF_S2),
909                 },
910         },
911         {
912                 .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
913                 .dbuf_mask = {
914                         [PIPE_B] = BIT(DBUF_S1),
915                         [PIPE_C] = BIT(DBUF_S2),
916                 },
917         },
918         {
919                 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
920                 .dbuf_mask = {
921                         [PIPE_A] = BIT(DBUF_S1),
922                         [PIPE_B] = BIT(DBUF_S1),
923                         [PIPE_C] = BIT(DBUF_S2),
924                 },
925         },
926         {
927                 .active_pipes = BIT(PIPE_D),
928                 .dbuf_mask = {
929                         [PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1),
930                 },
931         },
932         {
933                 .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
934                 .dbuf_mask = {
935                         [PIPE_A] = BIT(DBUF_S1),
936                         [PIPE_D] = BIT(DBUF_S2),
937                 },
938         },
939         {
940                 .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
941                 .dbuf_mask = {
942                         [PIPE_B] = BIT(DBUF_S1),
943                         [PIPE_D] = BIT(DBUF_S2),
944                 },
945         },
946         {
947                 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
948                 .dbuf_mask = {
949                         [PIPE_A] = BIT(DBUF_S1),
950                         [PIPE_B] = BIT(DBUF_S1),
951                         [PIPE_D] = BIT(DBUF_S2),
952                 },
953         },
954         {
955                 .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
956                 .dbuf_mask = {
957                         [PIPE_C] = BIT(DBUF_S1),
958                         [PIPE_D] = BIT(DBUF_S2),
959                 },
960         },
961         {
962                 .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
963                 .dbuf_mask = {
964                         [PIPE_A] = BIT(DBUF_S1),
965                         [PIPE_C] = BIT(DBUF_S2),
966                         [PIPE_D] = BIT(DBUF_S2),
967                 },
968         },
969         {
970                 .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
971                 .dbuf_mask = {
972                         [PIPE_B] = BIT(DBUF_S1),
973                         [PIPE_C] = BIT(DBUF_S2),
974                         [PIPE_D] = BIT(DBUF_S2),
975                 },
976         },
977         {
978                 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
979                 .dbuf_mask = {
980                         [PIPE_A] = BIT(DBUF_S1),
981                         [PIPE_B] = BIT(DBUF_S1),
982                         [PIPE_C] = BIT(DBUF_S2),
983                         [PIPE_D] = BIT(DBUF_S2),
984                 },
985         },
986         {}
987 };
988
989 static const struct dbuf_slice_conf_entry dg2_allowed_dbufs[] = {
990         {
991                 .active_pipes = BIT(PIPE_A),
992                 .dbuf_mask = {
993                         [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
994                 },
995         },
996         {
997                 .active_pipes = BIT(PIPE_B),
998                 .dbuf_mask = {
999                         [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
1000                 },
1001         },
1002         {
1003                 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
1004                 .dbuf_mask = {
1005                         [PIPE_A] = BIT(DBUF_S1),
1006                         [PIPE_B] = BIT(DBUF_S2),
1007                 },
1008         },
1009         {
1010                 .active_pipes = BIT(PIPE_C),
1011                 .dbuf_mask = {
1012                         [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1013                 },
1014         },
1015         {
1016                 .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
1017                 .dbuf_mask = {
1018                         [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1019                         [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1020                 },
1021         },
1022         {
1023                 .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
1024                 .dbuf_mask = {
1025                         [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
1026                         [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1027                 },
1028         },
1029         {
1030                 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
1031                 .dbuf_mask = {
1032                         [PIPE_A] = BIT(DBUF_S1),
1033                         [PIPE_B] = BIT(DBUF_S2),
1034                         [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1035                 },
1036         },
1037         {
1038                 .active_pipes = BIT(PIPE_D),
1039                 .dbuf_mask = {
1040                         [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
1041                 },
1042         },
1043         {
1044                 .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
1045                 .dbuf_mask = {
1046                         [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1047                         [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
1048                 },
1049         },
1050         {
1051                 .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
1052                 .dbuf_mask = {
1053                         [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
1054                         [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
1055                 },
1056         },
1057         {
1058                 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
1059                 .dbuf_mask = {
1060                         [PIPE_A] = BIT(DBUF_S1),
1061                         [PIPE_B] = BIT(DBUF_S2),
1062                         [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
1063                 },
1064         },
1065         {
1066                 .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
1067                 .dbuf_mask = {
1068                         [PIPE_C] = BIT(DBUF_S3),
1069                         [PIPE_D] = BIT(DBUF_S4),
1070                 },
1071         },
1072         {
1073                 .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
1074                 .dbuf_mask = {
1075                         [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1076                         [PIPE_C] = BIT(DBUF_S3),
1077                         [PIPE_D] = BIT(DBUF_S4),
1078                 },
1079         },
1080         {
1081                 .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
1082                 .dbuf_mask = {
1083                         [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
1084                         [PIPE_C] = BIT(DBUF_S3),
1085                         [PIPE_D] = BIT(DBUF_S4),
1086                 },
1087         },
1088         {
1089                 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
1090                 .dbuf_mask = {
1091                         [PIPE_A] = BIT(DBUF_S1),
1092                         [PIPE_B] = BIT(DBUF_S2),
1093                         [PIPE_C] = BIT(DBUF_S3),
1094                         [PIPE_D] = BIT(DBUF_S4),
1095                 },
1096         },
1097         {}
1098 };
1099
1100 static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = {
1101         /*
1102          * Keep the join_mbus cases first so check_mbus_joined()
1103          * will prefer them over the !join_mbus cases.
1104          */
1105         {
1106                 .active_pipes = BIT(PIPE_A),
1107                 .dbuf_mask = {
1108                         [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
1109                 },
1110                 .join_mbus = true,
1111         },
1112         {
1113                 .active_pipes = BIT(PIPE_B),
1114                 .dbuf_mask = {
1115                         [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
1116                 },
1117                 .join_mbus = true,
1118         },
1119         {
1120                 .active_pipes = BIT(PIPE_A),
1121                 .dbuf_mask = {
1122                         [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1123                 },
1124                 .join_mbus = false,
1125         },
1126         {
1127                 .active_pipes = BIT(PIPE_B),
1128                 .dbuf_mask = {
1129                         [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1130                 },
1131                 .join_mbus = false,
1132         },
1133         {
1134                 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
1135                 .dbuf_mask = {
1136                         [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1137                         [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1138                 },
1139         },
1140         {
1141                 .active_pipes = BIT(PIPE_C),
1142                 .dbuf_mask = {
1143                         [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1144                 },
1145         },
1146         {
1147                 .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
1148                 .dbuf_mask = {
1149                         [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1150                         [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1151                 },
1152         },
1153         {
1154                 .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
1155                 .dbuf_mask = {
1156                         [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1157                         [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1158                 },
1159         },
1160         {
1161                 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
1162                 .dbuf_mask = {
1163                         [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1164                         [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1165                         [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1166                 },
1167         },
1168         {
1169                 .active_pipes = BIT(PIPE_D),
1170                 .dbuf_mask = {
1171                         [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1172                 },
1173         },
1174         {
1175                 .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
1176                 .dbuf_mask = {
1177                         [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1178                         [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1179                 },
1180         },
1181         {
1182                 .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
1183                 .dbuf_mask = {
1184                         [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1185                         [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1186                 },
1187         },
1188         {
1189                 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
1190                 .dbuf_mask = {
1191                         [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1192                         [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1193                         [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1194                 },
1195         },
1196         {
1197                 .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
1198                 .dbuf_mask = {
1199                         [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1200                         [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1201                 },
1202         },
1203         {
1204                 .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
1205                 .dbuf_mask = {
1206                         [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1207                         [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1208                         [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1209                 },
1210         },
1211         {
1212                 .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
1213                 .dbuf_mask = {
1214                         [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1215                         [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1216                         [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1217                 },
1218         },
1219         {
1220                 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
1221                 .dbuf_mask = {
1222                         [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1223                         [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1224                         [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1225                         [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1226                 },
1227         },
1228         {}
1229
1230 };
1231
1232 static bool check_mbus_joined(u8 active_pipes,
1233                               const struct dbuf_slice_conf_entry *dbuf_slices)
1234 {
1235         int i;
1236
1237         for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
1238                 if (dbuf_slices[i].active_pipes == active_pipes)
1239                         return dbuf_slices[i].join_mbus;
1240         }
1241         return false;
1242 }
1243
1244 static bool adlp_check_mbus_joined(u8 active_pipes)
1245 {
1246         return check_mbus_joined(active_pipes, adlp_allowed_dbufs);
1247 }
1248
1249 static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus,
1250                               const struct dbuf_slice_conf_entry *dbuf_slices)
1251 {
1252         int i;
1253
1254         for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
1255                 if (dbuf_slices[i].active_pipes == active_pipes &&
1256                     dbuf_slices[i].join_mbus == join_mbus)
1257                         return dbuf_slices[i].dbuf_mask[pipe];
1258         }
1259         return 0;
1260 }
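/*
 * Example lookup (taken from the icl table above):
 * compute_dbuf_slices(PIPE_B, BIT(PIPE_A) | BIT(PIPE_B), false,
 * icl_allowed_dbufs) matches the two pipe A+B entry and returns
 * BIT(DBUF_S2), while an active_pipes combination absent from the
 * table returns 0.
 */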
1261
1262 /*
1263  * This function finds an entry with the same enabled pipe configuration
1264  * and returns the corresponding DBuf slice mask as stated in BSpec for
1265  * the particular platform.
1266  */
1267 static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
1268 {
1269         /*
1270          * FIXME: For ICL this is still a bit unclear, as a previous BSpec
1271          * revision required calculating a "pipe ratio" to determine
1272          * whether one or two slices can be used for single pipe
1273          * configurations, as an additional constraint to the existing
1274          * table. However, based on recent info it should not be the "pipe
1275          * ratio" but rather the ratio between pixel_rate and cdclk with
1276          * additional constants, so for now we use only the table until
1277          * this is clarified; crtc state will be needed again once those
1278          * additional constraints pop up.
1280          */
1281         return compute_dbuf_slices(pipe, active_pipes, join_mbus,
1282                                    icl_allowed_dbufs);
1283 }
1284
1285 static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
1286 {
1287         return compute_dbuf_slices(pipe, active_pipes, join_mbus,
1288                                    tgl_allowed_dbufs);
1289 }
1290
1291 static u8 adlp_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
1292 {
1293         return compute_dbuf_slices(pipe, active_pipes, join_mbus,
1294                                    adlp_allowed_dbufs);
1295 }
1296
1297 static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
1298 {
1299         return compute_dbuf_slices(pipe, active_pipes, join_mbus,
1300                                    dg2_allowed_dbufs);
1301 }
1302
1303 static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus)
1304 {
1305         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1306         enum pipe pipe = crtc->pipe;
1307
1308         if (IS_DG2(i915))
1309                 return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus);
1310         else if (DISPLAY_VER(i915) >= 13)
1311                 return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus);
1312         else if (DISPLAY_VER(i915) == 12)
1313                 return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
1314         else if (DISPLAY_VER(i915) == 11)
1315                 return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
1316         /*
1317          * For anything else just return one slice for now.
1318          * Should be extended for other platforms.
1319          */
1320         return active_pipes & BIT(pipe) ? BIT(DBUF_S1) : 0;
1321 }
1322
1323 static bool
1324 use_minimal_wm0_only(const struct intel_crtc_state *crtc_state,
1325                      struct intel_plane *plane)
1326 {
1327         struct drm_i915_private *i915 = to_i915(plane->base.dev);
1328
1329         return DISPLAY_VER(i915) >= 13 &&
1330                crtc_state->uapi.async_flip &&
1331                plane->async_flip;
1332 }
1333
1334 static u64
1335 skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
1336 {
1337         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1338         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1339         enum plane_id plane_id;
1340         u64 data_rate = 0;
1341
1342         for_each_plane_id_on_crtc(crtc, plane_id) {
1343                 if (plane_id == PLANE_CURSOR)
1344                         continue;
1345
1346                 data_rate += crtc_state->rel_data_rate[plane_id];
1347
1348                 if (DISPLAY_VER(i915) < 11)
1349                         data_rate += crtc_state->rel_data_rate_y[plane_id];
1350         }
1351
1352         return data_rate;
1353 }
1354
1355 static const struct skl_wm_level *
1356 skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
1357                    enum plane_id plane_id,
1358                    int level)
1359 {
1360         const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
1361
1362         if (level == 0 && pipe_wm->use_sagv_wm)
1363                 return &wm->sagv.wm0;
1364
1365         return &wm->wm[level];
1366 }
1367
1368 static const struct skl_wm_level *
1369 skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm,
1370                    enum plane_id plane_id)
1371 {
1372         const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
1373
1374         if (pipe_wm->use_sagv_wm)
1375                 return &wm->sagv.trans_wm;
1376
1377         return &wm->trans_wm;
1378 }
1379
1380 /*
1381  * We only disable the watermarks for each plane if
1382  * they exceed the ddb allocation of said plane. This
1383  * is done so that we don't end up touching cursor
1384  * watermarks needlessly when some other plane reduces
1385  * our max possible watermark level.
1386  *
1387  * Bspec has this to say about the PLANE_WM enable bit:
1388  * "All the watermarks at this level for all enabled
1389  *  planes must be enabled before the level will be used."
1390  * So this is actually safe to do.
1391  */
1392 static void
1393 skl_check_wm_level(struct skl_wm_level *wm, const struct skl_ddb_entry *ddb)
1394 {
1395         if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb))
1396                 memset(wm, 0, sizeof(*wm));
1397 }
1398
1399 static void
1400 skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm,
1401                         const struct skl_ddb_entry *ddb_y, const struct skl_ddb_entry *ddb)
1402 {
1403         if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb_y) ||
1404             uv_wm->min_ddb_alloc > skl_ddb_entry_size(ddb)) {
1405                 memset(wm, 0, sizeof(*wm));
1406                 memset(uv_wm, 0, sizeof(*uv_wm));
1407         }
1408 }
1409
1410 static bool icl_need_wm1_wa(struct drm_i915_private *i915,
1411                             enum plane_id plane_id)
1412 {
1413         /*
1414          * Wa_1408961008:icl, ehl
1415          * Wa_14012656716:tgl, adl
1416          * Underruns with WM1+ disabled
1417          */
1418         return DISPLAY_VER(i915) == 11 ||
1419                (IS_DISPLAY_VER(i915, 12, 13) && plane_id == PLANE_CURSOR);
1420 }
1421
1422 struct skl_plane_ddb_iter {
1423         u64 data_rate;
1424         u16 start, size;
1425 };
1426
1427 static void
1428 skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter,
1429                        struct skl_ddb_entry *ddb,
1430                        const struct skl_wm_level *wm,
1431                        u64 data_rate)
1432 {
1433         u16 size, extra = 0;
1434
1435         if (data_rate) {
1436                 extra = min_t(u16, iter->size,
1437                               DIV64_U64_ROUND_UP(iter->size * data_rate,
1438                                                  iter->data_rate));
1439                 iter->size -= extra;
1440                 iter->data_rate -= data_rate;
1441         }
1442
1443         /*
1444          * Keep ddb entry of all disabled planes explicitly zeroed
1445          * to avoid skl_ddb_add_affected_planes() adding them to
1446          * the state when other planes change their allocations.
1447          */
1448         size = wm->min_ddb_alloc + extra;
1449         if (size)
1450                 iter->start = skl_ddb_entry_init(ddb, iter->start,
1451                                                  iter->start + size);
1452 }
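/*
 * Worked example (hypothetical numbers): with iter->size == 100
 * leftover blocks and iter->data_rate == 4000000, a plane with
 * data_rate == 1000000 gets extra == DIV64_U64_ROUND_UP(100 * 1000000,
 * 4000000) == 25 blocks on top of its wm->min_ddb_alloc, and
 * iter->size/iter->data_rate then shrink by extra and data_rate
 * respectively for the next plane.
 */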
1453
1454 static int
1455 skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
1456                             struct intel_crtc *crtc)
1457 {
1458         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1459         struct intel_crtc_state *crtc_state =
1460                 intel_atomic_get_new_crtc_state(state, crtc);
1461         const struct intel_dbuf_state *dbuf_state =
1462                 intel_atomic_get_new_dbuf_state(state);
1463         const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe];
1464         int num_active = hweight8(dbuf_state->active_pipes);
1465         struct skl_plane_ddb_iter iter;
1466         enum plane_id plane_id;
1467         u16 cursor_size;
1468         u32 blocks;
1469         int level;
1470
1471         /* Clear the partitioning for disabled planes. */
1472         memset(crtc_state->wm.skl.plane_ddb, 0, sizeof(crtc_state->wm.skl.plane_ddb));
1473         memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
1474
1475         if (!crtc_state->hw.active)
1476                 return 0;
1477
1478         iter.start = alloc->start;
1479         iter.size = skl_ddb_entry_size(alloc);
1480         if (iter.size == 0)
1481                 return 0;
1482
1483         /* Allocate fixed number of blocks for cursor. */
1484         cursor_size = skl_cursor_allocation(crtc_state, num_active);
1485         iter.size -= cursor_size;
1486         skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR],
1487                            alloc->end - cursor_size, alloc->end);
1488
1489         iter.data_rate = skl_total_relative_data_rate(crtc_state);
1490
1491         /*
1492          * Find the highest watermark level for which we can satisfy the block
1493          * requirement of active planes.
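         *
         * E.g. (illustrative numbers): with 512 blocks available and
         * planes needing 450 blocks in total at level 3 but 600 at
         * level 4, the loop settles on level 3 (450 <= 512).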
1494          */
1495         for (level = ilk_wm_max_level(i915); level >= 0; level--) {
1496                 blocks = 0;
1497                 for_each_plane_id_on_crtc(crtc, plane_id) {
1498                         const struct skl_plane_wm *wm =
1499                                 &crtc_state->wm.skl.optimal.planes[plane_id];
1500
1501                         if (plane_id == PLANE_CURSOR) {
1502                                 const struct skl_ddb_entry *ddb =
1503                                         &crtc_state->wm.skl.plane_ddb[plane_id];
1504
1505                                 if (wm->wm[level].min_ddb_alloc > skl_ddb_entry_size(ddb)) {
1506                                         drm_WARN_ON(&i915->drm,
1507                                                     wm->wm[level].min_ddb_alloc != U16_MAX);
1508                                         blocks = U32_MAX;
1509                                         break;
1510                                 }
1511                                 continue;
1512                         }
1513
1514                         blocks += wm->wm[level].min_ddb_alloc;
1515                         blocks += wm->uv_wm[level].min_ddb_alloc;
1516                 }
1517
1518                 if (blocks <= iter.size) {
1519                         iter.size -= blocks;
1520                         break;
1521                 }
1522         }
1523
1524         if (level < 0) {
1525                 drm_dbg_kms(&i915->drm,
1526                     "Requested display configuration exceeds system DDB limitations\n");
1527                 drm_dbg_kms(&i915->drm, "minimum required %d/%d\n",
1528                             blocks, iter.size);
1529                 return -EINVAL;
1530         }
1531
1532         /* avoid the WARN later when we don't allocate any extra DDB */
1533         if (iter.data_rate == 0)
1534                 iter.size = 0;
1535
1536         /*
1537          * Grant each plane the blocks it requires at the highest achievable
1538          * watermark level, plus an extra share of the leftover blocks
1539          * proportional to its relative data rate.
1540          */
1541         for_each_plane_id_on_crtc(crtc, plane_id) {
1542                 struct skl_ddb_entry *ddb =
1543                         &crtc_state->wm.skl.plane_ddb[plane_id];
1544                 struct skl_ddb_entry *ddb_y =
1545                         &crtc_state->wm.skl.plane_ddb_y[plane_id];
1546                 const struct skl_plane_wm *wm =
1547                         &crtc_state->wm.skl.optimal.planes[plane_id];
1548
1549                 if (plane_id == PLANE_CURSOR)
1550                         continue;
1551
1552                 if (DISPLAY_VER(i915) < 11 &&
1553                     crtc_state->nv12_planes & BIT(plane_id)) {
1554                         skl_allocate_plane_ddb(&iter, ddb_y, &wm->wm[level],
1555                                                crtc_state->rel_data_rate_y[plane_id]);
1556                         skl_allocate_plane_ddb(&iter, ddb, &wm->uv_wm[level],
1557                                                crtc_state->rel_data_rate[plane_id]);
1558                 } else {
1559                         skl_allocate_plane_ddb(&iter, ddb, &wm->wm[level],
1560                                                crtc_state->rel_data_rate[plane_id]);
1561                 }
1562         }
1563         drm_WARN_ON(&i915->drm, iter.size != 0 || iter.data_rate != 0);
1564
1565         /*
1566          * When we calculated watermark values we didn't know how high
1567          * of a level we'd actually be able to hit, so we just marked
1568          * all levels as "enabled."  Go back now and disable the ones
1569          * that aren't actually possible.
1570          */
1571         for (level++; level <= ilk_wm_max_level(i915); level++) {
1572                 for_each_plane_id_on_crtc(crtc, plane_id) {
1573                         const struct skl_ddb_entry *ddb =
1574                                 &crtc_state->wm.skl.plane_ddb[plane_id];
1575                         const struct skl_ddb_entry *ddb_y =
1576                                 &crtc_state->wm.skl.plane_ddb_y[plane_id];
1577                         struct skl_plane_wm *wm =
1578                                 &crtc_state->wm.skl.optimal.planes[plane_id];
1579
1580                         if (DISPLAY_VER(i915) < 11 &&
1581                             crtc_state->nv12_planes & BIT(plane_id))
1582                                 skl_check_nv12_wm_level(&wm->wm[level],
1583                                                         &wm->uv_wm[level],
1584                                                         ddb_y, ddb);
1585                         else
1586                                 skl_check_wm_level(&wm->wm[level], ddb);
1587
1588                         if (icl_need_wm1_wa(i915, plane_id) &&
1589                             level == 1 && !wm->wm[level].enable &&
1590                             wm->wm[0].enable) {
1591                                 wm->wm[level].blocks = wm->wm[0].blocks;
1592                                 wm->wm[level].lines = wm->wm[0].lines;
1593                                 wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
1594                         }
1595                 }
1596         }
1597
1598         /*
1599          * Go back and disable the transition and SAGV watermarks
1600          * if it turns out we don't have enough DDB blocks for them.
1601          */
1602         for_each_plane_id_on_crtc(crtc, plane_id) {
1603                 const struct skl_ddb_entry *ddb =
1604                         &crtc_state->wm.skl.plane_ddb[plane_id];
1605                 const struct skl_ddb_entry *ddb_y =
1606                         &crtc_state->wm.skl.plane_ddb_y[plane_id];
1607                 struct skl_plane_wm *wm =
1608                         &crtc_state->wm.skl.optimal.planes[plane_id];
1609
1610                 if (DISPLAY_VER(i915) < 11 &&
1611                     crtc_state->nv12_planes & BIT(plane_id)) {
1612                         skl_check_wm_level(&wm->trans_wm, ddb_y);
1613                 } else {
1614                         WARN_ON(skl_ddb_entry_size(ddb_y));
1615
1616                         skl_check_wm_level(&wm->trans_wm, ddb);
1617                 }
1618
1619                 skl_check_wm_level(&wm->sagv.wm0, ddb);
1620                 skl_check_wm_level(&wm->sagv.trans_wm, ddb);
1621         }
1622
1623         return 0;
1624 }
1625
1626 /*
1627  * The max latency should be 257 (max the punit can encode is 255 and we add 2us
1628  * for the read latency) and cpp should always be <= 8, so that
1629  * should allow pixel_rate up to ~2 GHz which seems sufficient since max
1630  * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
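 *
 * Sanity check (illustrative, assuming pixel_rate is in kHz as
 * elsewhere in this file): the worst case intermediate value is
 * 257 us * 2,000,000 kHz * 8 ~= 4.11e9, which still fits in a
 * u32 (~4.29e9).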
1631  */
1632 static uint_fixed_16_16_t
1633 skl_wm_method1(const struct drm_i915_private *i915, u32 pixel_rate,
1634                u8 cpp, u32 latency, u32 dbuf_block_size)
1635 {
1636         u32 wm_intermediate_val;
1637         uint_fixed_16_16_t ret;
1638
1639         if (latency == 0)
1640                 return FP_16_16_MAX;
1641
1642         wm_intermediate_val = latency * pixel_rate * cpp;
1643         ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);
1644
1645         if (DISPLAY_VER(i915) >= 10)
1646                 ret = add_fixed16_u32(ret, 1);
1647
1648         return ret;
1649 }
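
/*
 * Worked example (illustrative numbers only): a 4k@60 plane with
 * plane_pixel_rate = 594000 kHz, cpp = 4, latency = 10 us and a
 * 512 byte dbuf block size gives 10 * 594000 * 4 / (1000 * 512)
 * ~= 46.4 blocks, plus the +1 applied on display ver 10+.
 */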
1650
1651 static uint_fixed_16_16_t
1652 skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
1653                uint_fixed_16_16_t plane_blocks_per_line)
1654 {
1655         u32 wm_intermediate_val;
1656         uint_fixed_16_16_t ret;
1657
1658         if (latency == 0)
1659                 return FP_16_16_MAX;
1660
1661         wm_intermediate_val = latency * pixel_rate;
1662         wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
1663                                            pipe_htotal * 1000);
1664         ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
1665         return ret;
1666 }
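
/*
 * Worked example (illustrative numbers only): latency = 10 us,
 * pixel_rate = 594000 kHz and pipe_htotal = 4400 give
 * DIV_ROUND_UP(10 * 594000, 4400 * 1000) = 2 lines worth of
 * blocks, i.e. 2 * plane_blocks_per_line.
 */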
1667
1668 static uint_fixed_16_16_t
1669 intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
1670 {
1671         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1672         u32 pixel_rate;
1673         u32 crtc_htotal;
1674         uint_fixed_16_16_t linetime_us;
1675
1676         if (!crtc_state->hw.active)
1677                 return u32_to_fixed16(0);
1678
1679         pixel_rate = crtc_state->pixel_rate;
1680
1681         if (drm_WARN_ON(&i915->drm, pixel_rate == 0))
1682                 return u32_to_fixed16(0);
1683
1684         crtc_htotal = crtc_state->hw.pipe_mode.crtc_htotal;
1685         linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
1686
1687         return linetime_us;
1688 }
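
/*
 * E.g. (illustrative numbers): crtc_htotal = 4400 and pixel_rate =
 * 594000 kHz give a linetime of 4400 * 1000 / 594000 ~= 7.4 us.
 */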
1689
1690 static int
1691 skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
1692                       int width, const struct drm_format_info *format,
1693                       u64 modifier, unsigned int rotation,
1694                       u32 plane_pixel_rate, struct skl_wm_params *wp,
1695                       int color_plane)
1696 {
1697         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1698         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1699         u32 interm_pbpl;
1700
1701         /* only planar formats have two planes */
1702         if (color_plane == 1 &&
1703             !intel_format_info_is_yuv_semiplanar(format, modifier)) {
1704                 drm_dbg_kms(&i915->drm,
1705                             "Non-planar formats have a single plane\n");
1706                 return -EINVAL;
1707         }
1708
1709         wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
1710         wp->y_tiled = modifier != I915_FORMAT_MOD_X_TILED &&
1711                 intel_fb_is_tiled_modifier(modifier);
1712         wp->rc_surface = intel_fb_is_ccs_modifier(modifier);
1713         wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
1714
1715         wp->width = width;
1716         if (color_plane == 1 && wp->is_planar)
1717                 wp->width /= 2;
1718
1719         wp->cpp = format->cpp[color_plane];
1720         wp->plane_pixel_rate = plane_pixel_rate;
1721
1722         if (DISPLAY_VER(i915) >= 11 &&
1723             modifier == I915_FORMAT_MOD_Yf_TILED  && wp->cpp == 1)
1724                 wp->dbuf_block_size = 256;
1725         else
1726                 wp->dbuf_block_size = 512;
1727
1728         if (drm_rotation_90_or_270(rotation)) {
1729                 switch (wp->cpp) {
1730                 case 1:
1731                         wp->y_min_scanlines = 16;
1732                         break;
1733                 case 2:
1734                         wp->y_min_scanlines = 8;
1735                         break;
1736                 case 4:
1737                         wp->y_min_scanlines = 4;
1738                         break;
1739                 default:
1740                         MISSING_CASE(wp->cpp);
1741                         return -EINVAL;
1742                 }
1743         } else {
1744                 wp->y_min_scanlines = 4;
1745         }
1746
1747         if (skl_needs_memory_bw_wa(i915))
1748                 wp->y_min_scanlines *= 2;
1749
1750         wp->plane_bytes_per_line = wp->width * wp->cpp;
1751         if (wp->y_tiled) {
1752                 interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *
1753                                            wp->y_min_scanlines,
1754                                            wp->dbuf_block_size);
1755
1756                 if (DISPLAY_VER(i915) >= 10)
1757                         interm_pbpl++;
1758
1759                 wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
1760                                                         wp->y_min_scanlines);
1761         } else {
1762                 interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
1763                                            wp->dbuf_block_size);
1764
1765                 if (!wp->x_tiled || DISPLAY_VER(i915) >= 10)
1766                         interm_pbpl++;
1767
1768                 wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
1769         }
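
        /*
         * E.g. (illustrative numbers): a 3840 pixel wide linear plane
         * with cpp = 4 has 15360 bytes per line, i.e. 30 blocks of
         * 512 bytes, plus one extra block (not x-tiled) -> 31 blocks.
         */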
1770
1771         wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
1772                                              wp->plane_blocks_per_line);
1773
1774         wp->linetime_us = fixed16_to_u32_round_up(intel_get_linetime_us(crtc_state));
1775
1776         return 0;
1777 }
1778
1779 static int
1780 skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
1781                             const struct intel_plane_state *plane_state,
1782                             struct skl_wm_params *wp, int color_plane)
1783 {
1784         const struct drm_framebuffer *fb = plane_state->hw.fb;
1785         int width;
1786
1787         /*
1788          * Src coordinates are already rotated by 270 degrees for
1789          * the 90/270 degree plane rotation cases (to match the
1790          * GTT mapping), hence no need to account for rotation here.
1791          */
1792         width = drm_rect_width(&plane_state->uapi.src) >> 16;
1793
1794         return skl_compute_wm_params(crtc_state, width,
1795                                      fb->format, fb->modifier,
1796                                      plane_state->hw.rotation,
1797                                      intel_plane_pixel_rate(crtc_state, plane_state),
1798                                      wp, color_plane);
1799 }
1800
1801 static bool skl_wm_has_lines(struct drm_i915_private *i915, int level)
1802 {
1803         if (DISPLAY_VER(i915) >= 10)
1804                 return true;
1805
1806         /* The number of lines is ignored for the level 0 watermark. */
1807         return level > 0;
1808 }
1809
1810 static int skl_wm_max_lines(struct drm_i915_private *i915)
1811 {
1812         if (DISPLAY_VER(i915) >= 13)
1813                 return 255;
1814         else
1815                 return 31;
1816 }
1817
1818 static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
1819                                  struct intel_plane *plane,
1820                                  int level,
1821                                  unsigned int latency,
1822                                  const struct skl_wm_params *wp,
1823                                  const struct skl_wm_level *result_prev,
1824                                  struct skl_wm_level *result /* out */)
1825 {
1826         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1827         uint_fixed_16_16_t method1, method2;
1828         uint_fixed_16_16_t selected_result;
1829         u32 blocks, lines, min_ddb_alloc = 0;
1830
1831         if (latency == 0 ||
1832             (use_minimal_wm0_only(crtc_state, plane) && level > 0)) {
1833                 /* reject it */
1834                 result->min_ddb_alloc = U16_MAX;
1835                 return;
1836         }
1837
1838         /*
1839          * WaIncreaseLatencyIPCEnabled: kbl,cfl
1840          * Display WA #1141: kbl,cfl
1841          */
1842         if ((IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) &&
1843             skl_watermark_ipc_enabled(i915))
1844                 latency += 4;
1845
1846         if (skl_needs_memory_bw_wa(i915) && wp->x_tiled)
1847                 latency += 15;
1848
1849         method1 = skl_wm_method1(i915, wp->plane_pixel_rate,
1850                                  wp->cpp, latency, wp->dbuf_block_size);
1851         method2 = skl_wm_method2(wp->plane_pixel_rate,
1852                                  crtc_state->hw.pipe_mode.crtc_htotal,
1853                                  latency,
1854                                  wp->plane_blocks_per_line);
1855
1856         if (wp->y_tiled) {
1857                 selected_result = max_fixed16(method2, wp->y_tile_minimum);
1858         } else {
1859                 if ((wp->cpp * crtc_state->hw.pipe_mode.crtc_htotal /
1860                      wp->dbuf_block_size < 1) &&
1861                      (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
1862                         selected_result = method2;
1863                 } else if (latency >= wp->linetime_us) {
1864                         if (DISPLAY_VER(i915) == 9)
1865                                 selected_result = min_fixed16(method1, method2);
1866                         else
1867                                 selected_result = method2;
1868                 } else {
1869                         selected_result = method1;
1870                 }
1871         }
1872
1873         blocks = fixed16_to_u32_round_up(selected_result) + 1;
1874         /*
1875          * Let's have blocks at minimum equivalent to plane_blocks_per_line,
1876          * as there will be at minimum one line in the lines configuration. This
1877          * is a workaround for FIFO underruns observed with resolutions like
1878          * 4k 60 Hz in single channel DRAM configurations.
1879          *
1880          * As per the Bspec 49325, if the ddb allocation can hold at least
1881          * one plane_blocks_per_line, we should have selected method2 in
1882          * the above logic. Assuming that modern versions have enough dbuf
1883          * and method2 guarantees blocks equivalent to at least 1 line,
1884          * select the blocks as plane_blocks_per_line.
1885          *
1886          * TODO: Revisit the logic when we have better understanding on DRAM
1887          * channels' impact on the level 0 memory latency and the relevant
1888          * wm calculations.
1889          */
1890         if (skl_wm_has_lines(i915, level))
1891                 blocks = max(blocks,
1892                              fixed16_to_u32_round_up(wp->plane_blocks_per_line));
1893         lines = div_round_up_fixed16(selected_result,
1894                                      wp->plane_blocks_per_line);
1895
1896         if (DISPLAY_VER(i915) == 9) {
1897                 /* Display WA #1125: skl,bxt,kbl */
1898                 if (level == 0 && wp->rc_surface)
1899                         blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
1900
1901                 /* Display WA #1126: skl,bxt,kbl */
1902                 if (level >= 1 && level <= 7) {
1903                         if (wp->y_tiled) {
1904                                 blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
1905                                 lines += wp->y_min_scanlines;
1906                         } else {
1907                                 blocks++;
1908                         }
1909
1910                         /*
1911                          * Make sure result blocks for higher latency levels are
1912          * at least as high as the level below the current level.
1913                          * Assumption in DDB algorithm optimization for special
1914                          * cases. Also covers Display WA #1125 for RC.
1915                          */
1916                         if (result_prev->blocks > blocks)
1917                                 blocks = result_prev->blocks;
1918                 }
1919         }
1920
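        /*
         * E.g. (illustrative numbers): for a y-tiled plane with
         * lines = 10 and y_min_scanlines = 4, extra_lines =
         * 4 * 2 - 10 % 4 = 6, rounding the allocation up to a full
         * 16 line window (a multiple of y_min_scanlines).
         */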
1921         if (DISPLAY_VER(i915) >= 11) {
1922                 if (wp->y_tiled) {
1923                         int extra_lines;
1924
1925                         if (lines % wp->y_min_scanlines == 0)
1926                                 extra_lines = wp->y_min_scanlines;
1927                         else
1928                                 extra_lines = wp->y_min_scanlines * 2 -
1929                                         lines % wp->y_min_scanlines;
1930
1931                         min_ddb_alloc = mul_round_up_u32_fixed16(lines + extra_lines,
1932                                                                  wp->plane_blocks_per_line);
1933                 } else {
1934                         min_ddb_alloc = blocks + DIV_ROUND_UP(blocks, 10);
1935                 }
1936         }
1937
1938         if (!skl_wm_has_lines(i915, level))
1939                 lines = 0;
1940
1941         if (lines > skl_wm_max_lines(i915)) {
1942                 /* reject it */
1943                 result->min_ddb_alloc = U16_MAX;
1944                 return;
1945         }
1946
1947         /*
1948          * If lines is valid, assume we can use this watermark level
1949          * for now.  We'll come back and disable it after we calculate the
1950          * DDB allocation if it turns out we don't actually have enough
1951          * blocks to satisfy it.
1952          */
1953         result->blocks = blocks;
1954         result->lines = lines;
1955         /* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */
1956         result->min_ddb_alloc = max(min_ddb_alloc, blocks) + 1;
1957         result->enable = true;
1958
1959         if (DISPLAY_VER(i915) < 12 && i915->display.sagv.block_time_us)
1960                 result->can_sagv = latency >= i915->display.sagv.block_time_us;
1961 }
1962
1963 static void
1964 skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
1965                       struct intel_plane *plane,
1966                       const struct skl_wm_params *wm_params,
1967                       struct skl_wm_level *levels)
1968 {
1969         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1970         int level, max_level = ilk_wm_max_level(i915);
1971         struct skl_wm_level *result_prev = &levels[0];
1972
1973         for (level = 0; level <= max_level; level++) {
1974                 struct skl_wm_level *result = &levels[level];
1975                 unsigned int latency = i915->display.wm.skl_latency[level];
1976
1977                 skl_compute_plane_wm(crtc_state, plane, level, latency,
1978                                      wm_params, result_prev, result);
1979
1980                 result_prev = result;
1981         }
1982 }
1983
1984 static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
1985                                 struct intel_plane *plane,
1986                                 const struct skl_wm_params *wm_params,
1987                                 struct skl_plane_wm *plane_wm)
1988 {
1989         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1990         struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0;
1991         struct skl_wm_level *levels = plane_wm->wm;
1992         unsigned int latency = 0;
1993
1994         if (i915->display.sagv.block_time_us)
1995                 latency = i915->display.sagv.block_time_us + i915->display.wm.skl_latency[0];
1996
1997         skl_compute_plane_wm(crtc_state, plane, 0, latency,
1998                              wm_params, &levels[0],
1999                              sagv_wm);
2000 }
2001
2002 static void skl_compute_transition_wm(struct drm_i915_private *i915,
2003                                       struct skl_wm_level *trans_wm,
2004                                       const struct skl_wm_level *wm0,
2005                                       const struct skl_wm_params *wp)
2006 {
2007         u16 trans_min, trans_amount, trans_y_tile_min;
2008         u16 wm0_blocks, trans_offset, blocks;
2009
2010         /* Transition WMs don't make any sense if IPC is disabled */
2011         if (!skl_watermark_ipc_enabled(i915))
2012                 return;
2013
2014         /*
2015          * WaDisableTWM:skl,kbl,cfl,bxt
2016          * Transition WMs are not recommended by the HW team for GEN9
2017          */
2018         if (DISPLAY_VER(i915) == 9)
2019                 return;
2020
2021         if (DISPLAY_VER(i915) >= 11)
2022                 trans_min = 4;
2023         else
2024                 trans_min = 14;
2025
2026         /* Display WA #1140: glk,cnl */
2027         if (DISPLAY_VER(i915) == 10)
2028                 trans_amount = 0;
2029         else
2030                 trans_amount = 10; /* This is a configurable amount */
2031
2032         trans_offset = trans_min + trans_amount;
2033
2034         /*
2035          * The spec asks for Selected Result Blocks for wm0 (the real value),
2036          * not Result Blocks (the integer value). Pay attention to the capital
2037          * letters. The value wm_l0->blocks is actually Result Blocks, but
2038          * since Result Blocks is the ceiling of Selected Result Blocks plus 1,
2039          * and since we later will have to get the ceiling of the sum in the
2040          * transition watermarks calculation, we can just pretend Selected
2041          * Result Blocks is Result Blocks minus 1 and it should work for the
2042          * current platforms.
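         *
         * Worked example (illustrative numbers only): on icl with a
         * linear plane, trans_offset = 4 + 10 = 14; wm0->blocks = 47
         * then yields blocks = (47 - 1) + 14 + 1 = 61.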
2043          */
2044         wm0_blocks = wm0->blocks - 1;
2045
2046         if (wp->y_tiled) {
2047                 trans_y_tile_min =
2048                         (u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum);
2049                 blocks = max(wm0_blocks, trans_y_tile_min) + trans_offset;
2050         } else {
2051                 blocks = wm0_blocks + trans_offset;
2052         }
2053         blocks++;
2054
2055         /*
2056          * Just assume we can enable the transition watermark.  After
2057          * computing the DDB we'll come back and disable it if that
2058          * assumption turns out to be false.
2059          */
2060         trans_wm->blocks = blocks;
2061         trans_wm->min_ddb_alloc = max_t(u16, wm0->min_ddb_alloc, blocks + 1);
2062         trans_wm->enable = true;
2063 }
2064
2065 static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
2066                                      const struct intel_plane_state *plane_state,
2067                                      struct intel_plane *plane, int color_plane)
2068 {
2069         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2070         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2071         struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
2072         struct skl_wm_params wm_params;
2073         int ret;
2074
2075         ret = skl_compute_plane_wm_params(crtc_state, plane_state,
2076                                           &wm_params, color_plane);
2077         if (ret)
2078                 return ret;
2079
2080         skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->wm);
2081
2082         skl_compute_transition_wm(i915, &wm->trans_wm,
2083                                   &wm->wm[0], &wm_params);
2084
2085         if (DISPLAY_VER(i915) >= 12) {
2086                 tgl_compute_sagv_wm(crtc_state, plane, &wm_params, wm);
2087
2088                 skl_compute_transition_wm(i915, &wm->sagv.trans_wm,
2089                                           &wm->sagv.wm0, &wm_params);
2090         }
2091
2092         return 0;
2093 }
2094
2095 static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
2096                                  const struct intel_plane_state *plane_state,
2097                                  struct intel_plane *plane)
2098 {
2099         struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
2100         struct skl_wm_params wm_params;
2101         int ret;
2102
2103         wm->is_planar = true;
2104
2105         /* uv plane watermarks must also be validated for NV12/Planar */
2106         ret = skl_compute_plane_wm_params(crtc_state, plane_state,
2107                                           &wm_params, 1);
2108         if (ret)
2109                 return ret;
2110
2111         skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->uv_wm);
2112
2113         return 0;
2114 }
2115
2116 static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
2117                               const struct intel_plane_state *plane_state)
2118 {
2119         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2120         enum plane_id plane_id = plane->id;
2121         struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
2122         const struct drm_framebuffer *fb = plane_state->hw.fb;
2123         int ret;
2124
2125         memset(wm, 0, sizeof(*wm));
2126
2127         if (!intel_wm_plane_visible(crtc_state, plane_state))
2128                 return 0;
2129
2130         ret = skl_build_plane_wm_single(crtc_state, plane_state,
2131                                         plane, 0);
2132         if (ret)
2133                 return ret;
2134
2135         if (fb->format->is_yuv && fb->format->num_planes > 1) {
2136                 ret = skl_build_plane_wm_uv(crtc_state, plane_state,
2137                                             plane);
2138                 if (ret)
2139                         return ret;
2140         }
2141
2142         return 0;
2143 }
2144
2145 static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
2146                               const struct intel_plane_state *plane_state)
2147 {
2148         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2149         struct drm_i915_private *i915 = to_i915(plane->base.dev);
2150         enum plane_id plane_id = plane->id;
2151         struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
2152         int ret;
2153
2154         /* Watermarks calculated in master */
2155         if (plane_state->planar_slave)
2156                 return 0;
2157
2158         memset(wm, 0, sizeof(*wm));
2159
2160         if (plane_state->planar_linked_plane) {
2161                 const struct drm_framebuffer *fb = plane_state->hw.fb;
2162
2163                 drm_WARN_ON(&i915->drm,
2164                             !intel_wm_plane_visible(crtc_state, plane_state));
2165                 drm_WARN_ON(&i915->drm, !fb->format->is_yuv ||
2166                             fb->format->num_planes == 1);
2167
2168                 ret = skl_build_plane_wm_single(crtc_state, plane_state,
2169                                                 plane_state->planar_linked_plane, 0);
2170                 if (ret)
2171                         return ret;
2172
2173                 ret = skl_build_plane_wm_single(crtc_state, plane_state,
2174                                                 plane, 1);
2175                 if (ret)
2176                         return ret;
2177         } else if (intel_wm_plane_visible(crtc_state, plane_state)) {
2178                 ret = skl_build_plane_wm_single(crtc_state, plane_state,
2179                                                 plane, 0);
2180                 if (ret)
2181                         return ret;
2182         }
2183
2184         return 0;
2185 }
2186
2187 static int skl_build_pipe_wm(struct intel_atomic_state *state,
2188                              struct intel_crtc *crtc)
2189 {
2190         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2191         struct intel_crtc_state *crtc_state =
2192                 intel_atomic_get_new_crtc_state(state, crtc);
2193         const struct intel_plane_state *plane_state;
2194         struct intel_plane *plane;
2195         int ret, i;
2196
2197         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
2198                 /*
2199                  * FIXME should perhaps check {old,new}_plane_crtc->hw.crtc
2200                  * instead, but we don't populate that correctly for NV12 Y
2201                  * planes, so for now hack around it.
2202                  */
2203                 if (plane->pipe != crtc->pipe)
2204                         continue;
2205
2206                 if (DISPLAY_VER(i915) >= 11)
2207                         ret = icl_build_plane_wm(crtc_state, plane_state);
2208                 else
2209                         ret = skl_build_plane_wm(crtc_state, plane_state);
2210                 if (ret)
2211                         return ret;
2212         }
2213
2214         crtc_state->wm.skl.optimal = crtc_state->wm.skl.raw;
2215
2216         return 0;
2217 }
2218
2219 static void skl_ddb_entry_write(struct drm_i915_private *i915,
2220                                 i915_reg_t reg,
2221                                 const struct skl_ddb_entry *entry)
2222 {
2223         if (entry->end)
2224                 intel_de_write_fw(i915, reg,
2225                                   PLANE_BUF_END(entry->end - 1) |
2226                                   PLANE_BUF_START(entry->start));
2227         else
2228                 intel_de_write_fw(i915, reg, 0);
2229 }
2230
2231 static void skl_write_wm_level(struct drm_i915_private *i915,
2232                                i915_reg_t reg,
2233                                const struct skl_wm_level *level)
2234 {
2235         u32 val = 0;
2236
2237         if (level->enable)
2238                 val |= PLANE_WM_EN;
2239         if (level->ignore_lines)
2240                 val |= PLANE_WM_IGNORE_LINES;
2241         val |= REG_FIELD_PREP(PLANE_WM_BLOCKS_MASK, level->blocks);
2242         val |= REG_FIELD_PREP(PLANE_WM_LINES_MASK, level->lines);
2243
2244         intel_de_write_fw(i915, reg, val);
2245 }
2246
2247 void skl_write_plane_wm(struct intel_plane *plane,
2248                         const struct intel_crtc_state *crtc_state)
2249 {
2250         struct drm_i915_private *i915 = to_i915(plane->base.dev);
2251         int level, max_level = ilk_wm_max_level(i915);
2252         enum plane_id plane_id = plane->id;
2253         enum pipe pipe = plane->pipe;
2254         const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
2255         const struct skl_ddb_entry *ddb =
2256                 &crtc_state->wm.skl.plane_ddb[plane_id];
2257         const struct skl_ddb_entry *ddb_y =
2258                 &crtc_state->wm.skl.plane_ddb_y[plane_id];
2259
2260         for (level = 0; level <= max_level; level++)
2261                 skl_write_wm_level(i915, PLANE_WM(pipe, plane_id, level),
2262                                    skl_plane_wm_level(pipe_wm, plane_id, level));
2263
2264         skl_write_wm_level(i915, PLANE_WM_TRANS(pipe, plane_id),
2265                            skl_plane_trans_wm(pipe_wm, plane_id));
2266
2267         if (HAS_HW_SAGV_WM(i915)) {
2268                 const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
2269
2270                 skl_write_wm_level(i915, PLANE_WM_SAGV(pipe, plane_id),
2271                                    &wm->sagv.wm0);
2272                 skl_write_wm_level(i915, PLANE_WM_SAGV_TRANS(pipe, plane_id),
2273                                    &wm->sagv.trans_wm);
2274         }
2275
2276         skl_ddb_entry_write(i915,
2277                             PLANE_BUF_CFG(pipe, plane_id), ddb);
2278
2279         if (DISPLAY_VER(i915) < 11)
2280                 skl_ddb_entry_write(i915,
2281                                     PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_y);
2282 }
2283
2284 void skl_write_cursor_wm(struct intel_plane *plane,
2285                          const struct intel_crtc_state *crtc_state)
2286 {
2287         struct drm_i915_private *i915 = to_i915(plane->base.dev);
2288         int level, max_level = ilk_wm_max_level(i915);
2289         enum plane_id plane_id = plane->id;
2290         enum pipe pipe = plane->pipe;
2291         const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
2292         const struct skl_ddb_entry *ddb =
2293                 &crtc_state->wm.skl.plane_ddb[plane_id];
2294
2295         for (level = 0; level <= max_level; level++)
2296                 skl_write_wm_level(i915, CUR_WM(pipe, level),
2297                                    skl_plane_wm_level(pipe_wm, plane_id, level));
2298
2299         skl_write_wm_level(i915, CUR_WM_TRANS(pipe),
2300                            skl_plane_trans_wm(pipe_wm, plane_id));
2301
2302         if (HAS_HW_SAGV_WM(i915)) {
2303                 const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
2304
2305                 skl_write_wm_level(i915, CUR_WM_SAGV(pipe),
2306                                    &wm->sagv.wm0);
2307                 skl_write_wm_level(i915, CUR_WM_SAGV_TRANS(pipe),
2308                                    &wm->sagv.trans_wm);
2309         }
2310
2311         skl_ddb_entry_write(i915, CUR_BUF_CFG(pipe), ddb);
2312 }
2313
2314 static bool skl_wm_level_equals(const struct skl_wm_level *l1,
2315                                 const struct skl_wm_level *l2)
2316 {
2317         return l1->enable == l2->enable &&
2318                 l1->ignore_lines == l2->ignore_lines &&
2319                 l1->lines == l2->lines &&
2320                 l1->blocks == l2->blocks;
2321 }
2322
2323 static bool skl_plane_wm_equals(struct drm_i915_private *i915,
2324                                 const struct skl_plane_wm *wm1,
2325                                 const struct skl_plane_wm *wm2)
2326 {
2327         int level, max_level = ilk_wm_max_level(i915);
2328
2329         for (level = 0; level <= max_level; level++) {
2330                 /*
2331                  * We don't check uv_wm as the hardware doesn't actually
2332                  * use it. It only gets used for calculating the required
2333                  * ddb allocation.
2334                  */
2335                 if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]))
2336                         return false;
2337         }
2338
2339         return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm) &&
2340                 skl_wm_level_equals(&wm1->sagv.wm0, &wm2->sagv.wm0) &&
2341                 skl_wm_level_equals(&wm1->sagv.trans_wm, &wm2->sagv.trans_wm);
2342 }
2343
2344 static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
2345                                     const struct skl_ddb_entry *b)
2346 {
2347         return a->start < b->end && b->start < a->end;
2348 }
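
/*
 * E.g. (illustrative): [0, 512) and [256, 768) overlap, whereas
 * [0, 256) and [256, 512) do not, since 'end' is exclusive.
 */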
2349
2350 static void skl_ddb_entry_union(struct skl_ddb_entry *a,
2351                                 const struct skl_ddb_entry *b)
2352 {
2353         if (a->end && b->end) {
2354                 a->start = min(a->start, b->start);
2355                 a->end = max(a->end, b->end);
2356         } else if (b->end) {
2357                 a->start = b->start;
2358                 a->end = b->end;
2359         }
2360 }
2361
2362 bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
2363                                  const struct skl_ddb_entry *entries,
2364                                  int num_entries, int ignore_idx)
2365 {
2366         int i;
2367
2368         for (i = 0; i < num_entries; i++) {
2369                 if (i != ignore_idx &&
2370                     skl_ddb_entries_overlap(ddb, &entries[i]))
2371                         return true;
2372         }
2373
2374         return false;
2375 }
2376
2377 static int
2378 skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
2379                             struct intel_crtc_state *new_crtc_state)
2380 {
2381         struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state);
2382         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
2383         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2384         struct intel_plane *plane;
2385
2386         for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
2387                 struct intel_plane_state *plane_state;
2388                 enum plane_id plane_id = plane->id;
2389
2390                 if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb[plane_id],
2391                                         &new_crtc_state->wm.skl.plane_ddb[plane_id]) &&
2392                     skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
2393                                         &new_crtc_state->wm.skl.plane_ddb_y[plane_id]))
2394                         continue;
2395
2396                 plane_state = intel_atomic_get_plane_state(state, plane);
2397                 if (IS_ERR(plane_state))
2398                         return PTR_ERR(plane_state);
2399
2400                 new_crtc_state->update_planes |= BIT(plane_id);
2401         }
2402
2403         return 0;
2404 }
2405
2406 static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
2407 {
2408         struct drm_i915_private *i915 = to_i915(dbuf_state->base.state->base.dev);
2409         u8 enabled_slices;
2410         enum pipe pipe;
2411
2412         /*
2413          * FIXME: For now we always enable slice S1 as per
2414          * the Bspec display initialization sequence.
2415          */
2416         enabled_slices = BIT(DBUF_S1);
2417
2418         for_each_pipe(i915, pipe)
2419                 enabled_slices |= dbuf_state->slices[pipe];
2420
2421         return enabled_slices;
2422 }
2423
2424 static int
2425 skl_compute_ddb(struct intel_atomic_state *state)
2426 {
2427         struct drm_i915_private *i915 = to_i915(state->base.dev);
2428         const struct intel_dbuf_state *old_dbuf_state;
2429         struct intel_dbuf_state *new_dbuf_state = NULL;
2430         const struct intel_crtc_state *old_crtc_state;
2431         struct intel_crtc_state *new_crtc_state;
2432         struct intel_crtc *crtc;
2433         int ret, i;
2434
2435         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
2436                 new_dbuf_state = intel_atomic_get_dbuf_state(state);
2437                 if (IS_ERR(new_dbuf_state))
2438                         return PTR_ERR(new_dbuf_state);
2439
2440                 old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
2441                 break;
2442         }
2443
2444         if (!new_dbuf_state)
2445                 return 0;
2446
2447         new_dbuf_state->active_pipes =
2448                 intel_calc_active_pipes(state, old_dbuf_state->active_pipes);
2449
2450         if (old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) {
2451                 ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
2452                 if (ret)
2453                         return ret;
2454         }
2455
2456         if (HAS_MBUS_JOINING(i915))
2457                 new_dbuf_state->joined_mbus =
2458                         adlp_check_mbus_joined(new_dbuf_state->active_pipes);
2459
2460         for_each_intel_crtc(&i915->drm, crtc) {
2461                 enum pipe pipe = crtc->pipe;
2462
2463                 new_dbuf_state->slices[pipe] =
2464                         skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes,
2465                                                 new_dbuf_state->joined_mbus);
2466
2467                 if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe])
2468                         continue;
2469
2470                 ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
2471                 if (ret)
2472                         return ret;
2473         }
2474
2475         new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state);
2476
2477         if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices ||
2478             old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
2479                 ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
2480                 if (ret)
2481                         return ret;
2482
2483                 if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
2484                         /* TODO: Implement vblank synchronized MBUS joining changes */
2485                         ret = intel_modeset_all_pipes(state, "MBUS joining change");
2486                         if (ret)
2487                                 return ret;
2488                 }
2489
2490                 drm_dbg_kms(&i915->drm,
2491                             "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n",
2492                             old_dbuf_state->enabled_slices,
2493                             new_dbuf_state->enabled_slices,
2494                             INTEL_INFO(i915)->display.dbuf.slice_mask,
2495                             str_yes_no(old_dbuf_state->joined_mbus),
2496                             str_yes_no(new_dbuf_state->joined_mbus));
2497         }
2498
2499         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
2500                 enum pipe pipe = crtc->pipe;
2501
2502                 new_dbuf_state->weight[pipe] = intel_crtc_ddb_weight(new_crtc_state);
2503
2504                 if (old_dbuf_state->weight[pipe] == new_dbuf_state->weight[pipe])
2505                         continue;
2506
2507                 ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
2508                 if (ret)
2509                         return ret;
2510         }
2511
2512         for_each_intel_crtc(&i915->drm, crtc) {
2513                 ret = skl_crtc_allocate_ddb(state, crtc);
2514                 if (ret)
2515                         return ret;
2516         }
2517
2518         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
2519                                             new_crtc_state, i) {
2520                 ret = skl_crtc_allocate_plane_ddb(state, crtc);
2521                 if (ret)
2522                         return ret;
2523
2524                 ret = skl_ddb_add_affected_planes(old_crtc_state,
2525                                                   new_crtc_state);
2526                 if (ret)
2527                         return ret;
2528         }
2529
2530         return 0;
2531 }
2532
2533 static char enast(bool enable)
2534 {
2535         return enable ? '*' : ' ';
2536 }
2537
2538 static void
2539 skl_print_wm_changes(struct intel_atomic_state *state)
2540 {
2541         struct drm_i915_private *i915 = to_i915(state->base.dev);
2542         const struct intel_crtc_state *old_crtc_state;
2543         const struct intel_crtc_state *new_crtc_state;
2544         struct intel_plane *plane;
2545         struct intel_crtc *crtc;
2546         int i;
2547
2548         if (!drm_debug_enabled(DRM_UT_KMS))
2549                 return;
2550
2551         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
2552                                             new_crtc_state, i) {
2553                 const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm;
2554
2555                 old_pipe_wm = &old_crtc_state->wm.skl.optimal;
2556                 new_pipe_wm = &new_crtc_state->wm.skl.optimal;
2557
2558                 for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
2559                         enum plane_id plane_id = plane->id;
2560                         const struct skl_ddb_entry *old, *new;
2561
2562                         old = &old_crtc_state->wm.skl.plane_ddb[plane_id];
2563                         new = &new_crtc_state->wm.skl.plane_ddb[plane_id];
2564
2565                         if (skl_ddb_entry_equal(old, new))
2566                                 continue;
2567
2568                         drm_dbg_kms(&i915->drm,
2569                                     "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
2570                                     plane->base.base.id, plane->base.name,
2571                                     old->start, old->end, new->start, new->end,
2572                                     skl_ddb_entry_size(old), skl_ddb_entry_size(new));
2573                 }
2574
2575                 for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
2576                         enum plane_id plane_id = plane->id;
2577                         const struct skl_plane_wm *old_wm, *new_wm;
2578
2579                         old_wm = &old_pipe_wm->planes[plane_id];
2580                         new_wm = &new_pipe_wm->planes[plane_id];
2581
2582                         if (skl_plane_wm_equals(i915, old_wm, new_wm))
2583                                 continue;
2584
2585                         drm_dbg_kms(&i915->drm,
2586                                     "[PLANE:%d:%s]   level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm"
2587                                     " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n",
2588                                     plane->base.base.id, plane->base.name,
2589                                     enast(old_wm->wm[0].enable), enast(old_wm->wm[1].enable),
2590                                     enast(old_wm->wm[2].enable), enast(old_wm->wm[3].enable),
2591                                     enast(old_wm->wm[4].enable), enast(old_wm->wm[5].enable),
2592                                     enast(old_wm->wm[6].enable), enast(old_wm->wm[7].enable),
2593                                     enast(old_wm->trans_wm.enable),
2594                                     enast(old_wm->sagv.wm0.enable),
2595                                     enast(old_wm->sagv.trans_wm.enable),
2596                                     enast(new_wm->wm[0].enable), enast(new_wm->wm[1].enable),
2597                                     enast(new_wm->wm[2].enable), enast(new_wm->wm[3].enable),
2598                                     enast(new_wm->wm[4].enable), enast(new_wm->wm[5].enable),
2599                                     enast(new_wm->wm[6].enable), enast(new_wm->wm[7].enable),
2600                                     enast(new_wm->trans_wm.enable),
2601                                     enast(new_wm->sagv.wm0.enable),
2602                                     enast(new_wm->sagv.trans_wm.enable));
2603
2604                         drm_dbg_kms(&i915->drm,
2605                                     "[PLANE:%d:%s]   lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d"
2606                                       " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n",
2607                                     plane->base.base.id, plane->base.name,
2608                                     enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].lines,
2609                                     enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].lines,
2610                                     enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].lines,
2611                                     enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].lines,
2612                                     enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].lines,
2613                                     enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].lines,
2614                                     enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].lines,
2615                                     enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].lines,
2616                                     enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.lines,
2617                                     enast(old_wm->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines,
2618                                     enast(old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm.lines,
2619                                     enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].lines,
2620                                     enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].lines,
2621                                     enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].lines,
2622                                     enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].lines,
2623                                     enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].lines,
2624                                     enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].lines,
2625                                     enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].lines,
2626                                     enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].lines,
2627                                     enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.lines,
2628                                     enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines,
2629                                     enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines);
2630
2631                         drm_dbg_kms(&i915->drm,
2632                                     "[PLANE:%d:%s]  blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
2633                                     " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
2634                                     plane->base.base.id, plane->base.name,
2635                                     old_wm->wm[0].blocks, old_wm->wm[1].blocks,
2636                                     old_wm->wm[2].blocks, old_wm->wm[3].blocks,
2637                                     old_wm->wm[4].blocks, old_wm->wm[5].blocks,
2638                                     old_wm->wm[6].blocks, old_wm->wm[7].blocks,
2639                                     old_wm->trans_wm.blocks,
2640                                     old_wm->sagv.wm0.blocks,
2641                                     old_wm->sagv.trans_wm.blocks,
2642                                     new_wm->wm[0].blocks, new_wm->wm[1].blocks,
2643                                     new_wm->wm[2].blocks, new_wm->wm[3].blocks,
2644                                     new_wm->wm[4].blocks, new_wm->wm[5].blocks,
2645                                     new_wm->wm[6].blocks, new_wm->wm[7].blocks,
2646                                     new_wm->trans_wm.blocks,
2647                                     new_wm->sagv.wm0.blocks,
2648                                     new_wm->sagv.trans_wm.blocks);
2649
2650                         drm_dbg_kms(&i915->drm,
2651                                     "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
2652                                     " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
2653                                     plane->base.base.id, plane->base.name,
2654                                     old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
2655                                     old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
2656                                     old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
2657                                     old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
2658                                     old_wm->trans_wm.min_ddb_alloc,
2659                                     old_wm->sagv.wm0.min_ddb_alloc,
2660                                     old_wm->sagv.trans_wm.min_ddb_alloc,
2661                                     new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
2662                                     new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
2663                                     new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
2664                                     new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
2665                                     new_wm->trans_wm.min_ddb_alloc,
2666                                     new_wm->sagv.wm0.min_ddb_alloc,
2667                                     new_wm->sagv.trans_wm.min_ddb_alloc);
2668                 }
2669         }
2670 }
2671
static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
					 const struct skl_pipe_wm *old_pipe_wm,
					 const struct skl_pipe_wm *new_pipe_wm)
{
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	int level, max_level = ilk_wm_max_level(i915);

	for (level = 0; level <= max_level; level++) {
		/*
		 * We don't check uv_wm as the hardware doesn't actually
		 * use it. It only gets used for calculating the required
		 * ddb allocation.
		 */
		if (!skl_wm_level_equals(skl_plane_wm_level(old_pipe_wm, plane->id, level),
					 skl_plane_wm_level(new_pipe_wm, plane->id, level)))
			return false;
	}

	if (HAS_HW_SAGV_WM(i915)) {
		const struct skl_plane_wm *old_wm = &old_pipe_wm->planes[plane->id];
		const struct skl_plane_wm *new_wm = &new_pipe_wm->planes[plane->id];

		if (!skl_wm_level_equals(&old_wm->sagv.wm0, &new_wm->sagv.wm0) ||
		    !skl_wm_level_equals(&old_wm->sagv.trans_wm, &new_wm->sagv.trans_wm))
			return false;
	}

	return skl_wm_level_equals(skl_plane_trans_wm(old_pipe_wm, plane->id),
				   skl_plane_trans_wm(new_pipe_wm, plane->id));
}

/*
 * To make sure the cursor watermark registers are always consistent
 * with our computed state the following scenario needs special
 * treatment:
 *
 * 1. enable cursor
 * 2. move cursor entirely offscreen
 * 3. disable cursor
 *
 * Step 2. does call .disable_plane() but does not zero the watermarks
 * (since we consider an offscreen cursor still active for the purposes
 * of watermarks). Step 3. would not normally call .disable_plane()
 * because the actual plane visibility isn't changing, and we don't
 * deallocate the cursor ddb until the pipe gets disabled. So we must
 * force step 3. to call .disable_plane() to update the watermark
 * registers properly.
 *
 * Other planes do not suffer from this issue as their watermarks are
 * calculated based on the actual plane visibility. The only time this
 * can trigger for the other planes is during the initial readout as the
 * default value of the watermarks registers is not zero.
 */
static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
		struct intel_plane_state *plane_state;
		enum plane_id plane_id = plane->id;

		/*
		 * Force a full wm update for every plane on modeset.
		 * Required because the reset value of the wm registers
		 * is non-zero, whereas we want all disabled planes to
		 * have zero watermarks. So if we turn off the relevant
		 * power well the hardware state will go out of sync
		 * with the software state.
		 */
		if (!intel_crtc_needs_modeset(new_crtc_state) &&
		    skl_plane_selected_wm_equals(plane,
						 &old_crtc_state->wm.skl.optimal,
						 &new_crtc_state->wm.skl.optimal))
			continue;

		plane_state = intel_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);

		new_crtc_state->update_planes |= BIT(plane_id);
	}

	return 0;
}

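/*
 * Top level watermark computation for an atomic commit: build the
 * per-pipe watermarks, distribute the DDB between pipes/planes,
 * compute the SAGV mask, and finally pull in any plane whose final
 * (DDB adjusted) watermarks changed.
 */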
static int
skl_compute_wm(struct intel_atomic_state *state)
{
	struct intel_crtc *crtc;
	struct intel_crtc_state *new_crtc_state;
	int ret, i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		ret = skl_build_pipe_wm(state, crtc);
		if (ret)
			return ret;
	}

	ret = skl_compute_ddb(state);
	if (ret)
		return ret;

	ret = intel_compute_sagv_mask(state);
	if (ret)
		return ret;

	/*
	 * skl_compute_ddb() will have adjusted the final watermarks
	 * based on how much ddb is available. Now we can actually
	 * check if the final watermarks changed.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		ret = skl_wm_add_affected_planes(state, crtc);
		if (ret)
			return ret;
	}

	skl_print_wm_changes(state);

	return 0;
}

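/*
 * All the SKL+ watermark registers (PLANE_WM, PLANE_WM_TRANS,
 * PLANE_WM_SAGV, and their cursor equivalents) share the same layout,
 * so a single helper can decode any of them during readout.
 */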
static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level)
{
	level->enable = val & PLANE_WM_EN;
	level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
	level->blocks = REG_FIELD_GET(PLANE_WM_BLOCKS_MASK, val);
	level->lines = REG_FIELD_GET(PLANE_WM_LINES_MASK, val);
}

static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
				     struct skl_pipe_wm *out)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	int level, max_level;
	enum plane_id plane_id;
	u32 val;

	max_level = ilk_wm_max_level(i915);

	for_each_plane_id_on_crtc(crtc, plane_id) {
		struct skl_plane_wm *wm = &out->planes[plane_id];

		for (level = 0; level <= max_level; level++) {
			if (plane_id != PLANE_CURSOR)
				val = intel_de_read(i915, PLANE_WM(pipe, plane_id, level));
			else
				val = intel_de_read(i915, CUR_WM(pipe, level));

			skl_wm_level_from_reg_val(val, &wm->wm[level]);
		}

		if (plane_id != PLANE_CURSOR)
			val = intel_de_read(i915, PLANE_WM_TRANS(pipe, plane_id));
		else
			val = intel_de_read(i915, CUR_WM_TRANS(pipe));

		skl_wm_level_from_reg_val(val, &wm->trans_wm);

		if (HAS_HW_SAGV_WM(i915)) {
			if (plane_id != PLANE_CURSOR)
				val = intel_de_read(i915, PLANE_WM_SAGV(pipe, plane_id));
			else
				val = intel_de_read(i915, CUR_WM_SAGV(pipe));

			skl_wm_level_from_reg_val(val, &wm->sagv.wm0);

			if (plane_id != PLANE_CURSOR)
				val = intel_de_read(i915, PLANE_WM_SAGV_TRANS(pipe, plane_id));
			else
				val = intel_de_read(i915, CUR_WM_SAGV_TRANS(pipe));

			skl_wm_level_from_reg_val(val, &wm->sagv.trans_wm);
		} else if (DISPLAY_VER(i915) >= 12) {
			wm->sagv.wm0 = wm->wm[0];
			wm->sagv.trans_wm = wm->trans_wm;
		}
	}
}

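/*
 * Read out the watermark/DDB hardware state (including the dbuf slice
 * assignments and the MBUS joining mode) into the software state, so
 * that the driver starts out consistent with whatever the BIOS/GOP
 * left programmed.
 */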
void skl_wm_get_hw_state(struct drm_i915_private *i915)
{
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(i915->display.dbuf.obj.state);
	struct intel_crtc *crtc;

	if (HAS_MBUS_JOINING(i915))
		dbuf_state->joined_mbus = intel_de_read(i915, MBUS_CTL) & MBUS_JOIN;

	for_each_intel_crtc(&i915->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		enum pipe pipe = crtc->pipe;
		unsigned int mbus_offset;
		enum plane_id plane_id;
		u8 slices;

		memset(&crtc_state->wm.skl.optimal, 0,
		       sizeof(crtc_state->wm.skl.optimal));
		if (crtc_state->hw.active)
			skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
		crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;

		memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe]));

		for_each_plane_id_on_crtc(crtc, plane_id) {
			struct skl_ddb_entry *ddb =
				&crtc_state->wm.skl.plane_ddb[plane_id];
			struct skl_ddb_entry *ddb_y =
				&crtc_state->wm.skl.plane_ddb_y[plane_id];

			if (!crtc_state->hw.active)
				continue;

			skl_ddb_get_hw_plane_state(i915, crtc->pipe,
						   plane_id, ddb, ddb_y);

			skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb);
			skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_y);
		}

		dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state);

		/*
		 * Used for checking overlaps, so we need absolute
		 * offsets instead of MBUS relative offsets.
		 */
		slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
						 dbuf_state->joined_mbus);
		mbus_offset = mbus_ddb_offset(i915, slices);
		crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start;
		crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end;

		/* The slices actually used by the planes on the pipe */
		dbuf_state->slices[pipe] =
			skl_ddb_dbuf_slice_mask(i915, &crtc_state->wm.skl.ddb);

		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",
			    crtc->base.base.id, crtc->base.name,
			    dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start,
			    dbuf_state->ddb[pipe].end, dbuf_state->active_pipes,
			    str_yes_no(dbuf_state->joined_mbus));
	}

	dbuf_state->enabled_slices = i915->display.dbuf.enabled_slices;
}

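/*
 * Check the readout state for signs of BIOS misprogramming: planes
 * using dbuf slices their pipe should not own, or the DDB allocations
 * of different pipes overlapping each other.
 */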
static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
{
	const struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(i915->display.dbuf.obj.state);
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
	struct intel_crtc *crtc;

	for_each_intel_crtc(&i915->drm, crtc) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		entries[crtc->pipe] = crtc_state->wm.skl.ddb;
	}

	for_each_intel_crtc(&i915->drm, crtc) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		u8 slices;

		slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
						 dbuf_state->joined_mbus);
		if (dbuf_state->slices[crtc->pipe] & ~slices)
			return true;

		if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries,
						I915_MAX_PIPES, crtc->pipe))
			return true;
	}

	return false;
}

void skl_wm_sanitize(struct drm_i915_private *i915)
{
	struct intel_crtc *crtc;

	/*
	 * On TGL/RKL (at least) the BIOS likes to assign the planes
	 * to the wrong DBUF slices. This will cause an infinite loop
	 * in skl_commit_modeset_enables() as it can't find a way to
	 * transition from the old bogus DBUF layout to the new
	 * proper DBUF layout without DBUF allocation overlaps between
	 * the planes (which cannot be allowed or else the hardware
	 * may hang). If we detect a bogus DBUF layout just turn off
	 * all the planes so that skl_commit_modeset_enables() can
	 * simply ignore them.
	 */
	if (!skl_dbuf_is_misconfigured(i915))
		return;

	drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");

	for_each_intel_crtc(&i915->drm, crtc) {
		struct intel_plane *plane = to_intel_plane(crtc->base.primary);
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		if (plane_state->uapi.visible)
			intel_plane_disable_noatomic(crtc, plane);

		drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);

		memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
	}
}

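/*
 * Cross-check the watermark/DDB state we computed for this crtc
 * against what was actually programmed into the hardware, and complain
 * loudly about any mismatch.
 */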
void intel_wm_state_verify(struct intel_crtc *crtc,
			   struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct skl_hw_state {
		struct skl_ddb_entry ddb[I915_MAX_PLANES];
		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
		struct skl_pipe_wm wm;
	} *hw;
	const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
	int level, max_level = ilk_wm_max_level(i915);
	struct intel_plane *plane;
	u8 hw_enabled_slices;

	if (DISPLAY_VER(i915) < 9 || !new_crtc_state->hw.active)
		return;

	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		return;

	skl_pipe_wm_get_hw_state(crtc, &hw->wm);

	skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y);

	hw_enabled_slices = intel_enabled_dbuf_slices_mask(i915);

	if (DISPLAY_VER(i915) >= 11 &&
	    hw_enabled_slices != i915->display.dbuf.enabled_slices)
		drm_err(&i915->drm,
			"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
			i915->display.dbuf.enabled_slices,
			hw_enabled_slices);

	for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
		const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
		const struct skl_wm_level *hw_wm_level, *sw_wm_level;

		/* Watermarks */
		for (level = 0; level <= max_level; level++) {
			hw_wm_level = &hw->wm.planes[plane->id].wm[level];
			sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);

			if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
				continue;

			drm_err(&i915->drm,
				"[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				plane->base.base.id, plane->base.name, level,
				sw_wm_level->enable,
				sw_wm_level->blocks,
				sw_wm_level->lines,
				hw_wm_level->enable,
				hw_wm_level->blocks,
				hw_wm_level->lines);
		}

		hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
		sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);

		if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
			drm_err(&i915->drm,
				"[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				plane->base.base.id, plane->base.name,
				sw_wm_level->enable,
				sw_wm_level->blocks,
				sw_wm_level->lines,
				hw_wm_level->enable,
				hw_wm_level->blocks,
				hw_wm_level->lines);
		}

		hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
		sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;

		if (HAS_HW_SAGV_WM(i915) &&
		    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
			drm_err(&i915->drm,
				"[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				plane->base.base.id, plane->base.name,
				sw_wm_level->enable,
				sw_wm_level->blocks,
				sw_wm_level->lines,
				hw_wm_level->enable,
				hw_wm_level->blocks,
				hw_wm_level->lines);
		}

		hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
		sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;

		if (HAS_HW_SAGV_WM(i915) &&
		    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
			drm_err(&i915->drm,
				"[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				plane->base.base.id, plane->base.name,
				sw_wm_level->enable,
				sw_wm_level->blocks,
				sw_wm_level->lines,
				hw_wm_level->enable,
				hw_wm_level->blocks,
				hw_wm_level->lines);
		}

		/* DDB */
		hw_ddb_entry = &hw->ddb[PLANE_CURSOR];
		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			drm_err(&i915->drm,
				"[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
				plane->base.base.id, plane->base.name,
				sw_ddb_entry->start, sw_ddb_entry->end,
				hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	kfree(hw);
}

bool skl_watermark_ipc_enabled(struct drm_i915_private *i915)
{
	return i915->display.wm.ipc_enabled;
}

void skl_watermark_ipc_update(struct drm_i915_private *i915)
{
	if (!HAS_IPC(i915))
		return;

	intel_de_rmw(i915, DISP_ARB_CTL2, DISP_IPC_ENABLE,
		     skl_watermark_ipc_enabled(i915) ? DISP_IPC_ENABLE : 0);
}

static bool skl_watermark_ipc_can_enable(struct drm_i915_private *i915)
{
	/* Display WA #0477 WaDisableIPC: skl */
	if (IS_SKYLAKE(i915))
		return false;

	/* Display WA #1141: SKL:all KBL:all CFL */
	if (IS_KABYLAKE(i915) ||
	    IS_COFFEELAKE(i915) ||
	    IS_COMETLAKE(i915))
		return i915->dram_info.symmetric_memory;

	return true;
}

void skl_watermark_ipc_init(struct drm_i915_private *i915)
{
	if (!HAS_IPC(i915))
		return;

	i915->display.wm.ipc_enabled = skl_watermark_ipc_can_enable(i915);

	skl_watermark_ipc_update(i915);
}

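/*
 * Sanitize the raw latency values: any level (> 0) at or above the
 * first level reporting 0us latency is disabled, the
 * WaWmMemoryReadLatency adjustment is applied when level 0 reads back
 * as 0us, and level 0 gets an extra 1us on configurations that need
 * the 16GB DIMM workaround. E.g. raw latencies of {2, 4, 0, 8, ...}
 * are sanitized to {2, 4, 0, 0, ...}.
 */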
static void
adjust_wm_latency(struct drm_i915_private *i915,
		  u16 wm[], int max_level, int read_latency)
{
	bool wm_lv_0_adjust_needed = i915->dram_info.wm_lv_0_adjust_needed;
	int i, level;

	/*
	 * If a level n (n >= 1) has a 0us latency, all levels m (m >= n)
	 * need to be disabled. We make sure to sanitize the values out
	 * of the punit to satisfy this requirement.
	 */
	for (level = 1; level <= max_level; level++) {
		if (wm[level] == 0) {
			for (i = level + 1; i <= max_level; i++)
				wm[i] = 0;

			max_level = level - 1;
			break;
		}
	}

	/*
	 * WaWmMemoryReadLatency
	 *
	 * The punit doesn't take the read latency into account, so we
	 * need to add a proper adjustment to each valid level we
	 * retrieve from the punit when the level 0 response data is 0us.
	 */
	if (wm[0] == 0) {
		for (level = 0; level <= max_level; level++)
			wm[level] += read_latency;
	}

	/*
	 * WA Level-0 adjustment for 16GB DIMMs: SKL+
	 * If we could not get DIMM info, assume a 16GB DIMM and enable
	 * this WA to prevent underruns.
	 */
	if (wm_lv_0_adjust_needed)
		wm[0] += 1;
}

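/*
 * On MTL+ the watermark latencies come from dedicated MMIO latency
 * registers, each packing two levels into even/odd fields, instead of
 * the pcode mailbox used on earlier platforms.
 */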
static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
{
	int max_level = ilk_wm_max_level(i915);
	u32 val;

	val = intel_de_read(i915, MTL_LATENCY_LP0_LP1);
	wm[0] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
	wm[1] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);

	val = intel_de_read(i915, MTL_LATENCY_LP2_LP3);
	wm[2] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
	wm[3] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);

	val = intel_de_read(i915, MTL_LATENCY_LP4_LP5);
	wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
	wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);

	adjust_wm_latency(i915, wm, max_level, 6);
}

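/*
 * Pre-MTL platforms query the latencies from pcode via the
 * GEN9_PCODE_READ_MEM_LATENCY mailbox: data0 == 0 returns levels 0-3
 * and data0 == 1 returns levels 4-7, four 8-bit fields per dword.
 * DG2 values are scaled by 2, presumably because its firmware reports
 * them in coarser units.
 */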
static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
{
	int max_level = ilk_wm_max_level(i915);
	int read_latency = DISPLAY_VER(i915) >= 12 ? 3 : 2;
	int mult = IS_DG2(i915) ? 2 : 1;
	u32 val;
	int ret;

	/* read the first set of memory latencies[0:3] */
	val = 0; /* data0 to be programmed to 0 for first set */
	ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
	if (ret) {
		drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret);
		return;
	}

	wm[0] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult;
	wm[1] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult;
	wm[2] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
	wm[3] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;

	/* read the second set of memory latencies[4:7] */
	val = 1; /* data0 to be programmed to 1 for second set */
	ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
	if (ret) {
		drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret);
		return;
	}

	wm[4] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult;
	wm[5] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult;
	wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
	wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;

	adjust_wm_latency(i915, wm, max_level, read_latency);
}

static void skl_setup_wm_latency(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 14)
		mtl_read_wm_latency(i915, i915->display.wm.skl_latency);
	else
		skl_read_wm_latency(i915, i915->display.wm.skl_latency);

	intel_print_wm_latency(i915, "Gen9 Plane", i915->display.wm.skl_latency);
}

static const struct intel_wm_funcs skl_wm_funcs = {
	.compute_global_watermarks = skl_compute_wm,
};

void skl_wm_init(struct drm_i915_private *i915)
{
	intel_sagv_init(i915);

	skl_setup_wm_latency(i915);

	i915->display.funcs.wm = &skl_wm_funcs;
}

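/*
 * Boilerplate for the dbuf global atomic state object: duplication is
 * a plain copy of the whole struct, destruction a plain kfree().
 */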
static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj)
{
	struct intel_dbuf_state *dbuf_state;

	dbuf_state = kmemdup(obj->state, sizeof(*dbuf_state), GFP_KERNEL);
	if (!dbuf_state)
		return NULL;

	return &dbuf_state->base;
}

static void intel_dbuf_destroy_state(struct intel_global_obj *obj,
				     struct intel_global_state *state)
{
	kfree(state);
}

static const struct intel_global_state_funcs intel_dbuf_funcs = {
	.atomic_duplicate_state = intel_dbuf_duplicate_state,
	.atomic_destroy_state = intel_dbuf_destroy_state,
};

struct intel_dbuf_state *
intel_atomic_get_dbuf_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_global_state *dbuf_state;

	dbuf_state = intel_atomic_get_global_obj_state(state, &i915->display.dbuf.obj);
	if (IS_ERR(dbuf_state))
		return ERR_CAST(dbuf_state);

	return to_intel_dbuf_state(dbuf_state);
}

int intel_dbuf_init(struct drm_i915_private *i915)
{
	struct intel_dbuf_state *dbuf_state;

	dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL);
	if (!dbuf_state)
		return -ENOMEM;

	intel_atomic_global_obj_init(i915, &i915->display.dbuf.obj,
				     &dbuf_state->base, &intel_dbuf_funcs);

	return 0;
}

/*
 * Configure MBUS_CTL and the DBUF_CTL_S of each slice for the join_mbus
 * state before updating the requested state of all DBUF slices.
 */
static void update_mbus_pre_enable(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	u32 mbus_ctl, dbuf_min_tracker_val;
	enum dbuf_slice slice;
	const struct intel_dbuf_state *dbuf_state =
		intel_atomic_get_new_dbuf_state(state);

	if (!HAS_MBUS_JOINING(i915))
		return;

	/*
	 * TODO: Implement vblank synchronized MBUS joining changes.
	 * Must be properly coordinated with dbuf reprogramming.
	 */
	if (dbuf_state->joined_mbus) {
		mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN |
			MBUS_JOIN_PIPE_SELECT_NONE;
		dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(3);
	} else {
		mbus_ctl = MBUS_HASHING_MODE_2x2 |
			MBUS_JOIN_PIPE_SELECT_NONE;
		dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(1);
	}

	intel_de_rmw(i915, MBUS_CTL,
		     MBUS_HASHING_MODE_MASK | MBUS_JOIN |
		     MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);

	for_each_dbuf_slice(i915, slice)
		intel_de_rmw(i915, DBUF_CTL_S(slice),
			     DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
			     dbuf_min_tracker_val);
}

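/*
 * DBuf slice power sequencing for a commit: before the planes are
 * updated, switch the MBUS joining mode and enable the union of the
 * old and new slice sets; after the planes are updated, turn off
 * whichever slices are no longer needed.
 */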
void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_dbuf_state *new_dbuf_state =
		intel_atomic_get_new_dbuf_state(state);
	const struct intel_dbuf_state *old_dbuf_state =
		intel_atomic_get_old_dbuf_state(state);

	if (!new_dbuf_state ||
	    (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices &&
	     new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus))
		return;

	WARN_ON(!new_dbuf_state->base.changed);

	update_mbus_pre_enable(state);
	gen9_dbuf_slices_update(i915,
				old_dbuf_state->enabled_slices |
				new_dbuf_state->enabled_slices);
}

void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_dbuf_state *new_dbuf_state =
		intel_atomic_get_new_dbuf_state(state);
	const struct intel_dbuf_state *old_dbuf_state =
		intel_atomic_get_old_dbuf_state(state);

	if (!new_dbuf_state ||
	    (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices &&
	     new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus))
		return;

	WARN_ON(!new_dbuf_state->base.changed);

	gen9_dbuf_slices_update(i915,
				new_dbuf_state->enabled_slices);
}

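/*
 * On display version 14+ hardware, pipes A/D and B/C appear to share a
 * DBUF bank; a pipe only gets the larger BW credit pool when its
 * partner pipe is inactive.
 */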
static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes)
{
	switch (pipe) {
	case PIPE_A:
		return !(active_pipes & BIT(PIPE_D));
	case PIPE_D:
		return !(active_pipes & BIT(PIPE_A));
	case PIPE_B:
		return !(active_pipes & BIT(PIPE_C));
	case PIPE_C:
		return !(active_pipes & BIT(PIPE_B));
	default: /* to suppress compiler warning */
		MISSING_CASE(pipe);
		break;
	}

	return false;
}

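/*
 * Program the per-pipe MBUS DBOX credit values. The credits depend on
 * the platform, on whether MBUS joining is in use, and on MTL also on
 * whether the pipe has a DBUF bank to itself.
 */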
void intel_mbus_dbox_update(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
	const struct intel_crtc_state *new_crtc_state;
	const struct intel_crtc *crtc;
	u32 val = 0;
	int i;

	if (DISPLAY_VER(i915) < 11)
		return;

	new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
	old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
	if (!new_dbuf_state ||
	    (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus &&
	     new_dbuf_state->active_pipes == old_dbuf_state->active_pipes))
		return;

	if (DISPLAY_VER(i915) >= 14)
		val |= MBUS_DBOX_I_CREDIT(2);

	if (DISPLAY_VER(i915) >= 12) {
		val |= MBUS_DBOX_B2B_TRANSACTIONS_MAX(16);
		val |= MBUS_DBOX_B2B_TRANSACTIONS_DELAY(1);
		val |= MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN;
	}

	if (DISPLAY_VER(i915) >= 14)
		val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(12) :
						     MBUS_DBOX_A_CREDIT(8);
	else if (IS_ALDERLAKE_P(i915))
		/* Wa_22010947358:adl-p */
		val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(6) :
						     MBUS_DBOX_A_CREDIT(4);
	else
		val |= MBUS_DBOX_A_CREDIT(2);

	if (DISPLAY_VER(i915) >= 14) {
		val |= MBUS_DBOX_B_CREDIT(0xA);
	} else if (IS_ALDERLAKE_P(i915)) {
		val |= MBUS_DBOX_BW_CREDIT(2);
		val |= MBUS_DBOX_B_CREDIT(8);
	} else if (DISPLAY_VER(i915) >= 12) {
		val |= MBUS_DBOX_BW_CREDIT(2);
		val |= MBUS_DBOX_B_CREDIT(12);
	} else {
		val |= MBUS_DBOX_BW_CREDIT(1);
		val |= MBUS_DBOX_B_CREDIT(8);
	}

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		u32 pipe_val = val;

		if (!new_crtc_state->hw.active)
			continue;

		if (DISPLAY_VER(i915) >= 14) {
			if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe,
							      new_dbuf_state->active_pipes))
				pipe_val |= MBUS_DBOX_BW_8CREDITS_MTL;
			else
				pipe_val |= MBUS_DBOX_BW_4CREDITS_MTL;
		}

		intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe), pipe_val);
	}
}

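/*
 * debugfs interface for toggling IPC at runtime. With the usual drm
 * debugfs layout this is typically exposed as
 * /sys/kernel/debug/dri/<minor>/i915_ipc_status, so e.g.
 * "echo 0 > i915_ipc_status" from that directory disables IPC.
 */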
static int skl_watermark_ipc_status_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = m->private;

	seq_printf(m, "Isochronous Priority Control: %s\n",
		   str_yes_no(skl_watermark_ipc_enabled(i915)));
	return 0;
}

static int skl_watermark_ipc_status_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	return single_open(file, skl_watermark_ipc_status_show, i915);
}

static ssize_t skl_watermark_ipc_status_write(struct file *file,
					      const char __user *ubuf,
					      size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *i915 = m->private;
	intel_wakeref_t wakeref;
	bool enable;
	int ret;

	ret = kstrtobool_from_user(ubuf, len, &enable);
	if (ret < 0)
		return ret;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		if (!skl_watermark_ipc_enabled(i915) && enable)
			drm_info(&i915->drm,
				 "Enabling IPC: WM will be proper only after next commit\n");
		i915->display.wm.ipc_enabled = enable;
		skl_watermark_ipc_update(i915);
	}

	return len;
}

static const struct file_operations skl_watermark_ipc_status_fops = {
	.owner = THIS_MODULE,
	.open = skl_watermark_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = skl_watermark_ipc_status_write
};

void skl_watermark_ipc_debugfs_register(struct drm_i915_private *i915)
{
	struct drm_minor *minor = i915->drm.primary;

	if (!HAS_IPC(i915))
		return;

	debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915,
			    &skl_watermark_ipc_status_fops);
}