// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include <drm/drm_blend.h>

#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_display.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "skl_watermark.h"

#include "i915_fixed.h"
#include "intel_pcode.h"

static void skl_sagv_disable(struct drm_i915_private *i915);

/* Stores plane-specific WM parameters */
struct skl_wm_params {
	bool x_tiled, y_tiled;
	bool rc_surface;
	bool is_planar;
	u32 width;
	u8 cpp;
	u32 plane_pixel_rate;
	u32 y_min_scanlines;
	u32 plane_bytes_per_line;
	uint_fixed_16_16_t plane_blocks_per_line;
	uint_fixed_16_16_t y_tile_minimum;
	u32 linetime_us;
	u32 dbuf_block_size;
};

u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *i915)
{
	u8 enabled_slices = 0;
	enum dbuf_slice slice;

	for_each_dbuf_slice(i915, slice) {
		if (intel_de_read(i915, DBUF_CTL_S(slice)) & DBUF_POWER_STATE)
			enabled_slices |= BIT(slice);
	}

	return enabled_slices;
}

/*
 * FIXME: We still don't have the proper code to detect if we need to apply the WA,
 * so assume we'll always need it in order to avoid underruns.
 */
static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915)
{
	return DISPLAY_VER(i915) == 9;
}

static bool
intel_has_sagv(struct drm_i915_private *i915)
{
	return DISPLAY_VER(i915) >= 9 && !IS_LP(i915) &&
		i915->display.sagv.status != I915_SAGV_NOT_CONTROLLED;
}

static u32
intel_sagv_block_time(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 14) {
		u32 val;

		val = intel_de_read(i915, MTL_LATENCY_SAGV);

		return REG_FIELD_GET(MTL_LATENCY_QCLK_SAGV, val);
	} else if (DISPLAY_VER(i915) >= 12) {
		u32 val = 0;
		int ret;

		ret = snb_pcode_read(&i915->uncore,
				     GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
				     &val, NULL);
		if (ret) {
			drm_dbg_kms(&i915->drm, "Couldn't read SAGV block time!\n");
			return 0;
		}

		return val;
	} else if (DISPLAY_VER(i915) == 11) {
		return 10;
	} else if (DISPLAY_VER(i915) == 9 && !IS_LP(i915)) {
		return 30;
	} else {
		return 0;
	}
}

static void intel_sagv_init(struct drm_i915_private *i915)
{
	if (!intel_has_sagv(i915))
		i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;

	/*
	 * Probe to see if we have working SAGV control.
	 * For icl+ this was already determined by intel_bw_init_hw().
	 */
	if (DISPLAY_VER(i915) < 11)
		skl_sagv_disable(i915);

	drm_WARN_ON(&i915->drm, i915->display.sagv.status == I915_SAGV_UNKNOWN);

	i915->display.sagv.block_time_us = intel_sagv_block_time(i915);

	drm_dbg_kms(&i915->drm, "SAGV supported: %s, original SAGV block time: %u us\n",
		    str_yes_no(intel_has_sagv(i915)), i915->display.sagv.block_time_us);

	/* avoid overflow when adding with wm0 latency/etc. */
	if (drm_WARN(&i915->drm, i915->display.sagv.block_time_us > U16_MAX,
		     "Excessive SAGV block time %u, ignoring\n",
		     i915->display.sagv.block_time_us))
		i915->display.sagv.block_time_us = 0;

	if (!intel_has_sagv(i915))
		i915->display.sagv.block_time_us = 0;
}

/*
 * SAGV dynamically adjusts the system agent voltage and clock frequencies
 * depending on power and performance requirements. The display engine access
 * to system memory is blocked during the adjustment time. Because of the
 * blocking time, having this enabled can cause full system hangs and/or pipe
 * underruns if we don't meet all of the following requirements:
 *
 * - <= 1 pipe enabled
 * - All planes can enable watermarks for latencies >= SAGV engine block time
 * - We're not using an interlaced display configuration
 */

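/*
 * A worked example of the latency requirement above (illustrative
 * numbers, not from Bspec): with a SAGV block time of 20 us, a plane
 * whose highest enabled watermark level only covers 15 us of memory
 * latency cannot tolerate the blocking window, so SAGV has to stay
 * disabled for that configuration.
 */
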
static void skl_sagv_enable(struct drm_i915_private *i915)
{
	int ret;

	if (!intel_has_sagv(i915))
		return;

	if (i915->display.sagv.status == I915_SAGV_ENABLED)
		return;

	drm_dbg_kms(&i915->drm, "Enabling SAGV\n");
	ret = snb_pcode_write(&i915->uncore, GEN9_PCODE_SAGV_CONTROL,
			      GEN9_SAGV_ENABLE);

	/* We don't need to wait for SAGV when enabling */

	/*
	 * Some skl systems, pre-release machines in particular,
	 * don't actually have SAGV.
	 */
	if (IS_SKYLAKE(i915) && ret == -ENXIO) {
		drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n");
		i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
		return;
	} else if (ret < 0) {
		drm_err(&i915->drm, "Failed to enable SAGV\n");
		return;
	}

	i915->display.sagv.status = I915_SAGV_ENABLED;
}

static void skl_sagv_disable(struct drm_i915_private *i915)
{
	int ret;

	if (!intel_has_sagv(i915))
		return;

	if (i915->display.sagv.status == I915_SAGV_DISABLED)
		return;

	drm_dbg_kms(&i915->drm, "Disabling SAGV\n");
	/* bspec says to keep retrying for at least 1 ms */
	ret = skl_pcode_request(&i915->uncore, GEN9_PCODE_SAGV_CONTROL,
				GEN9_SAGV_DISABLE,
				GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
				1);
	/*
	 * Some skl systems, pre-release machines in particular,
	 * don't actually have SAGV.
	 */
	if (IS_SKYLAKE(i915) && ret == -ENXIO) {
		drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n");
		i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
		return;
	} else if (ret < 0) {
		drm_err(&i915->drm, "Failed to disable SAGV (%d)\n", ret);
		return;
	}

	i915->display.sagv.status = I915_SAGV_DISABLED;
}

static void skl_sagv_pre_plane_update(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_bw_state *new_bw_state =
		intel_atomic_get_new_bw_state(state);

	if (!new_bw_state)
		return;

	if (!intel_can_enable_sagv(i915, new_bw_state))
		skl_sagv_disable(i915);
}

static void skl_sagv_post_plane_update(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_bw_state *new_bw_state =
		intel_atomic_get_new_bw_state(state);

	if (!new_bw_state)
		return;

	if (intel_can_enable_sagv(i915, new_bw_state))
		skl_sagv_enable(i915);
}

static void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_bw_state *old_bw_state =
		intel_atomic_get_old_bw_state(state);
	const struct intel_bw_state *new_bw_state =
		intel_atomic_get_new_bw_state(state);
	u16 old_mask, new_mask;

	if (!new_bw_state)
		return;

	old_mask = old_bw_state->qgv_points_mask;
	new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;

	if (old_mask == new_mask)
		return;

	WARN_ON(!new_bw_state->base.changed);

	drm_dbg_kms(&i915->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
		    old_mask, new_mask);

	/*
	 * Restrict required qgv points before updating the configuration.
	 * According to BSpec we can't mask and unmask qgv points at the same
	 * time. Also masking should be done before updating the configuration
	 * and unmasking afterwards.
	 */
	icl_pcode_restrict_qgv_points(i915, new_mask);
}

static void icl_sagv_post_plane_update(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_bw_state *old_bw_state =
		intel_atomic_get_old_bw_state(state);
	const struct intel_bw_state *new_bw_state =
		intel_atomic_get_new_bw_state(state);
	u16 old_mask, new_mask;

	if (!new_bw_state)
		return;

	old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
	new_mask = new_bw_state->qgv_points_mask;

	if (old_mask == new_mask)
		return;

	WARN_ON(!new_bw_state->base.changed);

	drm_dbg_kms(&i915->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
		    old_mask, new_mask);

	/*
	 * Allow required qgv points after updating the configuration.
	 * According to BSpec we can't mask and unmask qgv points at the same
	 * time. Also masking should be done before updating the configuration
	 * and unmasking afterwards.
	 */
	icl_pcode_restrict_qgv_points(i915, new_mask);
}

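/*
 * Worked example with hypothetical masks: when qgv_points_mask changes
 * from 0x3 to 0x4, the pre plane update step restricts to the union 0x7,
 * the new configuration is programmed, and the post plane update step
 * relaxes to the final 0x4. Going through the union is what guarantees
 * we never mask and unmask points in a single request.
 */
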
void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	/*
	 * Just return if we can't control SAGV or don't have it.
	 * This is different from the case where we have SAGV but just can't
	 * afford it due to DBuf limitations - if SAGV is completely
	 * disabled in the BIOS, we are not even allowed to send a PCode
	 * request, as it will throw an error. So we have to check it here.
	 */
	if (!intel_has_sagv(i915))
		return;

	if (DISPLAY_VER(i915) >= 11)
		icl_sagv_pre_plane_update(state);
	else
		skl_sagv_pre_plane_update(state);
}

void intel_sagv_post_plane_update(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	/*
	 * Just return if we can't control SAGV or don't have it.
	 * This is different from the case where we have SAGV but just can't
	 * afford it due to DBuf limitations - if SAGV is completely
	 * disabled in the BIOS, we are not even allowed to send a PCode
	 * request, as it will throw an error. So we have to check it here.
	 */
	if (!intel_has_sagv(i915))
		return;

	if (DISPLAY_VER(i915) >= 11)
		icl_sagv_post_plane_update(state);
	else
		skl_sagv_post_plane_update(state);
}

static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	enum plane_id plane_id;
	int max_level = INT_MAX;

	if (!intel_has_sagv(i915))
		return false;

	if (!crtc_state->hw.active)
		return true;

	if (crtc_state->hw.pipe_mode.flags & DRM_MODE_FLAG_INTERLACE)
		return false;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		const struct skl_plane_wm *wm =
			&crtc_state->wm.skl.optimal.planes[plane_id];
		int level;

		/* Skip this plane if it's not enabled */
		if (!wm->wm[0].enable)
			continue;

		/* Find the highest enabled wm level for this plane */
		for (level = ilk_wm_max_level(i915);
		     !wm->wm[level].enable; --level)
			{ }

		/* Highest common enabled wm level for all planes */
		max_level = min(level, max_level);
	}

	/* No enabled planes? */
	if (max_level == INT_MAX)
		return true;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		const struct skl_plane_wm *wm =
			&crtc_state->wm.skl.optimal.planes[plane_id];

		/*
		 * All enabled planes must have enabled a common wm level that
		 * can tolerate memory latencies higher than sagv_block_time_us
		 */
		if (wm->wm[0].enable && !wm->wm[max_level].can_sagv)
			return false;
	}

	return true;
}

static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum plane_id plane_id;

	if (!crtc_state->hw.active)
		return true;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		const struct skl_plane_wm *wm =
			&crtc_state->wm.skl.optimal.planes[plane_id];

		if (wm->wm[0].enable && !wm->sagv.wm0.enable)
			return false;
	}

	return true;
}

static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	if (DISPLAY_VER(i915) >= 12)
		return tgl_crtc_can_enable_sagv(crtc_state);
	else
		return skl_crtc_can_enable_sagv(crtc_state);
}

bool intel_can_enable_sagv(struct drm_i915_private *i915,
			   const struct intel_bw_state *bw_state)
{
	if (DISPLAY_VER(i915) < 11 &&
	    bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
		return false;

	return bw_state->pipe_sagv_reject == 0;
}

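/*
 * Example: on a pre-icl platform with active_pipes = 0x5 (two pipes),
 * !is_power_of_2() rejects SAGV outright; with a single pipe (e.g. 0x1)
 * only pipe_sagv_reject decides.
 */
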
static int intel_compute_sagv_mask(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	int ret;
	struct intel_crtc *crtc;
	struct intel_crtc_state *new_crtc_state;
	struct intel_bw_state *new_bw_state = NULL;
	const struct intel_bw_state *old_bw_state = NULL;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc,
					 new_crtc_state, i) {
		new_bw_state = intel_atomic_get_bw_state(state);
		if (IS_ERR(new_bw_state))
			return PTR_ERR(new_bw_state);

		old_bw_state = intel_atomic_get_old_bw_state(state);

		if (intel_crtc_can_enable_sagv(new_crtc_state))
			new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
		else
			new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
	}

	if (!new_bw_state)
		return 0;

	new_bw_state->active_pipes =
		intel_calc_active_pipes(state, old_bw_state->active_pipes);

	if (new_bw_state->active_pipes != old_bw_state->active_pipes) {
		ret = intel_atomic_lock_global_state(&new_bw_state->base);
		if (ret)
			return ret;
	}

	if (intel_can_enable_sagv(i915, new_bw_state) !=
	    intel_can_enable_sagv(i915, old_bw_state)) {
		ret = intel_atomic_serialize_global_state(&new_bw_state->base);
		if (ret)
			return ret;
	} else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
		ret = intel_atomic_lock_global_state(&new_bw_state->base);
		if (ret)
			return ret;
	}

	for_each_new_intel_crtc_in_state(state, crtc,
					 new_crtc_state, i) {
		struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;

		/*
		 * We store use_sagv_wm in the crtc state rather than relying on
		 * that bw state since we have no convenient way to get at the
		 * latter from the plane commit hooks (especially in the legacy
		 * cursor case)
		 */
		pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(i915) &&
			DISPLAY_VER(i915) >= 12 &&
			intel_can_enable_sagv(i915, new_bw_state);
	}

	return 0;
}

static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry,
			      u16 start, u16 end)
{
	entry->start = start;
	entry->end = end;

	return end;
}

static int intel_dbuf_slice_size(struct drm_i915_private *i915)
{
	return INTEL_INFO(i915)->display.dbuf.size /
		hweight8(INTEL_INFO(i915)->display.dbuf.slice_mask);
}

static void
skl_ddb_entry_for_slices(struct drm_i915_private *i915, u8 slice_mask,
			 struct skl_ddb_entry *ddb)
{
	int slice_size = intel_dbuf_slice_size(i915);

	ddb->start = (ffs(slice_mask) - 1) * slice_size;
	ddb->end = fls(slice_mask) * slice_size;

	WARN_ON(ddb->start >= ddb->end);
	WARN_ON(ddb->end > INTEL_INFO(i915)->display.dbuf.size);
}

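/*
 * Worked example (assuming four slices of 1024 blocks each, so
 * slice_size = 1024): slice_mask = BIT(DBUF_S2) | BIT(DBUF_S3) gives
 * ffs() = 2 and fls() = 3, i.e. the entry spans blocks 1024..3072.
 */
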
static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask)
{
	struct skl_ddb_entry ddb;

	if (slice_mask & (BIT(DBUF_S1) | BIT(DBUF_S2)))
		slice_mask = BIT(DBUF_S1);
	else if (slice_mask & (BIT(DBUF_S3) | BIT(DBUF_S4)))
		slice_mask = BIT(DBUF_S3);

	skl_ddb_entry_for_slices(i915, slice_mask, &ddb);

	return ddb.start;
}

u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915,
			    const struct skl_ddb_entry *entry)
{
	int slice_size = intel_dbuf_slice_size(i915);
	enum dbuf_slice start_slice, end_slice;
	u8 slice_mask = 0;

	if (!skl_ddb_entry_size(entry))
		return 0;

	start_slice = entry->start / slice_size;
	end_slice = (entry->end - 1) / slice_size;

	/*
	 * A per-plane DDB entry can, in the worst case, span multiple
	 * slices, but a single entry is always contiguous.
	 */
	while (start_slice <= end_slice) {
		slice_mask |= BIT(start_slice);
		start_slice++;
	}

	return slice_mask;
}

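/*
 * Example of the inverse mapping (slice_size = 1024, hypothetical
 * entry): an allocation spanning blocks 900..1200 yields start_slice 0
 * and end_slice 1, so the returned mask is BIT(DBUF_S1) | BIT(DBUF_S2).
 */
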
static unsigned int intel_crtc_ddb_weight(const struct intel_crtc_state *crtc_state)
{
	const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
	int hdisplay, vdisplay;

	if (!crtc_state->hw.active)
		return 0;

	/*
	 * The watermark/DDB requirement depends heavily on the width of
	 * the framebuffer, so instead of allocating DDB equally among
	 * pipes, distribute it based on the resolution/width of the
	 * display.
	 */
	drm_mode_get_hv_timing(pipe_mode, &hdisplay, &vdisplay);

	return hdisplay;
}

static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state,
				    enum pipe for_pipe,
				    unsigned int *weight_start,
				    unsigned int *weight_end,
				    unsigned int *weight_total)
{
	struct drm_i915_private *i915 =
		to_i915(dbuf_state->base.state->base.dev);
	enum pipe pipe;

	*weight_start = 0;
	*weight_end = 0;
	*weight_total = 0;

	for_each_pipe(i915, pipe) {
		int weight = dbuf_state->weight[pipe];

		/*
		 * Do not account for pipes using other slice sets. Luckily,
		 * as of the current BSpec, slice sets do not partially
		 * intersect (pipes share either the same single slice or the
		 * same slice set, i.e. no partial intersection), so it is
		 * enough to check for equality for now.
		 */
		if (dbuf_state->slices[pipe] != dbuf_state->slices[for_pipe])
			continue;

		*weight_total += weight;
		if (pipe < for_pipe) {
			*weight_start += weight;
			*weight_end += weight;
		} else if (pipe == for_pipe) {
			*weight_end += weight;
		}
	}
}

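/*
 * Worked example with hypothetical weights: pipes A/B/C sharing one
 * slice set with hdisplay-derived weights 1920/1280/800 and
 * for_pipe = PIPE_B give weight_start = 1920, weight_end = 3200 and
 * weight_total = 4000, i.e. pipe B is handed the (1920, 3200] / 4000
 * share of the DDB range in skl_crtc_allocate_ddb() below.
 */
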
static int
skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	unsigned int weight_total, weight_start, weight_end;
	const struct intel_dbuf_state *old_dbuf_state =
		intel_atomic_get_old_dbuf_state(state);
	struct intel_dbuf_state *new_dbuf_state =
		intel_atomic_get_new_dbuf_state(state);
	struct intel_crtc_state *crtc_state;
	struct skl_ddb_entry ddb_slices;
	enum pipe pipe = crtc->pipe;
	unsigned int mbus_offset = 0;
	u32 ddb_range_size;
	u32 dbuf_slice_mask;
	u32 start, end;
	int ret;

	if (new_dbuf_state->weight[pipe] == 0) {
		skl_ddb_entry_init(&new_dbuf_state->ddb[pipe], 0, 0);
		goto out;
	}

	dbuf_slice_mask = new_dbuf_state->slices[pipe];

	skl_ddb_entry_for_slices(i915, dbuf_slice_mask, &ddb_slices);
	mbus_offset = mbus_ddb_offset(i915, dbuf_slice_mask);
	ddb_range_size = skl_ddb_entry_size(&ddb_slices);

	intel_crtc_dbuf_weights(new_dbuf_state, pipe,
				&weight_start, &weight_end, &weight_total);

	start = ddb_range_size * weight_start / weight_total;
	end = ddb_range_size * weight_end / weight_total;

	skl_ddb_entry_init(&new_dbuf_state->ddb[pipe],
			   ddb_slices.start - mbus_offset + start,
			   ddb_slices.start - mbus_offset + end);

out:
	if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe] &&
	    skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe],
				&new_dbuf_state->ddb[pipe]))
		return 0;

	ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
	if (ret)
		return ret;

	crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	/*
	 * Used for checking overlaps, so we need absolute
	 * offsets instead of MBUS relative offsets.
	 */
	crtc_state->wm.skl.ddb.start = mbus_offset + new_dbuf_state->ddb[pipe].start;
	crtc_state->wm.skl.ddb.end = mbus_offset + new_dbuf_state->ddb[pipe].end;

	drm_dbg_kms(&i915->drm,
		    "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n",
		    crtc->base.base.id, crtc->base.name,
		    old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe],
		    old_dbuf_state->ddb[pipe].start, old_dbuf_state->ddb[pipe].end,
		    new_dbuf_state->ddb[pipe].start, new_dbuf_state->ddb[pipe].end,
		    old_dbuf_state->active_pipes, new_dbuf_state->active_pipes);

	return 0;
}

static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
				 int width, const struct drm_format_info *format,
				 u64 modifier, unsigned int rotation,
				 u32 plane_pixel_rate, struct skl_wm_params *wp,
				 int color_plane);

static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
				 struct intel_plane *plane,
				 int level,
				 unsigned int latency,
				 const struct skl_wm_params *wp,
				 const struct skl_wm_level *result_prev,
				 struct skl_wm_level *result /* out */);

static unsigned int
skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
		      int num_active)
{
	struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor);
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	int level, max_level = ilk_wm_max_level(i915);
	struct skl_wm_level wm = {};
	int ret, min_ddb_alloc = 0;
	struct skl_wm_params wp;

	ret = skl_compute_wm_params(crtc_state, 256,
				    drm_format_info(DRM_FORMAT_ARGB8888),
				    DRM_FORMAT_MOD_LINEAR,
				    DRM_MODE_ROTATE_0,
				    crtc_state->pixel_rate, &wp, 0);
	drm_WARN_ON(&i915->drm, ret);

	for (level = 0; level <= max_level; level++) {
		unsigned int latency = i915->display.wm.skl_latency[level];

		skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm);
		if (wm.min_ddb_alloc == U16_MAX)
			break;

		min_ddb_alloc = wm.min_ddb_alloc;
	}

	return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
}

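/*
 * Note the shape of the loop above: levels are walked bottom-up with a
 * worst-case 256 pixel wide linear ARGB8888 cursor until a level becomes
 * impossible (min_ddb_alloc == U16_MAX), and the last achievable level
 * wins. The result is clamped to at least 32 blocks for a single active
 * pipe and 8 blocks otherwise.
 */
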
static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
{
	skl_ddb_entry_init(entry,
			   REG_FIELD_GET(PLANE_BUF_START_MASK, reg),
			   REG_FIELD_GET(PLANE_BUF_END_MASK, reg));
	if (entry->end)
		entry->end++;
}

static void
skl_ddb_get_hw_plane_state(struct drm_i915_private *i915,
			   const enum pipe pipe,
			   const enum plane_id plane_id,
			   struct skl_ddb_entry *ddb,
			   struct skl_ddb_entry *ddb_y)
{
	u32 val;

	/* Cursor doesn't support NV12/planar, so no extra calculation needed */
	if (plane_id == PLANE_CURSOR) {
		val = intel_de_read(i915, CUR_BUF_CFG(pipe));
		skl_ddb_entry_init_from_hw(ddb, val);
		return;
	}

	val = intel_de_read(i915, PLANE_BUF_CFG(pipe, plane_id));
	skl_ddb_entry_init_from_hw(ddb, val);

	if (DISPLAY_VER(i915) >= 11)
		return;

	val = intel_de_read(i915, PLANE_NV12_BUF_CFG(pipe, plane_id));
	skl_ddb_entry_init_from_hw(ddb_y, val);
}

static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
				      struct skl_ddb_entry *ddb,
				      struct skl_ddb_entry *ddb_y)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	enum pipe pipe = crtc->pipe;
	intel_wakeref_t wakeref;
	enum plane_id plane_id;

	power_domain = POWER_DOMAIN_PIPE(pipe);
	wakeref = intel_display_power_get_if_enabled(i915, power_domain);
	if (!wakeref)
		return;

	for_each_plane_id_on_crtc(crtc, plane_id)
		skl_ddb_get_hw_plane_state(i915, pipe,
					   plane_id,
					   &ddb[plane_id],
					   &ddb_y[plane_id]);

	intel_display_power_put(i915, power_domain, wakeref);
}

struct dbuf_slice_conf_entry {
	u8 active_pipes;
	u8 dbuf_mask[I915_MAX_PIPES];
	bool join_mbus;
};

/*
 * Table taken from Bspec 12716
 * Pipes do have some preferred DBuf slice affinity,
 * plus there are some hardcoded requirements on how
 * those should be distributed for multipipe scenarios.
 * For more DBuf slices the algorithm can get even messier
 * and less readable, so it was decided to use a table almost
 * as-is from BSpec itself - that way it is at least easier
 * to compare, change and check.
 */

static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] =
/* Autogenerated with igt/tools/intel_dbuf_map tool: */
{
	{
		.active_pipes = BIT(PIPE_A),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
		},
	},
	{
		.active_pipes = BIT(PIPE_B),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S1),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_B] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_C] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_C] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S1),
			[PIPE_C] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_B] = BIT(DBUF_S1),
			[PIPE_C] = BIT(DBUF_S2),
		},
	},
	{}
};

/*
 * Table taken from Bspec 49255
 * Pipes do have some preferred DBuf slice affinity,
 * plus there are some hardcoded requirements on how
 * those should be distributed for multipipe scenarios.
 * For more DBuf slices the algorithm can get even messier
 * and less readable, so it was decided to use a table almost
 * as-is from BSpec itself - that way it is at least easier
 * to compare, change and check.
 */

static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =
/* Autogenerated with igt/tools/intel_dbuf_map tool: */
{
	{
		.active_pipes = BIT(PIPE_A),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_B),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S2),
			[PIPE_B] = BIT(DBUF_S1),
		},
	},
	{
		.active_pipes = BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_C] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S1),
			[PIPE_C] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_B] = BIT(DBUF_S1),
			[PIPE_C] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_D] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S1),
			[PIPE_D] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_B] = BIT(DBUF_S1),
			[PIPE_D] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_C] = BIT(DBUF_S1),
			[PIPE_D] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_C] = BIT(DBUF_S2),
			[PIPE_D] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S1),
			[PIPE_C] = BIT(DBUF_S2),
			[PIPE_D] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_B] = BIT(DBUF_S1),
			[PIPE_C] = BIT(DBUF_S2),
			[PIPE_D] = BIT(DBUF_S2),
		},
	},
	{}
};

static const struct dbuf_slice_conf_entry dg2_allowed_dbufs[] = {
	{
		.active_pipes = BIT(PIPE_A),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_B),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_B] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
		},
	},
	{
		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_B] = BIT(DBUF_S2),
			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
		},
	},
	{
		.active_pipes = BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
			[PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
		},
	},
	{
		.active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
			[PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_B] = BIT(DBUF_S2),
			[PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
		},
	},
	{
		.active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_C] = BIT(DBUF_S3),
			[PIPE_D] = BIT(DBUF_S4),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
			[PIPE_C] = BIT(DBUF_S3),
			[PIPE_D] = BIT(DBUF_S4),
		},
	},
	{
		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
			[PIPE_C] = BIT(DBUF_S3),
			[PIPE_D] = BIT(DBUF_S4),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_B] = BIT(DBUF_S2),
			[PIPE_C] = BIT(DBUF_S3),
			[PIPE_D] = BIT(DBUF_S4),
		},
	},
	{}
};

static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = {
	/*
	 * Keep the join_mbus cases first so check_mbus_joined()
	 * will prefer them over the !join_mbus cases.
	 */
	{
		.active_pipes = BIT(PIPE_A),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
		},
		.join_mbus = true,
	},
	{
		.active_pipes = BIT(PIPE_B),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
		},
		.join_mbus = true,
	},
	{
		.active_pipes = BIT(PIPE_A),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
		},
		.join_mbus = false,
	},
	{
		.active_pipes = BIT(PIPE_B),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
		},
		.join_mbus = false,
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
		},
	},
	{
		.active_pipes = BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
		},
	},
	{
		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
		},
	},
	{
		.active_pipes = BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
		},
	},
	{}
};

static bool check_mbus_joined(u8 active_pipes,
			      const struct dbuf_slice_conf_entry *dbuf_slices)
{
	int i;

	for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
		if (dbuf_slices[i].active_pipes == active_pipes)
			return dbuf_slices[i].join_mbus;
	}

	return false;
}

static bool adlp_check_mbus_joined(u8 active_pipes)
{
	return check_mbus_joined(active_pipes, adlp_allowed_dbufs);
}

static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus,
			      const struct dbuf_slice_conf_entry *dbuf_slices)
{
	int i;

	for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
		if (dbuf_slices[i].active_pipes == active_pipes &&
		    dbuf_slices[i].join_mbus == join_mbus)
			return dbuf_slices[i].dbuf_mask[pipe];
	}

	return 0;
}

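/*
 * Example lookup: for TGL with active_pipes = BIT(PIPE_A) | BIT(PIPE_B)
 * and join_mbus = false, the table maps PIPE_A to BIT(DBUF_S2) and
 * PIPE_B to BIT(DBUF_S1); any combination without a table entry falls
 * through to the 0 return above.
 */
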
/*
 * This function finds an entry with the same enabled pipe configuration and
 * returns the corresponding DBuf slice mask as stated in BSpec for the
 * particular platform.
 */
static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
{
	/*
	 * FIXME: For ICL this is still a bit unclear, as a previous BSpec
	 * revision required calculating a "pipe ratio" in order to determine
	 * if one or two slices can be used for single pipe configurations
	 * as an additional constraint to the existing table.
	 * However, based on recent info, it should not be "pipe ratio"
	 * but rather the ratio between pixel_rate and cdclk with additional
	 * constants, so for now we use only the table until this is
	 * clarified. Also this is the reason why the crtc_state param is
	 * still here - we will need it once those additional constraints
	 * pop up.
	 */
	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
				   icl_allowed_dbufs);
}

static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
{
	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
				   tgl_allowed_dbufs);
}

static u8 adlp_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
{
	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
				   adlp_allowed_dbufs);
}

static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
{
	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
				   dg2_allowed_dbufs);
}

static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (IS_DG2(i915))
		return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus);
	else if (DISPLAY_VER(i915) >= 13)
		return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus);
	else if (DISPLAY_VER(i915) == 12)
		return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
	else if (DISPLAY_VER(i915) == 11)
		return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
	/*
	 * For anything else just return one slice for now.
	 * Should be extended for other platforms.
	 */
	return active_pipes & BIT(pipe) ? BIT(DBUF_S1) : 0;
}

static bool
use_minimal_wm0_only(const struct intel_crtc_state *crtc_state,
		     struct intel_plane *plane)
{
	struct drm_i915_private *i915 = to_i915(plane->base.dev);

	return DISPLAY_VER(i915) >= 13 &&
	       crtc_state->uapi.async_flip &&
	       plane->async_flip;
}

static u64
skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	enum plane_id plane_id;
	u64 data_rate = 0;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		if (plane_id == PLANE_CURSOR)
			continue;

		data_rate += crtc_state->rel_data_rate[plane_id];

		if (DISPLAY_VER(i915) < 11)
			data_rate += crtc_state->rel_data_rate_y[plane_id];
	}

	return data_rate;
}

static const struct skl_wm_level *
skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
		   enum plane_id plane_id,
		   int level)
{
	const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];

	if (level == 0 && pipe_wm->use_sagv_wm)
		return &wm->sagv.wm0;

	return &wm->wm[level];
}

static const struct skl_wm_level *
skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm,
		   enum plane_id plane_id)
{
	const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];

	if (pipe_wm->use_sagv_wm)
		return &wm->sagv.trans_wm;

	return &wm->trans_wm;
}

/*
 * We only disable the watermarks for each plane if
 * they exceed the ddb allocation of said plane. This
 * is done so that we don't end up touching cursor
 * watermarks needlessly when some other plane reduces
 * our max possible watermark level.
 *
 * Bspec has this to say about the PLANE_WM enable bit:
 * "All the watermarks at this level for all enabled
 *  planes must be enabled before the level will be used."
 * So this is actually safe to do.
 */
static void
skl_check_wm_level(struct skl_wm_level *wm, const struct skl_ddb_entry *ddb)
{
	if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb))
		memset(wm, 0, sizeof(*wm));
}

static void
skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm,
			const struct skl_ddb_entry *ddb_y, const struct skl_ddb_entry *ddb)
{
	if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb_y) ||
	    uv_wm->min_ddb_alloc > skl_ddb_entry_size(ddb)) {
		memset(wm, 0, sizeof(*wm));
		memset(uv_wm, 0, sizeof(*uv_wm));
	}
}

static bool icl_need_wm1_wa(struct drm_i915_private *i915,
			    enum plane_id plane_id)
{
	/*
	 * Wa_1408961008:icl, ehl
	 * Wa_14012656716:tgl, adl
	 * Underruns with WM1+ disabled
	 */
	return DISPLAY_VER(i915) == 11 ||
	       (IS_DISPLAY_VER(i915, 12, 13) && plane_id == PLANE_CURSOR);
}

struct skl_plane_ddb_iter {
	u64 data_rate;
	u16 start, size;
};

static void
skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter,
		       struct skl_ddb_entry *ddb,
		       const struct skl_wm_level *wm,
		       u64 data_rate)
{
	u16 size, extra = 0;

	if (data_rate) {
		extra = min_t(u16, iter->size,
			      DIV64_U64_ROUND_UP(iter->size * data_rate,
						 iter->data_rate));
		iter->size -= extra;
		iter->data_rate -= data_rate;
	}

	/*
	 * Keep ddb entry of all disabled planes explicitly zeroed
	 * to avoid skl_ddb_add_affected_planes() adding them to
	 * the state when other planes change their allocations.
	 */
	size = wm->min_ddb_alloc + extra;
	if (size)
		iter->start = skl_ddb_entry_init(ddb, iter->start,
						 iter->start + size);
}

static int
skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_dbuf_state *dbuf_state =
		intel_atomic_get_new_dbuf_state(state);
	const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe];
	int num_active = hweight8(dbuf_state->active_pipes);
	struct skl_plane_ddb_iter iter;
	enum plane_id plane_id;
	u16 cursor_size;
	u32 blocks;
	int level;

	/* Clear the partitioning for disabled planes. */
	memset(crtc_state->wm.skl.plane_ddb, 0, sizeof(crtc_state->wm.skl.plane_ddb));
	memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));

	if (!crtc_state->hw.active)
		return 0;

	iter.start = alloc->start;
	iter.size = skl_ddb_entry_size(alloc);
	if (iter.size == 0)
		return 0;

	/* Allocate fixed number of blocks for cursor. */
	cursor_size = skl_cursor_allocation(crtc_state, num_active);
	iter.size -= cursor_size;
	skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR],
			   alloc->end - cursor_size, alloc->end);

	iter.data_rate = skl_total_relative_data_rate(crtc_state);

	/*
	 * Find the highest watermark level for which we can satisfy the block
	 * requirement of active planes.
	 */
	for (level = ilk_wm_max_level(i915); level >= 0; level--) {
		blocks = 0;
		for_each_plane_id_on_crtc(crtc, plane_id) {
			const struct skl_plane_wm *wm =
				&crtc_state->wm.skl.optimal.planes[plane_id];

			if (plane_id == PLANE_CURSOR) {
				const struct skl_ddb_entry *ddb =
					&crtc_state->wm.skl.plane_ddb[plane_id];

				if (wm->wm[level].min_ddb_alloc > skl_ddb_entry_size(ddb)) {
					drm_WARN_ON(&i915->drm,
						    wm->wm[level].min_ddb_alloc != U16_MAX);
					blocks = U32_MAX;
					break;
				}
				continue;
			}

			blocks += wm->wm[level].min_ddb_alloc;
			blocks += wm->uv_wm[level].min_ddb_alloc;
		}

		if (blocks <= iter.size) {
			iter.size -= blocks;
			break;
		}
	}

	if (level < 0) {
		drm_dbg_kms(&i915->drm,
			    "Requested display configuration exceeds system DDB limitations");
		drm_dbg_kms(&i915->drm, "minimum required %d/%d\n",
			    blocks, iter.size);
		return -EINVAL;
	}

	/* avoid the WARN later when we don't allocate any extra DDB */
	if (iter.data_rate == 0)
		return 0;

	/*
	 * Grant each plane the blocks it requires at the highest achievable
	 * watermark level, plus an extra share of the leftover blocks
	 * proportional to its relative data rate.
	 */
	for_each_plane_id_on_crtc(crtc, plane_id) {
		struct skl_ddb_entry *ddb =
			&crtc_state->wm.skl.plane_ddb[plane_id];
		struct skl_ddb_entry *ddb_y =
			&crtc_state->wm.skl.plane_ddb_y[plane_id];
		const struct skl_plane_wm *wm =
			&crtc_state->wm.skl.optimal.planes[plane_id];

		if (plane_id == PLANE_CURSOR)
			continue;

		if (DISPLAY_VER(i915) < 11 &&
		    crtc_state->nv12_planes & BIT(plane_id)) {
			skl_allocate_plane_ddb(&iter, ddb_y, &wm->wm[level],
					       crtc_state->rel_data_rate_y[plane_id]);
			skl_allocate_plane_ddb(&iter, ddb, &wm->uv_wm[level],
					       crtc_state->rel_data_rate[plane_id]);
		} else {
			skl_allocate_plane_ddb(&iter, ddb, &wm->wm[level],
					       crtc_state->rel_data_rate[plane_id]);
		}
	}

	drm_WARN_ON(&i915->drm, iter.size != 0 || iter.data_rate != 0);

	/*
	 * When we calculated watermark values we didn't know how high
	 * of a level we'd actually be able to hit, so we just marked
	 * all levels as "enabled."  Go back now and disable the ones
	 * that aren't actually possible.
	 */
	for (level++; level <= ilk_wm_max_level(i915); level++) {
		for_each_plane_id_on_crtc(crtc, plane_id) {
			const struct skl_ddb_entry *ddb =
				&crtc_state->wm.skl.plane_ddb[plane_id];
			const struct skl_ddb_entry *ddb_y =
				&crtc_state->wm.skl.plane_ddb_y[plane_id];
			struct skl_plane_wm *wm =
				&crtc_state->wm.skl.optimal.planes[plane_id];

			if (DISPLAY_VER(i915) < 11 &&
			    crtc_state->nv12_planes & BIT(plane_id))
				skl_check_nv12_wm_level(&wm->wm[level],
							&wm->uv_wm[level],
							ddb_y, ddb);
			else
				skl_check_wm_level(&wm->wm[level], ddb);

			if (icl_need_wm1_wa(i915, plane_id) &&
			    level == 1 && !wm->wm[level].enable &&
			    wm->wm[0].enable) {
				wm->wm[level].blocks = wm->wm[0].blocks;
				wm->wm[level].lines = wm->wm[0].lines;
				wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
			}
		}
	}

	/*
	 * Go back and disable the transition and SAGV watermarks
	 * if it turns out we don't have enough DDB blocks for them.
	 */
	for_each_plane_id_on_crtc(crtc, plane_id) {
		const struct skl_ddb_entry *ddb =
			&crtc_state->wm.skl.plane_ddb[plane_id];
		const struct skl_ddb_entry *ddb_y =
			&crtc_state->wm.skl.plane_ddb_y[plane_id];
		struct skl_plane_wm *wm =
			&crtc_state->wm.skl.optimal.planes[plane_id];

		if (DISPLAY_VER(i915) < 11 &&
		    crtc_state->nv12_planes & BIT(plane_id)) {
			skl_check_wm_level(&wm->trans_wm, ddb_y);
		} else {
			WARN_ON(skl_ddb_entry_size(ddb_y));

			skl_check_wm_level(&wm->trans_wm, ddb);
		}

		skl_check_wm_level(&wm->sagv.wm0, ddb);
		skl_check_wm_level(&wm->sagv.trans_wm, ddb);
	}

	return 0;
}

/*
 * The max latency should be 257 (max the punit can code is 255 and we add 2us
 * for the read latency) and cpp should always be <= 8, so that
 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
 */
static uint_fixed_16_16_t
skl_wm_method1(const struct drm_i915_private *i915, u32 pixel_rate,
	       u8 cpp, u32 latency, u32 dbuf_block_size)
{
	u32 wm_intermediate_val;
	uint_fixed_16_16_t ret;

	if (latency == 0)
		return FP_16_16_MAX;

	wm_intermediate_val = latency * pixel_rate * cpp;
	ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);

	if (DISPLAY_VER(i915) >= 10)
		ret = add_fixed16_u32(ret, 1);

	return ret;
}

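/*
 * Worked example (illustrative numbers, pixel rate in kHz as elsewhere
 * in this file): latency = 10 us, pixel_rate = 500000, cpp = 4 and
 * dbuf_block_size = 512 give 10 * 500000 * 4 / (1000 * 512) ~= 39.1
 * blocks, plus the extra block added on gen10+.
 */
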
static uint_fixed_16_16_t
skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
	       uint_fixed_16_16_t plane_blocks_per_line)
{
	u32 wm_intermediate_val;
	uint_fixed_16_16_t ret;

	if (latency == 0)
		return FP_16_16_MAX;

	wm_intermediate_val = latency * pixel_rate;
	wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
					   pipe_htotal * 1000);
	ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
	return ret;
}

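/*
 * Example: latency = 10 us, pixel_rate = 500000 kHz and
 * pipe_htotal = 2200 mean ~2.3 lines are scanned out during the latency
 * window; DIV_ROUND_UP makes that 3 lines, which is then scaled by
 * plane_blocks_per_line.
 */
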
static uint_fixed_16_16_t
intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	u32 pixel_rate;
	u32 crtc_htotal;
	uint_fixed_16_16_t linetime_us;

	if (!crtc_state->hw.active)
		return u32_to_fixed16(0);

	pixel_rate = crtc_state->pixel_rate;

	if (drm_WARN_ON(&i915->drm, pixel_rate == 0))
		return u32_to_fixed16(0);

	crtc_htotal = crtc_state->hw.pipe_mode.crtc_htotal;
	linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);

	return linetime_us;
}

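/*
 * Example: a 1920x1080@60 mode with crtc_htotal = 2200 and
 * pixel_rate = 148500 kHz has a line time of
 * 2200 * 1000 / 148500 ~= 14.8 us.
 */
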
static int
skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
		      int width, const struct drm_format_info *format,
		      u64 modifier, unsigned int rotation,
		      u32 plane_pixel_rate, struct skl_wm_params *wp,
		      int color_plane)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	u32 interm_pbpl;

	/* only planar formats have two planes */
	if (color_plane == 1 &&
	    !intel_format_info_is_yuv_semiplanar(format, modifier)) {
		drm_dbg_kms(&i915->drm,
			    "Non-planar formats have a single plane\n");
		return -EINVAL;
	}

	wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
	wp->y_tiled = modifier != I915_FORMAT_MOD_X_TILED &&
		intel_fb_is_tiled_modifier(modifier);
	wp->rc_surface = intel_fb_is_ccs_modifier(modifier);
	wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);

	wp->width = width;
	if (color_plane == 1 && wp->is_planar)
		wp->width /= 2;

	wp->cpp = format->cpp[color_plane];
	wp->plane_pixel_rate = plane_pixel_rate;

	if (DISPLAY_VER(i915) >= 11 &&
	    modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
		wp->dbuf_block_size = 256;
	else
		wp->dbuf_block_size = 512;

	if (drm_rotation_90_or_270(rotation)) {
		switch (wp->cpp) {
		case 1:
			wp->y_min_scanlines = 16;
			break;
		case 2:
			wp->y_min_scanlines = 8;
			break;
		case 4:
			wp->y_min_scanlines = 4;
			break;
		default:
			MISSING_CASE(wp->cpp);
			return -EINVAL;
		}
	} else {
		wp->y_min_scanlines = 4;
	}

	if (skl_needs_memory_bw_wa(i915))
		wp->y_min_scanlines *= 2;

	wp->plane_bytes_per_line = wp->width * wp->cpp;
	if (wp->y_tiled) {
		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *
					   wp->y_min_scanlines,
					   wp->dbuf_block_size);

		if (DISPLAY_VER(i915) >= 10)
			interm_pbpl++;

		wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
							wp->y_min_scanlines);
	} else {
		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
					   wp->dbuf_block_size);

		if (!wp->x_tiled || DISPLAY_VER(i915) >= 10)
			interm_pbpl++;

		wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
	}

	wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
					     wp->plane_blocks_per_line);

	wp->linetime_us = fixed16_to_u32_round_up(intel_get_linetime_us(crtc_state));

	return 0;
}

static int
skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
			    const struct intel_plane_state *plane_state,
			    struct skl_wm_params *wp, int color_plane)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int width;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	width = drm_rect_width(&plane_state->uapi.src) >> 16;

	return skl_compute_wm_params(crtc_state, width,
				     fb->format, fb->modifier,
				     plane_state->hw.rotation,
				     intel_plane_pixel_rate(crtc_state, plane_state),
				     wp, color_plane);
}

static bool skl_wm_has_lines(struct drm_i915_private *i915, int level)
{
	if (DISPLAY_VER(i915) >= 10)
		return true;

	/* The number of lines is ignored for the level 0 watermark. */
	return level > 0;
}

static int skl_wm_max_lines(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 13)
		return 255;
	else
		return 31;
}

static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
				 struct intel_plane *plane,
				 int level,
				 unsigned int latency,
				 const struct skl_wm_params *wp,
				 const struct skl_wm_level *result_prev,
				 struct skl_wm_level *result /* out */)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	uint_fixed_16_16_t method1, method2;
	uint_fixed_16_16_t selected_result;
	u32 blocks, lines, min_ddb_alloc = 0;

	if (latency == 0 ||
	    (use_minimal_wm0_only(crtc_state, plane) && level > 0)) {
		/* reject it */
		result->min_ddb_alloc = U16_MAX;
		return;
	}

	/*
	 * WaIncreaseLatencyIPCEnabled: kbl,cfl
	 * Display WA #1141: kbl,cfl
	 */
	if ((IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) &&
	    skl_watermark_ipc_enabled(i915))
		latency += 4;

	if (skl_needs_memory_bw_wa(i915) && wp->x_tiled)
		latency += 15;

	method1 = skl_wm_method1(i915, wp->plane_pixel_rate,
				 wp->cpp, latency, wp->dbuf_block_size);
	method2 = skl_wm_method2(wp->plane_pixel_rate,
				 crtc_state->hw.pipe_mode.crtc_htotal,
				 latency,
				 wp->plane_blocks_per_line);

	if (wp->y_tiled) {
		selected_result = max_fixed16(method2, wp->y_tile_minimum);
	} else {
		if ((wp->cpp * crtc_state->hw.pipe_mode.crtc_htotal /
		     wp->dbuf_block_size < 1) &&
		    (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
			selected_result = method2;
		} else if (latency >= wp->linetime_us) {
			if (DISPLAY_VER(i915) == 9)
				selected_result = min_fixed16(method1, method2);
			else
				selected_result = method2;
		} else {
			selected_result = method1;
		}
	}

	blocks = fixed16_to_u32_round_up(selected_result) + 1;
	/*
	 * Let's have blocks at minimum equivalent to plane_blocks_per_line
	 * as there will be at minimum one line for lines configuration. This
	 * is a work around for FIFO underruns observed with resolutions like
	 * 4k 60 Hz in single channel DRAM configurations.
	 *
	 * As per the Bspec 49325, if the ddb allocation can hold at least
	 * one plane_blocks_per_line, we should have selected method2 in
	 * the above logic. Assuming that modern versions have enough dbuf
	 * and method2 guarantees blocks equivalent to at least 1 line,
	 * select the blocks as plane_blocks_per_line.
	 *
	 * TODO: Revisit the logic when we have better understanding on DRAM
	 * channels' impact on the level 0 memory latency and the relevant
	 * wm calculations.
	 */
	if (skl_wm_has_lines(i915, level))
		blocks = max(blocks,
			     fixed16_to_u32_round_up(wp->plane_blocks_per_line));
	lines = div_round_up_fixed16(selected_result,
				     wp->plane_blocks_per_line);

	if (DISPLAY_VER(i915) == 9) {
		/* Display WA #1125: skl,bxt,kbl */
		if (level == 0 && wp->rc_surface)
			blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);

		/* Display WA #1126: skl,bxt,kbl */
		if (level >= 1 && level <= 7) {
			if (wp->y_tiled) {
				blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
				lines += wp->y_min_scanlines;
			} else {
				blocks++;
			}

			/*
			 * Make sure result blocks for higher latency levels are
			 * at least as high as the level below the current level.
			 * Assumption in DDB algorithm optimization for special
			 * cases. Also covers Display WA #1125 for RC.
			 */
			if (result_prev->blocks > blocks)
				blocks = result_prev->blocks;
		}
	}

	if (DISPLAY_VER(i915) >= 11) {
		if (wp->y_tiled) {
			int extra_lines;

			if (lines % wp->y_min_scanlines == 0)
				extra_lines = wp->y_min_scanlines;
			else
				extra_lines = wp->y_min_scanlines * 2 -
					lines % wp->y_min_scanlines;

			min_ddb_alloc = mul_round_up_u32_fixed16(lines + extra_lines,
								 wp->plane_blocks_per_line);
		} else {
			min_ddb_alloc = blocks + DIV_ROUND_UP(blocks, 10);
		}
	}

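	/*
	 * Example for the y-tiled branch above: with y_min_scanlines = 4 and
	 * lines = 7, 7 % 4 = 3 gives extra_lines = 2 * 4 - 3 = 5, i.e. the
	 * allocation is padded from 7 to 12 lines: up to the next multiple
	 * of y_min_scanlines plus one extra tile row.
	 */
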
	if (!skl_wm_has_lines(i915, level))
		lines = 0;

	if (lines > skl_wm_max_lines(i915)) {
		/* reject it */
		result->min_ddb_alloc = U16_MAX;
		return;
	}

	/*
	 * If lines is valid, assume we can use this watermark level
	 * for now.  We'll come back and disable it after we calculate the
	 * DDB allocation if it turns out we don't actually have enough
	 * blocks to satisfy it.
	 */
	result->blocks = blocks;
	result->lines = lines;
	/* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */
	result->min_ddb_alloc = max(min_ddb_alloc, blocks) + 1;
	result->enable = true;

	if (DISPLAY_VER(i915) < 12 && i915->display.sagv.block_time_us)
		result->can_sagv = latency >= i915->display.sagv.block_time_us;
}

static void
skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
		      struct intel_plane *plane,
		      const struct skl_wm_params *wm_params,
		      struct skl_wm_level *levels)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	int level, max_level = ilk_wm_max_level(i915);
	struct skl_wm_level *result_prev = &levels[0];

	for (level = 0; level <= max_level; level++) {
		struct skl_wm_level *result = &levels[level];
		unsigned int latency = i915->display.wm.skl_latency[level];

		skl_compute_plane_wm(crtc_state, plane, level, latency,
				     wm_params, result_prev, result);

		result_prev = result;
	}
}

static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
				struct intel_plane *plane,
				const struct skl_wm_params *wm_params,
				struct skl_plane_wm *plane_wm)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0;
	struct skl_wm_level *levels = plane_wm->wm;
	unsigned int latency = 0;

	if (i915->display.sagv.block_time_us)
		latency = i915->display.sagv.block_time_us + i915->display.wm.skl_latency[0];

	skl_compute_plane_wm(crtc_state, plane, 0, latency,
			     wm_params, &levels[0],
			     sagv_wm);
}

static void skl_compute_transition_wm(struct drm_i915_private *i915,
				      struct skl_wm_level *trans_wm,
				      const struct skl_wm_level *wm0,
				      const struct skl_wm_params *wp)
{
	u16 trans_min, trans_amount, trans_y_tile_min;
	u16 wm0_blocks, trans_offset, blocks;

	/* Transition WMs don't make any sense if IPC is disabled */
	if (!skl_watermark_ipc_enabled(i915))
		return;

	/*
	 * WaDisableTWM:skl,kbl,cfl,bxt
	 * Transition WMs are not recommended by the HW team for GEN9
	 */
	if (DISPLAY_VER(i915) == 9)
		return;

	if (DISPLAY_VER(i915) >= 11)
		trans_min = 4;
	else
		trans_min = 14;

	/* Display WA #1140: glk,cnl */
	if (DISPLAY_VER(i915) == 10)
		trans_amount = 0;
	else
		trans_amount = 10; /* This is configurable amount */

	trans_offset = trans_min + trans_amount;

	/*
	 * The spec asks for Selected Result Blocks for wm0 (the real value),
	 * not Result Blocks (the integer value). Pay attention to the capital
	 * letters. The value wm_l0->blocks is actually Result Blocks, but
	 * since Result Blocks is the ceiling of Selected Result Blocks plus 1,
	 * and since we later will have to get the ceiling of the sum in the
	 * transition watermarks calculation, we can just pretend Selected
	 * Result Blocks is Result Blocks minus 1 and it should work for the
	 * current platforms.
	 */
	wm0_blocks = wm0->blocks - 1;

	if (wp->y_tiled) {
		trans_y_tile_min =
			(u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum);
		blocks = max(wm0_blocks, trans_y_tile_min) + trans_offset;
	} else {
		blocks = wm0_blocks + trans_offset;
	}
	blocks++;

	/*
	 * Just assume we can enable the transition watermark.  After
	 * computing the DDB we'll come back and disable it if that
	 * assumption turns out to be false.
	 */
	trans_wm->blocks = blocks;
	trans_wm->min_ddb_alloc = max_t(u16, wm0->min_ddb_alloc, blocks + 1);
	trans_wm->enable = true;
}

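/*
 * Worked example for the non y-tiled path (gen11+ values filled in
 * above): wm0->blocks = 40, trans_min = 4 and trans_amount = 10 give
 * blocks = (40 - 1) + 14 + 1 = 54, so the transition watermark needs a
 * DDB allocation of at least 55 blocks.
 */
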
static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state,
				     struct intel_plane *plane, int color_plane)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
	struct skl_wm_params wm_params;
	int ret;

	ret = skl_compute_plane_wm_params(crtc_state, plane_state,
					  &wm_params, color_plane);
	if (ret)
		return ret;

	skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->wm);

	skl_compute_transition_wm(i915, &wm->trans_wm,
				  &wm->wm[0], &wm_params);

	if (DISPLAY_VER(i915) >= 12) {
		tgl_compute_sagv_wm(crtc_state, plane, &wm_params, wm);

		skl_compute_transition_wm(i915, &wm->sagv.trans_wm,
					  &wm->sagv.wm0, &wm_params);
	}

	return 0;
}

static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
				 const struct intel_plane_state *plane_state,
				 struct intel_plane *plane)
{
	struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
	struct skl_wm_params wm_params;
	int ret;

	wm->is_planar = true;

	/* uv plane watermarks must also be validated for NV12/Planar */
	ret = skl_compute_plane_wm_params(crtc_state, plane_state,
					  &wm_params, 1);
	if (ret)
		return ret;

	skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->uv_wm);

	return 0;
}

static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	enum plane_id plane_id = plane->id;
	struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int ret;

	memset(wm, 0, sizeof(*wm));

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	ret = skl_build_plane_wm_single(crtc_state, plane_state,
					plane, 0);
	if (ret)
		return ret;

	if (fb->format->is_yuv && fb->format->num_planes > 1) {
		ret = skl_build_plane_wm_uv(crtc_state, plane_state,
					    plane);
		if (ret)
			return ret;
	}

	return 0;
}

static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	enum plane_id plane_id = plane->id;
	struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
	int ret;

	/* Watermarks calculated in master */
	if (plane_state->planar_slave)
		return 0;

	memset(wm, 0, sizeof(*wm));

	if (plane_state->planar_linked_plane) {
		const struct drm_framebuffer *fb = plane_state->hw.fb;

		drm_WARN_ON(&i915->drm,
			    !intel_wm_plane_visible(crtc_state, plane_state));
		drm_WARN_ON(&i915->drm, !fb->format->is_yuv ||
			    fb->format->num_planes == 1);

		ret = skl_build_plane_wm_single(crtc_state, plane_state,
						plane_state->planar_linked_plane, 0);
		if (ret)
			return ret;

		ret = skl_build_plane_wm_single(crtc_state, plane_state,
						plane, 1);
		if (ret)
			return ret;
	} else if (intel_wm_plane_visible(crtc_state, plane_state)) {
		ret = skl_build_plane_wm_single(crtc_state, plane_state,
						plane, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int skl_build_pipe_wm(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	int ret, i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		/*
		 * FIXME: we should perhaps check {old,new}_plane_crtc->hw.crtc
		 * instead, but we don't populate that correctly for NV12 Y
		 * planes, so for now hack around it.
		 */
		if (plane->pipe != crtc->pipe)
			continue;

		if (DISPLAY_VER(i915) >= 11)
			ret = icl_build_plane_wm(crtc_state, plane_state);
		else
			ret = skl_build_plane_wm(crtc_state, plane_state);
		if (ret)
			return ret;
	}

	crtc_state->wm.skl.optimal = crtc_state->wm.skl.raw;

	return 0;
}

static void skl_ddb_entry_write(struct drm_i915_private *i915,
				i915_reg_t reg,
				const struct skl_ddb_entry *entry)
{
	if (entry->end)
		intel_de_write_fw(i915, reg,
				  PLANE_BUF_END(entry->end - 1) |
				  PLANE_BUF_START(entry->start));
	else
		intel_de_write_fw(i915, reg, 0);
}

static void skl_write_wm_level(struct drm_i915_private *i915,
			       i915_reg_t reg,
			       const struct skl_wm_level *level)
{
	u32 val = 0;

	if (level->enable)
		val |= PLANE_WM_EN;
	if (level->ignore_lines)
		val |= PLANE_WM_IGNORE_LINES;
	val |= REG_FIELD_PREP(PLANE_WM_BLOCKS_MASK, level->blocks);
	val |= REG_FIELD_PREP(PLANE_WM_LINES_MASK, level->lines);

	intel_de_write_fw(i915, reg, val);
}

void skl_write_plane_wm(struct intel_plane *plane,
			const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	int level, max_level = ilk_wm_max_level(i915);
	enum plane_id plane_id = plane->id;
	enum pipe pipe = plane->pipe;
	const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
	const struct skl_ddb_entry *ddb =
		&crtc_state->wm.skl.plane_ddb[plane_id];
	const struct skl_ddb_entry *ddb_y =
		&crtc_state->wm.skl.plane_ddb_y[plane_id];

	for (level = 0; level <= max_level; level++)
		skl_write_wm_level(i915, PLANE_WM(pipe, plane_id, level),
				   skl_plane_wm_level(pipe_wm, plane_id, level));

	skl_write_wm_level(i915, PLANE_WM_TRANS(pipe, plane_id),
			   skl_plane_trans_wm(pipe_wm, plane_id));

	if (HAS_HW_SAGV_WM(i915)) {
		const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];

		skl_write_wm_level(i915, PLANE_WM_SAGV(pipe, plane_id),
				   &wm->sagv.wm0);
		skl_write_wm_level(i915, PLANE_WM_SAGV_TRANS(pipe, plane_id),
				   &wm->sagv.trans_wm);
	}

	skl_ddb_entry_write(i915,
			    PLANE_BUF_CFG(pipe, plane_id), ddb);

	if (DISPLAY_VER(i915) < 11)
		skl_ddb_entry_write(i915,
				    PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_y);
}

void skl_write_cursor_wm(struct intel_plane *plane,
			 const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	int level, max_level = ilk_wm_max_level(i915);
	enum plane_id plane_id = plane->id;
	enum pipe pipe = plane->pipe;
	const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
	const struct skl_ddb_entry *ddb =
		&crtc_state->wm.skl.plane_ddb[plane_id];

	for (level = 0; level <= max_level; level++)
		skl_write_wm_level(i915, CUR_WM(pipe, level),
				   skl_plane_wm_level(pipe_wm, plane_id, level));

	skl_write_wm_level(i915, CUR_WM_TRANS(pipe),
			   skl_plane_trans_wm(pipe_wm, plane_id));

	if (HAS_HW_SAGV_WM(i915)) {
		const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];

		skl_write_wm_level(i915, CUR_WM_SAGV(pipe),
				   &wm->sagv.wm0);
		skl_write_wm_level(i915, CUR_WM_SAGV_TRANS(pipe),
				   &wm->sagv.trans_wm);
	}

	skl_ddb_entry_write(i915, CUR_BUF_CFG(pipe), ddb);
}
static bool skl_wm_level_equals(const struct skl_wm_level *l1,
				const struct skl_wm_level *l2)
{
	return l1->enable == l2->enable &&
		l1->ignore_lines == l2->ignore_lines &&
		l1->lines == l2->lines &&
		l1->blocks == l2->blocks;
}

static bool skl_plane_wm_equals(struct drm_i915_private *i915,
				const struct skl_plane_wm *wm1,
				const struct skl_plane_wm *wm2)
{
	int level, max_level = ilk_wm_max_level(i915);

	for (level = 0; level <= max_level; level++) {
		/*
		 * We don't check uv_wm as the hardware doesn't actually
		 * use it. It only gets used for calculating the required
		 * ddb allocation.
		 */
		if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]))
			return false;
	}

	return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm) &&
		skl_wm_level_equals(&wm1->sagv.wm0, &wm2->sagv.wm0) &&
		skl_wm_level_equals(&wm1->sagv.trans_wm, &wm2->sagv.trans_wm);
}

static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
				    const struct skl_ddb_entry *b)
{
	return a->start < b->end && b->start < a->end;
}
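/*
 * Worked example (illustrative): entries [0, 100) and [100, 200) do
 * not overlap (100 < 100 is false), while [0, 100) and [50, 150) do
 * (0 < 150 && 50 < 100). With the half-open interval convention a
 * shared boundary block never counts as an overlap.
 */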
static void skl_ddb_entry_union(struct skl_ddb_entry *a,
				const struct skl_ddb_entry *b)
{
	if (a->end && b->end) {
		a->start = min(a->start, b->start);
		a->end = max(a->end, b->end);
	} else if (b->end) {
		a->start = b->start;
		a->end = b->end;
	}
}

bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
				 const struct skl_ddb_entry *entries,
				 int num_entries, int ignore_idx)
{
	int i;

	for (i = 0; i < num_entries; i++) {
		if (i != ignore_idx &&
		    skl_ddb_entries_overlap(ddb, &entries[i]))
			return true;
	}

	return false;
}
static int
skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
			    struct intel_crtc_state *new_crtc_state)
{
	struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state);
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
		struct intel_plane_state *plane_state;
		enum plane_id plane_id = plane->id;

		if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb[plane_id],
					&new_crtc_state->wm.skl.plane_ddb[plane_id]) &&
		    skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
					&new_crtc_state->wm.skl.plane_ddb_y[plane_id]))
			continue;

		plane_state = intel_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);

		new_crtc_state->update_planes |= BIT(plane_id);
	}

	return 0;
}
static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
{
	struct drm_i915_private *i915 = to_i915(dbuf_state->base.state->base.dev);
	u8 enabled_slices;
	enum pipe pipe;

	/*
	 * FIXME: For now we always enable slice S1 as per
	 * the Bspec display initialization sequence.
	 */
	enabled_slices = BIT(DBUF_S1);

	for_each_pipe(i915, pipe)
		enabled_slices |= dbuf_state->slices[pipe];

	return enabled_slices;
}
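/*
 * Example (illustrative): with pipe A using slice S1 and pipe B using
 * slice S2, the loop above ORs the per-pipe masks together, giving
 * enabled_slices = BIT(DBUF_S1) | BIT(DBUF_S2). A configuration with
 * no active pipes still reports BIT(DBUF_S1) because of the FIXME
 * above, matching the Bspec init sequence.
 */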
static int
skl_compute_ddb(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_dbuf_state *old_dbuf_state;
	struct intel_dbuf_state *new_dbuf_state = NULL;
	const struct intel_crtc_state *old_crtc_state;
	struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int ret, i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		new_dbuf_state = intel_atomic_get_dbuf_state(state);
		if (IS_ERR(new_dbuf_state))
			return PTR_ERR(new_dbuf_state);

		old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
		break;
	}

	if (!new_dbuf_state)
		return 0;

	new_dbuf_state->active_pipes =
		intel_calc_active_pipes(state, old_dbuf_state->active_pipes);

	if (old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) {
		ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
		if (ret)
			return ret;
	}

	if (HAS_MBUS_JOINING(i915))
		new_dbuf_state->joined_mbus =
			adlp_check_mbus_joined(new_dbuf_state->active_pipes);

	for_each_intel_crtc(&i915->drm, crtc) {
		enum pipe pipe = crtc->pipe;

		new_dbuf_state->slices[pipe] =
			skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes,
						new_dbuf_state->joined_mbus);

		if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe])
			continue;

		ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
		if (ret)
			return ret;
	}

	new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state);

	if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices ||
	    old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
		ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
		if (ret)
			return ret;

		if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
			/* TODO: Implement vblank synchronized MBUS joining changes */
			ret = intel_modeset_all_pipes(state, "MBUS joining change");
			if (ret)
				return ret;
		}

		drm_dbg_kms(&i915->drm,
			    "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n",
			    old_dbuf_state->enabled_slices,
			    new_dbuf_state->enabled_slices,
			    INTEL_INFO(i915)->display.dbuf.slice_mask,
			    str_yes_no(old_dbuf_state->joined_mbus),
			    str_yes_no(new_dbuf_state->joined_mbus));
	}

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		new_dbuf_state->weight[pipe] = intel_crtc_ddb_weight(new_crtc_state);

		if (old_dbuf_state->weight[pipe] == new_dbuf_state->weight[pipe])
			continue;

		ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
		if (ret)
			return ret;
	}

	for_each_intel_crtc(&i915->drm, crtc) {
		ret = skl_crtc_allocate_ddb(state, crtc);
		if (ret)
			return ret;
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		ret = skl_crtc_allocate_plane_ddb(state, crtc);
		if (ret)
			return ret;

		ret = skl_ddb_add_affected_planes(old_crtc_state,
						  new_crtc_state);
		if (ret)
			return ret;
	}

	return 0;
}
static char enast(bool enable)
{
	return enable ? '*' : ' ';
}
static void
skl_print_wm_changes(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state;
	const struct intel_crtc_state *new_crtc_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i;

	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm;

		old_pipe_wm = &old_crtc_state->wm.skl.optimal;
		new_pipe_wm = &new_crtc_state->wm.skl.optimal;

		for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
			enum plane_id plane_id = plane->id;
			const struct skl_ddb_entry *old, *new;

			old = &old_crtc_state->wm.skl.plane_ddb[plane_id];
			new = &new_crtc_state->wm.skl.plane_ddb[plane_id];

			if (skl_ddb_entry_equal(old, new))
				continue;

			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
				    plane->base.base.id, plane->base.name,
				    old->start, old->end, new->start, new->end,
				    skl_ddb_entry_size(old), skl_ddb_entry_size(new));
		}

		for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
			enum plane_id plane_id = plane->id;
			const struct skl_plane_wm *old_wm, *new_wm;

			old_wm = &old_pipe_wm->planes[plane_id];
			new_wm = &new_pipe_wm->planes[plane_id];

			if (skl_plane_wm_equals(i915, old_wm, new_wm))
				continue;

			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm"
				    " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n",
				    plane->base.base.id, plane->base.name,
				    enast(old_wm->wm[0].enable), enast(old_wm->wm[1].enable),
				    enast(old_wm->wm[2].enable), enast(old_wm->wm[3].enable),
				    enast(old_wm->wm[4].enable), enast(old_wm->wm[5].enable),
				    enast(old_wm->wm[6].enable), enast(old_wm->wm[7].enable),
				    enast(old_wm->trans_wm.enable),
				    enast(old_wm->sagv.wm0.enable),
				    enast(old_wm->sagv.trans_wm.enable),
				    enast(new_wm->wm[0].enable), enast(new_wm->wm[1].enable),
				    enast(new_wm->wm[2].enable), enast(new_wm->wm[3].enable),
				    enast(new_wm->wm[4].enable), enast(new_wm->wm[5].enable),
				    enast(new_wm->wm[6].enable), enast(new_wm->wm[7].enable),
				    enast(new_wm->trans_wm.enable),
				    enast(new_wm->sagv.wm0.enable),
				    enast(new_wm->sagv.trans_wm.enable));

			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d"
				    " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n",
				    plane->base.base.id, plane->base.name,
				    enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].lines,
				    enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].lines,
				    enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].lines,
				    enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].lines,
				    enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].lines,
				    enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].lines,
				    enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].lines,
				    enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].lines,
				    enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.lines,
				    enast(old_wm->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines,
				    enast(old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm.lines,
				    enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].lines,
				    enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].lines,
				    enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].lines,
				    enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].lines,
				    enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].lines,
				    enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].lines,
				    enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].lines,
				    enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].lines,
				    enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.lines,
				    enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines,
				    enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines);

			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
				    " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
				    plane->base.base.id, plane->base.name,
				    old_wm->wm[0].blocks, old_wm->wm[1].blocks,
				    old_wm->wm[2].blocks, old_wm->wm[3].blocks,
				    old_wm->wm[4].blocks, old_wm->wm[5].blocks,
				    old_wm->wm[6].blocks, old_wm->wm[7].blocks,
				    old_wm->trans_wm.blocks,
				    old_wm->sagv.wm0.blocks,
				    old_wm->sagv.trans_wm.blocks,
				    new_wm->wm[0].blocks, new_wm->wm[1].blocks,
				    new_wm->wm[2].blocks, new_wm->wm[3].blocks,
				    new_wm->wm[4].blocks, new_wm->wm[5].blocks,
				    new_wm->wm[6].blocks, new_wm->wm[7].blocks,
				    new_wm->trans_wm.blocks,
				    new_wm->sagv.wm0.blocks,
				    new_wm->sagv.trans_wm.blocks);

			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
				    " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
				    plane->base.base.id, plane->base.name,
				    old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
				    old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
				    old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
				    old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
				    old_wm->trans_wm.min_ddb_alloc,
				    old_wm->sagv.wm0.min_ddb_alloc,
				    old_wm->sagv.trans_wm.min_ddb_alloc,
				    new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
				    new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
				    new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
				    new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
				    new_wm->trans_wm.min_ddb_alloc,
				    new_wm->sagv.wm0.min_ddb_alloc,
				    new_wm->sagv.trans_wm.min_ddb_alloc);
		}
	}
}
static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
					 const struct skl_pipe_wm *old_pipe_wm,
					 const struct skl_pipe_wm *new_pipe_wm)
{
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	int level, max_level = ilk_wm_max_level(i915);

	for (level = 0; level <= max_level; level++) {
		/*
		 * We don't check uv_wm as the hardware doesn't actually
		 * use it. It only gets used for calculating the required
		 * ddb allocation.
		 */
		if (!skl_wm_level_equals(skl_plane_wm_level(old_pipe_wm, plane->id, level),
					 skl_plane_wm_level(new_pipe_wm, plane->id, level)))
			return false;
	}

	if (HAS_HW_SAGV_WM(i915)) {
		const struct skl_plane_wm *old_wm = &old_pipe_wm->planes[plane->id];
		const struct skl_plane_wm *new_wm = &new_pipe_wm->planes[plane->id];

		if (!skl_wm_level_equals(&old_wm->sagv.wm0, &new_wm->sagv.wm0) ||
		    !skl_wm_level_equals(&old_wm->sagv.trans_wm, &new_wm->sagv.trans_wm))
			return false;
	}

	return skl_wm_level_equals(skl_plane_trans_wm(old_pipe_wm, plane->id),
				   skl_plane_trans_wm(new_pipe_wm, plane->id));
}
/*
 * To make sure the cursor watermark registers are always consistent
 * with our computed state the following scenario needs special
 * treatment:
 *
 * 1. enable cursor
 * 2. move cursor entirely offscreen
 * 3. disable cursor
 *
 * Step 2. does call .disable_plane() but does not zero the watermarks
 * (since we consider an offscreen cursor still active for the purposes
 * of watermarks). Step 3. would not normally call .disable_plane()
 * because the actual plane visibility isn't changing, and we don't
 * deallocate the cursor ddb until the pipe gets disabled. So we must
 * force step 3. to call .disable_plane() to update the watermark
 * registers properly.
 *
 * Other planes do not suffer from this issue as their watermarks are
 * calculated based on the actual plane visibility. The only time this
 * can trigger for the other planes is during the initial readout as the
 * default value of the watermarks registers is not zero.
 */
static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
		struct intel_plane_state *plane_state;
		enum plane_id plane_id = plane->id;

		/*
		 * Force a full wm update for every plane on modeset.
		 * Required because the reset value of the wm registers
		 * is non-zero, whereas we want all disabled planes to
		 * have zero watermarks. So if we turn off the relevant
		 * power well the hardware state will go out of sync
		 * with the software state.
		 */
		if (!intel_crtc_needs_modeset(new_crtc_state) &&
		    skl_plane_selected_wm_equals(plane,
						 &old_crtc_state->wm.skl.optimal,
						 &new_crtc_state->wm.skl.optimal))
			continue;

		plane_state = intel_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);

		new_crtc_state->update_planes |= BIT(plane_id);
	}

	return 0;
}
static int
skl_compute_wm(struct intel_atomic_state *state)
{
	struct intel_crtc *crtc;
	struct intel_crtc_state *new_crtc_state;
	int ret, i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		ret = skl_build_pipe_wm(state, crtc);
		if (ret)
			return ret;
	}

	ret = skl_compute_ddb(state);
	if (ret)
		return ret;

	ret = intel_compute_sagv_mask(state);
	if (ret)
		return ret;

	/*
	 * skl_compute_ddb() will have adjusted the final watermarks
	 * based on how much ddb is available. Now we can actually
	 * check if the final watermarks changed.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		ret = skl_wm_add_affected_planes(state, crtc);
		if (ret)
			return ret;
	}

	skl_print_wm_changes(state);

	return 0;
}
static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level)
{
	level->enable = val & PLANE_WM_EN;
	level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
	level->blocks = REG_FIELD_GET(PLANE_WM_BLOCKS_MASK, val);
	level->lines = REG_FIELD_GET(PLANE_WM_LINES_MASK, val);
}
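/*
 * Illustrative decode, assuming the current field layout (PLANE_WM_EN
 * in bit 31, lines and blocks in the fields below it): a register
 * value of PLANE_WM_EN | REG_FIELD_PREP(PLANE_WM_LINES_MASK, 2) |
 * REG_FIELD_PREP(PLANE_WM_BLOCKS_MASK, 64) reads back as
 * enable = true, lines = 2, blocks = 64. This is the exact inverse of
 * the packing done in skl_write_wm_level().
 */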
static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
				     struct skl_pipe_wm *out)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	int level, max_level;
	enum plane_id plane_id;
	u32 val;

	max_level = ilk_wm_max_level(i915);

	for_each_plane_id_on_crtc(crtc, plane_id) {
		struct skl_plane_wm *wm = &out->planes[plane_id];

		for (level = 0; level <= max_level; level++) {
			if (plane_id != PLANE_CURSOR)
				val = intel_de_read(i915, PLANE_WM(pipe, plane_id, level));
			else
				val = intel_de_read(i915, CUR_WM(pipe, level));

			skl_wm_level_from_reg_val(val, &wm->wm[level]);
		}

		if (plane_id != PLANE_CURSOR)
			val = intel_de_read(i915, PLANE_WM_TRANS(pipe, plane_id));
		else
			val = intel_de_read(i915, CUR_WM_TRANS(pipe));

		skl_wm_level_from_reg_val(val, &wm->trans_wm);

		if (HAS_HW_SAGV_WM(i915)) {
			if (plane_id != PLANE_CURSOR)
				val = intel_de_read(i915, PLANE_WM_SAGV(pipe, plane_id));
			else
				val = intel_de_read(i915, CUR_WM_SAGV(pipe));

			skl_wm_level_from_reg_val(val, &wm->sagv.wm0);

			if (plane_id != PLANE_CURSOR)
				val = intel_de_read(i915, PLANE_WM_SAGV_TRANS(pipe, plane_id));
			else
				val = intel_de_read(i915, CUR_WM_SAGV_TRANS(pipe));

			skl_wm_level_from_reg_val(val, &wm->sagv.trans_wm);
		} else if (DISPLAY_VER(i915) >= 12) {
			wm->sagv.wm0 = wm->wm[0];
			wm->sagv.trans_wm = wm->trans_wm;
		}
	}
}
void skl_wm_get_hw_state(struct drm_i915_private *i915)
{
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(i915->display.dbuf.obj.state);
	struct intel_crtc *crtc;

	if (HAS_MBUS_JOINING(i915))
		dbuf_state->joined_mbus = intel_de_read(i915, MBUS_CTL) & MBUS_JOIN;

	for_each_intel_crtc(&i915->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		enum pipe pipe = crtc->pipe;
		unsigned int mbus_offset;
		enum plane_id plane_id;
		u8 slices;

		memset(&crtc_state->wm.skl.optimal, 0,
		       sizeof(crtc_state->wm.skl.optimal));
		if (crtc_state->hw.active)
			skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
		crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;

		memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe]));

		for_each_plane_id_on_crtc(crtc, plane_id) {
			struct skl_ddb_entry *ddb =
				&crtc_state->wm.skl.plane_ddb[plane_id];
			struct skl_ddb_entry *ddb_y =
				&crtc_state->wm.skl.plane_ddb_y[plane_id];

			if (!crtc_state->hw.active)
				continue;

			skl_ddb_get_hw_plane_state(i915, crtc->pipe,
						   plane_id, ddb, ddb_y);

			skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb);
			skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_y);
		}

		dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state);

		/*
		 * Used for checking overlaps, so we need absolute
		 * offsets instead of MBUS relative offsets.
		 */
		slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
						 dbuf_state->joined_mbus);
		mbus_offset = mbus_ddb_offset(i915, slices);
		crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start;
		crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end;

		/* The slices actually used by the planes on the pipe */
		dbuf_state->slices[pipe] =
			skl_ddb_dbuf_slice_mask(i915, &crtc_state->wm.skl.ddb);

		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",
			    crtc->base.base.id, crtc->base.name,
			    dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start,
			    dbuf_state->ddb[pipe].end, dbuf_state->active_pipes,
			    str_yes_no(dbuf_state->joined_mbus));
	}

	dbuf_state->enabled_slices = i915->display.dbuf.enabled_slices;
}
static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
{
	const struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(i915->display.dbuf.obj.state);
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
	struct intel_crtc *crtc;

	for_each_intel_crtc(&i915->drm, crtc) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		entries[crtc->pipe] = crtc_state->wm.skl.ddb;
	}

	for_each_intel_crtc(&i915->drm, crtc) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		u8 slices;

		slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
						 dbuf_state->joined_mbus);
		if (dbuf_state->slices[crtc->pipe] & ~slices)
			return true;

		if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries,
						I915_MAX_PIPES, crtc->pipe))
			return true;
	}

	return false;
}
void skl_wm_sanitize(struct drm_i915_private *i915)
{
	struct intel_crtc *crtc;

	/*
	 * On TGL/RKL (at least) the BIOS likes to assign the planes
	 * to the wrong DBUF slices. This will cause an infinite loop
	 * in skl_commit_modeset_enables() as it can't find a way to
	 * transition from the old bogus DBUF layout to the new
	 * proper DBUF layout without DBUF allocation overlaps between
	 * the planes (which cannot be allowed or else the hardware
	 * may hang). If we detect a bogus DBUF layout just turn off
	 * all the planes so that skl_commit_modeset_enables() can
	 * simply ignore them.
	 */
	if (!skl_dbuf_is_misconfigured(i915))
		return;

	drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");

	for_each_intel_crtc(&i915->drm, crtc) {
		struct intel_plane *plane = to_intel_plane(crtc->base.primary);
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		if (plane_state->uapi.visible)
			intel_plane_disable_noatomic(crtc, plane);

		drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);

		memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
	}
}
void intel_wm_state_verify(struct intel_crtc *crtc,
			   struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct skl_hw_state {
		struct skl_ddb_entry ddb[I915_MAX_PLANES];
		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
		struct skl_pipe_wm wm;
	} *hw;
	const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
	int level, max_level = ilk_wm_max_level(i915);
	struct intel_plane *plane;
	u8 hw_enabled_slices;

	if (DISPLAY_VER(i915) < 9 || !new_crtc_state->hw.active)
		return;

	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		return;

	skl_pipe_wm_get_hw_state(crtc, &hw->wm);

	skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y);

	hw_enabled_slices = intel_enabled_dbuf_slices_mask(i915);

	if (DISPLAY_VER(i915) >= 11 &&
	    hw_enabled_slices != i915->display.dbuf.enabled_slices)
		drm_err(&i915->drm,
			"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
			i915->display.dbuf.enabled_slices,
			hw_enabled_slices);

	for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
		const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
		const struct skl_wm_level *hw_wm_level, *sw_wm_level;

		/* Watermarks */
		for (level = 0; level <= max_level; level++) {
			hw_wm_level = &hw->wm.planes[plane->id].wm[level];
			sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);

			if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
				continue;

			drm_err(&i915->drm,
				"[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				plane->base.base.id, plane->base.name, level,
				sw_wm_level->enable,
				sw_wm_level->blocks,
				sw_wm_level->lines,
				hw_wm_level->enable,
				hw_wm_level->blocks,
				hw_wm_level->lines);
		}

		hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
		sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);

		if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
			drm_err(&i915->drm,
				"[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				plane->base.base.id, plane->base.name,
				sw_wm_level->enable,
				sw_wm_level->blocks,
				sw_wm_level->lines,
				hw_wm_level->enable,
				hw_wm_level->blocks,
				hw_wm_level->lines);
		}

		hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
		sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;

		if (HAS_HW_SAGV_WM(i915) &&
		    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
			drm_err(&i915->drm,
				"[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				plane->base.base.id, plane->base.name,
				sw_wm_level->enable,
				sw_wm_level->blocks,
				sw_wm_level->lines,
				hw_wm_level->enable,
				hw_wm_level->blocks,
				hw_wm_level->lines);
		}

		hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
		sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;

		if (HAS_HW_SAGV_WM(i915) &&
		    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
			drm_err(&i915->drm,
				"[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				plane->base.base.id, plane->base.name,
				sw_wm_level->enable,
				sw_wm_level->blocks,
				sw_wm_level->lines,
				hw_wm_level->enable,
				hw_wm_level->blocks,
				hw_wm_level->lines);
		}

		/* DDB */
		hw_ddb_entry = &hw->ddb[PLANE_CURSOR];
		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			drm_err(&i915->drm,
				"[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
				plane->base.base.id, plane->base.name,
				sw_ddb_entry->start, sw_ddb_entry->end,
				hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	kfree(hw);
}
bool skl_watermark_ipc_enabled(struct drm_i915_private *i915)
{
	return i915->display.wm.ipc_enabled;
}

void skl_watermark_ipc_update(struct drm_i915_private *i915)
{
	if (!HAS_IPC(i915))
		return;

	intel_de_rmw(i915, DISP_ARB_CTL2, DISP_IPC_ENABLE,
		     skl_watermark_ipc_enabled(i915) ? DISP_IPC_ENABLE : 0);
}

static bool skl_watermark_ipc_can_enable(struct drm_i915_private *i915)
{
	/* Display WA #0477 WaDisableIPC: skl */
	if (IS_SKYLAKE(i915))
		return false;

	/* Display WA #1141: SKL:all KBL:all CFL */
	if (IS_KABYLAKE(i915) ||
	    IS_COFFEELAKE(i915) ||
	    IS_COMETLAKE(i915))
		return i915->dram_info.symmetric_memory;

	return true;
}

void skl_watermark_ipc_init(struct drm_i915_private *i915)
{
	if (!HAS_IPC(i915))
		return;

	i915->display.wm.ipc_enabled = skl_watermark_ipc_can_enable(i915);

	skl_watermark_ipc_update(i915);
}
static void
adjust_wm_latency(struct drm_i915_private *i915,
		  u16 wm[], int max_level, int read_latency)
{
	bool wm_lv_0_adjust_needed = i915->dram_info.wm_lv_0_adjust_needed;
	int i, level;

	/*
	 * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
	 * need to be disabled. We make sure to sanitize the values out
	 * of the punit to satisfy this requirement.
	 */
	for (level = 1; level <= max_level; level++) {
		if (wm[level] == 0) {
			for (i = level + 1; i <= max_level; i++)
				wm[i] = 0;

			max_level = level - 1;
			break;
		}
	}

	/*
	 * WaWmMemoryReadLatency
	 *
	 * punit doesn't take into account the read latency so we need
	 * to add proper adjustment to each valid level we retrieve
	 * from the punit when level 0 response data is 0us.
	 */
	if (wm[0] == 0) {
		for (level = 0; level <= max_level; level++)
			wm[level] += read_latency;
	}

	/*
	 * WA Level-0 adjustment for 16GB DIMMs: SKL+
	 * If we could not get dimm info enable this WA to prevent from
	 * any underrun. If not able to get Dimm info assume 16GB dimm
	 * to avoid any underrun.
	 */
	if (wm_lv_0_adjust_needed)
		wm[0] += 1;
}
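/*
 * Worked example (illustrative): raw punit latencies of
 * {0, 4, 10, 0, 40, ...} with read_latency = 2 first zero level 4 and
 * above and clamp max_level to 2 (level 3 reported 0us), then, because
 * level 0 is 0us, add the read latency to the remaining levels, giving
 * {2, 6, 12, 0, 0, ...}; the 16GB DIMM adjustment would finally bump
 * level 0 to 3.
 */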
static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
{
	int max_level = ilk_wm_max_level(i915);
	u32 val;

	val = intel_de_read(i915, MTL_LATENCY_LP0_LP1);
	wm[0] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
	wm[1] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);

	val = intel_de_read(i915, MTL_LATENCY_LP2_LP3);
	wm[2] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
	wm[3] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);

	val = intel_de_read(i915, MTL_LATENCY_LP4_LP5);
	wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
	wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);

	adjust_wm_latency(i915, wm, max_level, 6);
}
static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
{
	int max_level = ilk_wm_max_level(i915);
	int read_latency = DISPLAY_VER(i915) >= 12 ? 3 : 2;
	int mult = IS_DG2(i915) ? 2 : 1;
	u32 val;
	int ret;

	/* read the first set of memory latencies[0:3] */
	val = 0; /* data0 to be programmed to 0 for first set */
	ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
	if (ret) {
		drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret);
		return;
	}

	wm[0] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult;
	wm[1] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult;
	wm[2] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
	wm[3] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;

	/* read the second set of memory latencies[4:7] */
	val = 1; /* data0 to be programmed to 1 for second set */
	ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
	if (ret) {
		drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret);
		return;
	}

	wm[4] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult;
	wm[5] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult;
	wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
	wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;

	adjust_wm_latency(i915, wm, max_level, read_latency);
}
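/*
 * Note (illustrative): each GEN9_PCODE_READ_MEM_LATENCY mailbox read
 * returns four 8-bit latencies packed into one u32, which is why two
 * reads (data0 = 0, then data0 = 1) are needed to fill all eight
 * watermark levels. The DG2 "mult = 2" doubling presumably converts
 * DG2's reporting units into microseconds; treat that rationale as an
 * assumption to be checked against Bspec rather than established fact.
 */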
static void skl_setup_wm_latency(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 14)
		mtl_read_wm_latency(i915, i915->display.wm.skl_latency);
	else
		skl_read_wm_latency(i915, i915->display.wm.skl_latency);

	intel_print_wm_latency(i915, "Gen9 Plane", i915->display.wm.skl_latency);
}

static const struct intel_wm_funcs skl_wm_funcs = {
	.compute_global_watermarks = skl_compute_wm,
};

void skl_wm_init(struct drm_i915_private *i915)
{
	intel_sagv_init(i915);

	skl_setup_wm_latency(i915);

	i915->display.funcs.wm = &skl_wm_funcs;
}
static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj)
{
	struct intel_dbuf_state *dbuf_state;

	dbuf_state = kmemdup(obj->state, sizeof(*dbuf_state), GFP_KERNEL);
	if (!dbuf_state)
		return NULL;

	return &dbuf_state->base;
}

static void intel_dbuf_destroy_state(struct intel_global_obj *obj,
				     struct intel_global_state *state)
{
	kfree(state);
}

static const struct intel_global_state_funcs intel_dbuf_funcs = {
	.atomic_duplicate_state = intel_dbuf_duplicate_state,
	.atomic_destroy_state = intel_dbuf_destroy_state,
};

struct intel_dbuf_state *
intel_atomic_get_dbuf_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_global_state *dbuf_state;

	dbuf_state = intel_atomic_get_global_obj_state(state, &i915->display.dbuf.obj);
	if (IS_ERR(dbuf_state))
		return ERR_CAST(dbuf_state);

	return to_intel_dbuf_state(dbuf_state);
}

int intel_dbuf_init(struct drm_i915_private *i915)
{
	struct intel_dbuf_state *dbuf_state;

	dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL);
	if (!dbuf_state)
		return -ENOMEM;

	intel_atomic_global_obj_init(i915, &i915->display.dbuf.obj,
				     &dbuf_state->base, &intel_dbuf_funcs);

	return 0;
}
/*
 * Configure MBUS_CTL and the DBUF_CTL_S of each slice to the join_mbus
 * state before updating the request state of all DBUF slices.
 */
static void update_mbus_pre_enable(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	u32 mbus_ctl, dbuf_min_tracker_val;
	enum dbuf_slice slice;
	const struct intel_dbuf_state *dbuf_state =
		intel_atomic_get_new_dbuf_state(state);

	if (!HAS_MBUS_JOINING(i915))
		return;

	/*
	 * TODO: Implement vblank synchronized MBUS joining changes.
	 * Must be properly coordinated with dbuf reprogramming.
	 */
	if (dbuf_state->joined_mbus) {
		mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN |
			MBUS_JOIN_PIPE_SELECT_NONE;
		dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(3);
	} else {
		mbus_ctl = MBUS_HASHING_MODE_2x2 |
			MBUS_JOIN_PIPE_SELECT_NONE;
		dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(1);
	}

	intel_de_rmw(i915, MBUS_CTL,
		     MBUS_HASHING_MODE_MASK | MBUS_JOIN |
		     MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);

	for_each_dbuf_slice(i915, slice)
		intel_de_rmw(i915, DBUF_CTL_S(slice),
			     DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
			     dbuf_min_tracker_val);
}
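/*
 * Rough intuition (not from the original source): with MBUS joining a
 * single active pipe gets the bandwidth of all DBUF slices, so the
 * joined case programs 1x4 hashing and a minimum tracker-state-service
 * value of 3, while the unjoined case uses 2x2 hashing and a value of
 * 1. The specific pairings follow the Bspec display init sequence and
 * should be verified there rather than taken from this comment.
 */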
void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_dbuf_state *new_dbuf_state =
		intel_atomic_get_new_dbuf_state(state);
	const struct intel_dbuf_state *old_dbuf_state =
		intel_atomic_get_old_dbuf_state(state);

	if (!new_dbuf_state ||
	    (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices &&
	     new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus))
		return;

	WARN_ON(!new_dbuf_state->base.changed);

	update_mbus_pre_enable(state);
	gen9_dbuf_slices_update(i915,
				old_dbuf_state->enabled_slices |
				new_dbuf_state->enabled_slices);
}

void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_dbuf_state *new_dbuf_state =
		intel_atomic_get_new_dbuf_state(state);
	const struct intel_dbuf_state *old_dbuf_state =
		intel_atomic_get_old_dbuf_state(state);

	if (!new_dbuf_state ||
	    (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices &&
	     new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus))
		return;

	WARN_ON(!new_dbuf_state->base.changed);

	gen9_dbuf_slices_update(i915,
				new_dbuf_state->enabled_slices);
}
static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes)
{
	switch (pipe) {
	case PIPE_A:
		return !(active_pipes & BIT(PIPE_D));
	case PIPE_D:
		return !(active_pipes & BIT(PIPE_A));
	case PIPE_B:
		return !(active_pipes & BIT(PIPE_C));
	case PIPE_C:
		return !(active_pipes & BIT(PIPE_B));
	default: /* to suppress compiler warning */
		MISSING_CASE(pipe);
		break;
	}

	return false;
}
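/*
 * Illustrative mapping implied by the cases above: on this platform
 * pipes A/D share one DBUF bank and pipes B/C share the other, so a
 * pipe is "alone" on its bank exactly when its partner pipe is absent
 * from active_pipes. For example, active_pipes = BIT(PIPE_A) |
 * BIT(PIPE_B) leaves both A and B as the only pipe on their banks,
 * letting each claim the larger MTL BW credit below.
 */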
void intel_mbus_dbox_update(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
	const struct intel_crtc_state *new_crtc_state;
	const struct intel_crtc *crtc;
	u32 val = 0;
	int i;

	if (DISPLAY_VER(i915) < 11)
		return;

	new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
	old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
	if (!new_dbuf_state ||
	    (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus &&
	     new_dbuf_state->active_pipes == old_dbuf_state->active_pipes))
		return;

	if (DISPLAY_VER(i915) >= 14)
		val |= MBUS_DBOX_I_CREDIT(2);

	if (DISPLAY_VER(i915) >= 12) {
		val |= MBUS_DBOX_B2B_TRANSACTIONS_MAX(16);
		val |= MBUS_DBOX_B2B_TRANSACTIONS_DELAY(1);
		val |= MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN;
	}

	if (DISPLAY_VER(i915) >= 14)
		val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(12) :
						     MBUS_DBOX_A_CREDIT(8);
	else if (IS_ALDERLAKE_P(i915))
		/* Wa_22010947358:adl-p */
		val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(6) :
						     MBUS_DBOX_A_CREDIT(4);
	else
		val |= MBUS_DBOX_A_CREDIT(2);

	if (DISPLAY_VER(i915) >= 14) {
		val |= MBUS_DBOX_B_CREDIT(0xA);
	} else if (IS_ALDERLAKE_P(i915)) {
		val |= MBUS_DBOX_BW_CREDIT(2);
		val |= MBUS_DBOX_B_CREDIT(8);
	} else if (DISPLAY_VER(i915) >= 12) {
		val |= MBUS_DBOX_BW_CREDIT(2);
		val |= MBUS_DBOX_B_CREDIT(12);
	} else {
		val |= MBUS_DBOX_BW_CREDIT(1);
		val |= MBUS_DBOX_B_CREDIT(8);
	}

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		u32 pipe_val = val;

		if (!new_crtc_state->hw.active)
			continue;

		if (DISPLAY_VER(i915) >= 14) {
			if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe,
							      new_dbuf_state->active_pipes))
				pipe_val |= MBUS_DBOX_BW_8CREDITS_MTL;
			else
				pipe_val |= MBUS_DBOX_BW_4CREDITS_MTL;
		}

		intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe), pipe_val);
	}
}
static int skl_watermark_ipc_status_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = m->private;

	seq_printf(m, "Isochronous Priority Control: %s\n",
		   str_yes_no(skl_watermark_ipc_enabled(i915)));

	return 0;
}

static int skl_watermark_ipc_status_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	return single_open(file, skl_watermark_ipc_status_show, i915);
}

static ssize_t skl_watermark_ipc_status_write(struct file *file,
					      const char __user *ubuf,
					      size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *i915 = m->private;
	intel_wakeref_t wakeref;
	bool enable;
	int ret;

	ret = kstrtobool_from_user(ubuf, len, &enable);
	if (ret < 0)
		return ret;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		if (!skl_watermark_ipc_enabled(i915) && enable)
			drm_info(&i915->drm,
				 "Enabling IPC: WM will be proper only after next commit\n");
		i915->display.wm.ipc_enabled = enable;
		skl_watermark_ipc_update(i915);
	}

	return len;
}

static const struct file_operations skl_watermark_ipc_status_fops = {
	.owner = THIS_MODULE,
	.open = skl_watermark_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = skl_watermark_ipc_status_write
};

void skl_watermark_ipc_debugfs_register(struct drm_i915_private *i915)
{
	struct drm_minor *minor = i915->drm.primary;

	if (!HAS_IPC(i915))
		return;

	debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915,
			    &skl_watermark_ipc_status_fops);
}
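/*
 * Illustrative usage (assuming the standard debugfs mount point and
 * card index 0):
 *
 *   # cat /sys/kernel/debug/dri/0/i915_ipc_status
 *   Isochronous Priority Control: yes
 *   # echo 0 > /sys/kernel/debug/dri/0/i915_ipc_status
 *
 * Writes go through skl_watermark_ipc_status_write(), which parses the
 * boolean and reprograms DISP_ARB_CTL2 under a runtime PM wakeref.
 */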