2 * Copyright © 2006-2016 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
24 #include <linux/math.h>
25 #include <linux/string_helpers.h>
29 #include "intel_display_types.h"
30 #include "intel_dkl_phy.h"
31 #include "intel_dkl_phy_regs.h"
32 #include "intel_dpio_phy.h"
33 #include "intel_dpll.h"
34 #include "intel_dpll_mgr.h"
35 #include "intel_hti.h"
36 #include "intel_mg_phy_regs.h"
37 #include "intel_pch_refclk.h"
43 * Display PLLs used for driving outputs vary by platform. While some have
44 * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
45 * from a pool. In the latter scenario, it is possible that multiple pipes
46 * share a PLL if their configurations match.
48 * This file provides an abstraction over display PLLs. The function
49 * intel_shared_dpll_init() initializes the PLLs for the given platform. The
50 * users of a PLL are tracked and that tracking is integrated with the atomic
51 * modeset interface. During an atomic operation, required PLLs can be reserved
52 * for a given CRTC and encoder configuration by calling
53 * intel_reserve_shared_dplls() and previously reserved PLLs can be released
54 * with intel_release_shared_dplls().
55 * Changes to the users are first staged in the atomic state, and then made
56 * effective by calling intel_shared_dpll_swap_state() during the atomic
57 * commit phase.
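 *
 * A rough, illustrative sketch of the expected call ordering (not literal
 * driver code; argument lists are abbreviated and the exact call sites live
 * in the atomic check and commit paths):
 *
 *   intel_shared_dpll_init(i915);                      at driver load
 *   ...
 *   intel_reserve_shared_dplls(state, crtc, encoder);  during atomic check
 *   intel_shared_dpll_swap_state(state);               during atomic commit
 *   intel_enable_shared_dpll(crtc_state);              modeset enable
 *   ...
 *   intel_disable_shared_dpll(crtc_state);             modeset disable
 *   intel_release_shared_dplls(state, crtc);           when the PLL is no longer needed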
60 /* platform specific hooks for managing DPLLs */
61 struct intel_shared_dpll_funcs {
63 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
64 * the pll is not already enabled.
66 void (*enable)(struct drm_i915_private *i915,
67 struct intel_shared_dpll *pll);
70 * Hook for disabling the pll, called from intel_disable_shared_dpll()
71 * only when it is safe to disable the pll, i.e., there are no more
72 * tracked users for it.
74 void (*disable)(struct drm_i915_private *i915,
75 struct intel_shared_dpll *pll);
78 * Hook for reading the values currently programmed to the DPLL
79 * registers. This is used for initial hw state readout and state
80 * verification after a mode set.
82 bool (*get_hw_state)(struct drm_i915_private *i915,
83 struct intel_shared_dpll *pll,
84 struct intel_dpll_hw_state *hw_state);
87 * Hook for calculating the pll's output frequency based on its passed
88 * in state.
90 int (*get_freq)(struct drm_i915_private *i915,
91 const struct intel_shared_dpll *pll,
92 const struct intel_dpll_hw_state *pll_state);
95 struct intel_dpll_mgr {
96 const struct dpll_info *dpll_info;
98 int (*compute_dplls)(struct intel_atomic_state *state,
99 struct intel_crtc *crtc,
100 struct intel_encoder *encoder);
101 int (*get_dplls)(struct intel_atomic_state *state,
102 struct intel_crtc *crtc,
103 struct intel_encoder *encoder);
104 void (*put_dplls)(struct intel_atomic_state *state,
105 struct intel_crtc *crtc);
106 void (*update_active_dpll)(struct intel_atomic_state *state,
107 struct intel_crtc *crtc,
108 struct intel_encoder *encoder);
109 void (*update_ref_clks)(struct drm_i915_private *i915);
110 void (*dump_hw_state)(struct drm_i915_private *dev_priv,
111 const struct intel_dpll_hw_state *hw_state);
115 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
116 struct intel_shared_dpll_state *shared_dpll)
118 enum intel_dpll_id i;
120 /* Copy shared dpll state */
121 for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
122 struct intel_shared_dpll *pll = &dev_priv->display.dpll.shared_dplls[i];
124 shared_dpll[i] = pll->state;
128 static struct intel_shared_dpll_state *
129 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
131 struct intel_atomic_state *state = to_intel_atomic_state(s);
133 drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
135 if (!state->dpll_set) {
136 state->dpll_set = true;
138 intel_atomic_duplicate_dpll_state(to_i915(s->dev),
142 return state->shared_dpll;
146 * intel_get_shared_dpll_by_id - get a DPLL given its id
147 * @dev_priv: i915 device instance
148 * @id: pll id
150 * Returns:
151 * A pointer to the DPLL with @id
153 struct intel_shared_dpll *
154 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
155 enum intel_dpll_id id)
157 return &dev_priv->display.dpll.shared_dplls[id];
161 void assert_shared_dpll(struct drm_i915_private *dev_priv,
162 struct intel_shared_dpll *pll,
166 struct intel_dpll_hw_state hw_state;
168 if (drm_WARN(&dev_priv->drm, !pll,
169 "asserting DPLL %s with no DPLL\n", str_on_off(state)))
172 cur_state = intel_dpll_get_hw_state(dev_priv, pll, &hw_state);
173 I915_STATE_WARN(dev_priv, cur_state != state,
174 "%s assertion failure (expected %s, current %s)\n",
175 pll->info->name, str_on_off(state),
176 str_on_off(cur_state));
179 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
181 return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
184 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
186 return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
190 intel_combo_pll_enable_reg(struct drm_i915_private *i915,
191 struct intel_shared_dpll *pll)
194 return DG1_DPLL_ENABLE(pll->info->id);
195 else if (IS_JSL_EHL(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
196 return MG_PLL_ENABLE(0);
198 return ICL_DPLL_ENABLE(pll->info->id);
202 intel_tc_pll_enable_reg(struct drm_i915_private *i915,
203 struct intel_shared_dpll *pll)
205 const enum intel_dpll_id id = pll->info->id;
206 enum tc_port tc_port = icl_pll_id_to_tc_port(id);
208 if (IS_ALDERLAKE_P(i915))
209 return ADLP_PORTTC_PLL_ENABLE(tc_port);
211 return MG_PLL_ENABLE(tc_port);
215 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
216 * @crtc_state: CRTC, and its state, which has a shared DPLL
218 * Enable the shared DPLL used by @crtc_state.
220 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
222 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
223 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
224 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
225 unsigned int pipe_mask = BIT(crtc->pipe);
226 unsigned int old_mask;
228 if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
231 mutex_lock(&dev_priv->display.dpll.lock);
232 old_mask = pll->active_mask;
234 if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) ||
235 drm_WARN_ON(&dev_priv->drm, pll->active_mask & pipe_mask))
238 pll->active_mask |= pipe_mask;
240 drm_dbg_kms(&dev_priv->drm,
241 "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
242 pll->info->name, pll->active_mask, pll->on,
243 crtc->base.base.id, crtc->base.name);
246 drm_WARN_ON(&dev_priv->drm, !pll->on);
247 assert_shared_dpll_enabled(dev_priv, pll);
250 drm_WARN_ON(&dev_priv->drm, pll->on);
252 drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
253 pll->info->funcs->enable(dev_priv, pll);
257 mutex_unlock(&dev_priv->display.dpll.lock);
261 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
262 * @crtc_state: CRTC, and its state, which has a shared DPLL
264 * Disable the shared DPLL used by @crtc_state.
266 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
268 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
269 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
270 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
271 unsigned int pipe_mask = BIT(crtc->pipe);
273 /* PCH only available on ILK+ */
274 if (DISPLAY_VER(dev_priv) < 5)
280 mutex_lock(&dev_priv->display.dpll.lock);
281 if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask),
282 "%s not used by [CRTC:%d:%s]\n", pll->info->name,
283 crtc->base.base.id, crtc->base.name))
286 drm_dbg_kms(&dev_priv->drm,
287 "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
288 pll->info->name, pll->active_mask, pll->on,
289 crtc->base.base.id, crtc->base.name);
291 assert_shared_dpll_enabled(dev_priv, pll);
292 drm_WARN_ON(&dev_priv->drm, !pll->on);
294 pll->active_mask &= ~pipe_mask;
295 if (pll->active_mask)
298 drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
299 pll->info->funcs->disable(dev_priv, pll);
303 mutex_unlock(&dev_priv->display.dpll.lock);
306 static struct intel_shared_dpll *
307 intel_find_shared_dpll(struct intel_atomic_state *state,
308 const struct intel_crtc *crtc,
309 const struct intel_dpll_hw_state *pll_state,
310 unsigned long dpll_mask)
312 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
313 struct intel_shared_dpll *pll, *unused_pll = NULL;
314 struct intel_shared_dpll_state *shared_dpll;
315 enum intel_dpll_id i;
317 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
319 drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
321 for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
322 pll = &dev_priv->display.dpll.shared_dplls[i];
324 /* Only want to check enabled timings first */
325 if (shared_dpll[i].pipe_mask == 0) {
331 if (memcmp(pll_state,
332 &shared_dpll[i].hw_state,
333 sizeof(*pll_state)) == 0) {
334 drm_dbg_kms(&dev_priv->drm,
335 "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
336 crtc->base.base.id, crtc->base.name,
337 pll->info->name,
338 shared_dpll[i].pipe_mask,
339 pll->active_mask);
344 /* Ok no matching timings, maybe there's a free one? */
346 drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
347 crtc->base.base.id, crtc->base.name,
348 unused_pll->info->name);
356 * intel_reference_shared_dpll_crtc - Get a DPLL reference for a CRTC
357 * @crtc: CRTC on which behalf the reference is taken
358 * @pll: DPLL for which the reference is taken
359 * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
361 * Take a reference for @pll tracking the use of it by @crtc.
364 intel_reference_shared_dpll_crtc(const struct intel_crtc *crtc,
365 const struct intel_shared_dpll *pll,
366 struct intel_shared_dpll_state *shared_dpll_state)
368 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
370 drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) != 0);
372 shared_dpll_state->pipe_mask |= BIT(crtc->pipe);
374 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] reserving %s\n",
375 crtc->base.base.id, crtc->base.name, pll->info->name);
379 intel_reference_shared_dpll(struct intel_atomic_state *state,
380 const struct intel_crtc *crtc,
381 const struct intel_shared_dpll *pll,
382 const struct intel_dpll_hw_state *pll_state)
384 struct intel_shared_dpll_state *shared_dpll;
385 const enum intel_dpll_id id = pll->info->id;
387 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
389 if (shared_dpll[id].pipe_mask == 0)
390 shared_dpll[id].hw_state = *pll_state;
392 intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[id]);
396 * intel_unreference_shared_dpll_crtc - Drop a DPLL reference for a CRTC
397 * @crtc: CRTC on which behalf the reference is dropped
398 * @pll: DPLL for which the reference is dropped
399 * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
401 * Drop a reference for @pll tracking the end of use of it by @crtc.
404 intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc,
405 const struct intel_shared_dpll *pll,
406 struct intel_shared_dpll_state *shared_dpll_state)
408 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
410 drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) == 0);
412 shared_dpll_state->pipe_mask &= ~BIT(crtc->pipe);
414 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] releasing %s\n",
415 crtc->base.base.id, crtc->base.name, pll->info->name);
418 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
419 const struct intel_crtc *crtc,
420 const struct intel_shared_dpll *pll)
422 struct intel_shared_dpll_state *shared_dpll;
423 const enum intel_dpll_id id = pll->info->id;
425 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
427 intel_unreference_shared_dpll_crtc(crtc, pll, &shared_dpll[id]);
430 static void intel_put_dpll(struct intel_atomic_state *state,
431 struct intel_crtc *crtc)
433 const struct intel_crtc_state *old_crtc_state =
434 intel_atomic_get_old_crtc_state(state, crtc);
435 struct intel_crtc_state *new_crtc_state =
436 intel_atomic_get_new_crtc_state(state, crtc);
438 new_crtc_state->shared_dpll = NULL;
440 if (!old_crtc_state->shared_dpll)
443 intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
447 * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
448 * @state: atomic state
450 * This is the dpll version of drm_atomic_helper_swap_state() since the
451 * helper does not handle driver-specific global state.
453 * For consistency with atomic helpers this function does a complete swap,
454 * i.e. it also puts the current state into @state, even though there is no
455 * need for that at this moment.
457 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
459 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
460 struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
461 enum intel_dpll_id i;
463 if (!state->dpll_set)
466 for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
467 struct intel_shared_dpll *pll =
468 &dev_priv->display.dpll.shared_dplls[i];
470 swap(pll->state, shared_dpll[i]);
474 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
475 struct intel_shared_dpll *pll,
476 struct intel_dpll_hw_state *hw_state)
478 const enum intel_dpll_id id = pll->info->id;
479 intel_wakeref_t wakeref;
482 wakeref = intel_display_power_get_if_enabled(dev_priv,
483 POWER_DOMAIN_DISPLAY_CORE);
487 val = intel_de_read(dev_priv, PCH_DPLL(id));
488 hw_state->dpll = val;
489 hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
490 hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));
492 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
494 return val & DPLL_VCO_ENABLE;
497 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
502 val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
503 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
504 DREF_SUPERSPREAD_SOURCE_MASK));
505 I915_STATE_WARN(dev_priv, !enabled,
506 "PCH refclk assertion failure, should be active but is disabled\n");
509 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
510 struct intel_shared_dpll *pll)
512 const enum intel_dpll_id id = pll->info->id;
514 /* PCH refclock must be enabled first */
515 ibx_assert_pch_refclk_enabled(dev_priv);
517 intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
518 intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);
520 intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
522 /* Wait for the clocks to stabilize. */
523 intel_de_posting_read(dev_priv, PCH_DPLL(id));
526 /* The pixel multiplier can only be updated once the
527 * DPLL is enabled and the clocks are stable.
529 * So write it again.
531 intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
532 intel_de_posting_read(dev_priv, PCH_DPLL(id));
536 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
537 struct intel_shared_dpll *pll)
539 const enum intel_dpll_id id = pll->info->id;
541 intel_de_write(dev_priv, PCH_DPLL(id), 0);
542 intel_de_posting_read(dev_priv, PCH_DPLL(id));
546 static int ibx_compute_dpll(struct intel_atomic_state *state,
547 struct intel_crtc *crtc,
548 struct intel_encoder *encoder)
553 static int ibx_get_dpll(struct intel_atomic_state *state,
554 struct intel_crtc *crtc,
555 struct intel_encoder *encoder)
557 struct intel_crtc_state *crtc_state =
558 intel_atomic_get_new_crtc_state(state, crtc);
559 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
560 struct intel_shared_dpll *pll;
561 enum intel_dpll_id i;
563 if (HAS_PCH_IBX(dev_priv)) {
564 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
565 i = (enum intel_dpll_id) crtc->pipe;
566 pll = &dev_priv->display.dpll.shared_dplls[i];
568 drm_dbg_kms(&dev_priv->drm,
569 "[CRTC:%d:%s] using pre-allocated %s\n",
570 crtc->base.base.id, crtc->base.name,
571 pll->info->name);
573 pll = intel_find_shared_dpll(state, crtc,
574 &crtc_state->dpll_hw_state,
575 BIT(DPLL_ID_PCH_PLL_B) |
576 BIT(DPLL_ID_PCH_PLL_A));
582 /* reference the pll */
583 intel_reference_shared_dpll(state, crtc,
584 pll, &crtc_state->dpll_hw_state);
586 crtc_state->shared_dpll = pll;
591 static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
592 const struct intel_dpll_hw_state *hw_state)
594 drm_dbg_kms(&dev_priv->drm,
595 "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
596 "fp0: 0x%x, fp1: 0x%x\n",
603 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
604 .enable = ibx_pch_dpll_enable,
605 .disable = ibx_pch_dpll_disable,
606 .get_hw_state = ibx_pch_dpll_get_hw_state,
609 static const struct dpll_info pch_plls[] = {
610 { "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
611 { "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
615 static const struct intel_dpll_mgr pch_pll_mgr = {
616 .dpll_info = pch_plls,
617 .compute_dplls = ibx_compute_dpll,
618 .get_dplls = ibx_get_dpll,
619 .put_dplls = intel_put_dpll,
620 .dump_hw_state = ibx_dump_hw_state,
623 static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
624 struct intel_shared_dpll *pll)
626 const enum intel_dpll_id id = pll->info->id;
628 intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
629 intel_de_posting_read(dev_priv, WRPLL_CTL(id));
633 static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
634 struct intel_shared_dpll *pll)
636 intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
637 intel_de_posting_read(dev_priv, SPLL_CTL);
641 static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
642 struct intel_shared_dpll *pll)
644 const enum intel_dpll_id id = pll->info->id;
646 intel_de_rmw(dev_priv, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
647 intel_de_posting_read(dev_priv, WRPLL_CTL(id));
650 * Try to set up the PCH reference clock once all DPLLs
651 * that depend on it have been shut down.
653 if (dev_priv->display.dpll.pch_ssc_use & BIT(id))
654 intel_init_pch_refclk(dev_priv);
657 static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
658 struct intel_shared_dpll *pll)
660 enum intel_dpll_id id = pll->info->id;
662 intel_de_rmw(dev_priv, SPLL_CTL, SPLL_PLL_ENABLE, 0);
663 intel_de_posting_read(dev_priv, SPLL_CTL);
666 * Try to set up the PCH reference clock once all DPLLs
667 * that depend on it have been shut down.
669 if (dev_priv->display.dpll.pch_ssc_use & BIT(id))
670 intel_init_pch_refclk(dev_priv);
673 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
674 struct intel_shared_dpll *pll,
675 struct intel_dpll_hw_state *hw_state)
677 const enum intel_dpll_id id = pll->info->id;
678 intel_wakeref_t wakeref;
681 wakeref = intel_display_power_get_if_enabled(dev_priv,
682 POWER_DOMAIN_DISPLAY_CORE);
686 val = intel_de_read(dev_priv, WRPLL_CTL(id));
687 hw_state->wrpll = val;
689 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
691 return val & WRPLL_PLL_ENABLE;
694 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
695 struct intel_shared_dpll *pll,
696 struct intel_dpll_hw_state *hw_state)
698 intel_wakeref_t wakeref;
701 wakeref = intel_display_power_get_if_enabled(dev_priv,
702 POWER_DOMAIN_DISPLAY_CORE);
706 val = intel_de_read(dev_priv, SPLL_CTL);
707 hw_state->spll = val;
709 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
711 return val & SPLL_PLL_ENABLE;
715 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
721 /* Constraints for PLL good behavior */
727 struct hsw_wrpll_rnp {
731 static unsigned hsw_wrpll_get_budget_for_freq(int clock)
795 static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
796 unsigned int r2, unsigned int n2,
798 struct hsw_wrpll_rnp *best)
800 u64 a, b, c, d, diff, diff_best;
802 /* No best (r,n,p) yet */
811 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
812 * freq2k.
814 * delta = 1e6 *
815 * abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
816 * freq2k;
818 * and we would like delta <= budget.
820 * If the discrepancy is above the PPM-based budget, always prefer to
821 * improve upon the previous solution. However, if you're within the
822 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
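 *
 * Rearranged to avoid divisions, delta <= budget is equivalent to
 *
 *   1e6 * abs(freq2k * p * r2 - LC_FREQ_2K * n2) <= budget * freq2k * p * r2
 *
 * which is the a/c (and b/d) comparison done below.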
824 a = freq2k * budget * p * r2;
825 b = freq2k * budget * best->p * best->r2;
826 diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
827 diff_best = abs_diff(freq2k * best->p * best->r2,
828 LC_FREQ_2K * best->n2);
830 d = 1000000 * diff_best;
832 if (a < c && b < d) {
833 /* If both are above the budget, pick the closer */
834 if (best->p * best->r2 * diff < p * r2 * diff_best) {
839 } else if (a >= c && b < d) {
840 /* If A is below the threshold but B is above it? Update. */
844 } else if (a >= c && b >= d) {
845 /* Both are below the limit, so pick the higher n2/(r2*r2) */
846 if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
852 /* Otherwise a < c && b >= d, do nothing */
856 hsw_ddi_calculate_wrpll(int clock /* in Hz */,
857 unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
861 struct hsw_wrpll_rnp best = {};
864 freq2k = clock / 100;
866 budget = hsw_wrpll_get_budget_for_freq(clock);
868 /* Special case handling for 540 pixel clock: bypass WR PLL entirely
869 * and directly pass the LC PLL to it. */
870 if (freq2k == 5400000) {
878 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
879 * the WR PLL.
881 * We want R so that REF_MIN <= Ref <= REF_MAX.
882 * Injecting R2 = 2 * R gives:
883 * REF_MAX * r2 > LC_FREQ * 2 and
884 * REF_MIN * r2 < LC_FREQ * 2
886 * Which means the desired boundaries for r2 are:
887 * LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
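 *
 * Illustrative numbers only: if REF_MIN/REF_MAX are 48/400 MHz and
 * LC_FREQ is 2700 MHz, the loop below scans roughly r2 = 14..112,
 * i.e. R = 7..56.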
890 for (r2 = LC_FREQ * 2 / REF_MAX + 1;
891 r2 <= LC_FREQ * 2 / REF_MIN;
895 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
897 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
898 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
899 * VCO_MAX * r2 > n2 * LC_FREQ and
900 * VCO_MIN * r2 < n2 * LC_FREQ
902 * Which means the desired boundaries for n2 are:
903 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
905 for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
906 n2 <= VCO_MAX * r2 / LC_FREQ;
909 for (p = P_MIN; p <= P_MAX; p += P_INC)
910 hsw_wrpll_update_rnp(freq2k, budget,
920 static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
921 const struct intel_shared_dpll *pll,
922 const struct intel_dpll_hw_state *pll_state)
926 u32 wrpll = pll_state->wrpll;
928 switch (wrpll & WRPLL_REF_MASK) {
929 case WRPLL_REF_SPECIAL_HSW:
930 /* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
931 if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
932 refclk = dev_priv->display.dpll.ref_clks.nssc;
936 case WRPLL_REF_PCH_SSC:
938 * We could calculate spread here, but our checking
939 * code only cares about 5% accuracy, and spread is a max of
940 * 0.25%.
942 refclk = dev_priv->display.dpll.ref_clks.ssc;
944 case WRPLL_REF_LCPLL:
952 r = wrpll & WRPLL_DIVIDER_REF_MASK;
953 p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
954 n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
956 /* Convert to KHz, p & r have a fixed point portion */
957 return (refclk * n / 10) / (p * r) * 2;
961 hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
962 struct intel_crtc *crtc)
964 struct drm_i915_private *i915 = to_i915(state->base.dev);
965 struct intel_crtc_state *crtc_state =
966 intel_atomic_get_new_crtc_state(state, crtc);
967 unsigned int p, n2, r2;
969 hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
971 crtc_state->dpll_hw_state.wrpll =
972 WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
973 WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
974 WRPLL_DIVIDER_POST(p);
976 crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL,
977 &crtc_state->dpll_hw_state);
982 static struct intel_shared_dpll *
983 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
984 struct intel_crtc *crtc)
986 struct intel_crtc_state *crtc_state =
987 intel_atomic_get_new_crtc_state(state, crtc);
989 return intel_find_shared_dpll(state, crtc,
990 &crtc_state->dpll_hw_state,
991 BIT(DPLL_ID_WRPLL2) |
992 BIT(DPLL_ID_WRPLL1));
996 hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
998 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
999 int clock = crtc_state->port_clock;
1001 switch (clock / 2) {
1007 drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
1008 clock);
1013 static struct intel_shared_dpll *
1014 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
1016 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1017 struct intel_shared_dpll *pll;
1018 enum intel_dpll_id pll_id;
1019 int clock = crtc_state->port_clock;
1021 switch (clock / 2) {
1023 pll_id = DPLL_ID_LCPLL_810;
1026 pll_id = DPLL_ID_LCPLL_1350;
1029 pll_id = DPLL_ID_LCPLL_2700;
1032 MISSING_CASE(clock / 2);
1036 pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
1044 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1045 const struct intel_shared_dpll *pll,
1046 const struct intel_dpll_hw_state *pll_state)
1050 switch (pll->info->id) {
1051 case DPLL_ID_LCPLL_810:
1054 case DPLL_ID_LCPLL_1350:
1055 link_clock = 135000;
1057 case DPLL_ID_LCPLL_2700:
1058 link_clock = 270000;
1061 drm_WARN(&i915->drm, 1, "bad port clock sel\n");
1065 return link_clock * 2;
1069 hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
1070 struct intel_crtc *crtc)
1072 struct intel_crtc_state *crtc_state =
1073 intel_atomic_get_new_crtc_state(state, crtc);
1075 if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
1078 crtc_state->dpll_hw_state.spll =
1079 SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
1084 static struct intel_shared_dpll *
1085 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1086 struct intel_crtc *crtc)
1088 struct intel_crtc_state *crtc_state =
1089 intel_atomic_get_new_crtc_state(state, crtc);
1091 return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1095 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1096 const struct intel_shared_dpll *pll,
1097 const struct intel_dpll_hw_state *pll_state)
1101 switch (pll_state->spll & SPLL_FREQ_MASK) {
1102 case SPLL_FREQ_810MHz:
1105 case SPLL_FREQ_1350MHz:
1106 link_clock = 135000;
1108 case SPLL_FREQ_2700MHz:
1109 link_clock = 270000;
1112 drm_WARN(&i915->drm, 1, "bad spll freq\n");
1116 return link_clock * 2;
1119 static int hsw_compute_dpll(struct intel_atomic_state *state,
1120 struct intel_crtc *crtc,
1121 struct intel_encoder *encoder)
1123 struct intel_crtc_state *crtc_state =
1124 intel_atomic_get_new_crtc_state(state, crtc);
1126 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1127 return hsw_ddi_wrpll_compute_dpll(state, crtc);
1128 else if (intel_crtc_has_dp_encoder(crtc_state))
1129 return hsw_ddi_lcpll_compute_dpll(crtc_state);
1130 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1131 return hsw_ddi_spll_compute_dpll(state, crtc);
1136 static int hsw_get_dpll(struct intel_atomic_state *state,
1137 struct intel_crtc *crtc,
1138 struct intel_encoder *encoder)
1140 struct intel_crtc_state *crtc_state =
1141 intel_atomic_get_new_crtc_state(state, crtc);
1142 struct intel_shared_dpll *pll = NULL;
1144 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1145 pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1146 else if (intel_crtc_has_dp_encoder(crtc_state))
1147 pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1148 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1149 pll = hsw_ddi_spll_get_dpll(state, crtc);
1154 intel_reference_shared_dpll(state, crtc,
1155 pll, &crtc_state->dpll_hw_state);
1157 crtc_state->shared_dpll = pll;
1162 static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1164 i915->display.dpll.ref_clks.ssc = 135000;
1165 /* Non-SSC is only used on non-ULT HSW. */
1166 if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1167 i915->display.dpll.ref_clks.nssc = 24000;
1168 else
1169 i915->display.dpll.ref_clks.nssc = 135000;
1172 static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
1173 const struct intel_dpll_hw_state *hw_state)
1175 drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
1176 hw_state->wrpll, hw_state->spll);
1179 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
1180 .enable = hsw_ddi_wrpll_enable,
1181 .disable = hsw_ddi_wrpll_disable,
1182 .get_hw_state = hsw_ddi_wrpll_get_hw_state,
1183 .get_freq = hsw_ddi_wrpll_get_freq,
1186 static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
1187 .enable = hsw_ddi_spll_enable,
1188 .disable = hsw_ddi_spll_disable,
1189 .get_hw_state = hsw_ddi_spll_get_hw_state,
1190 .get_freq = hsw_ddi_spll_get_freq,
1193 static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
1194 struct intel_shared_dpll *pll)
1198 static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
1199 struct intel_shared_dpll *pll)
1203 static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
1204 struct intel_shared_dpll *pll,
1205 struct intel_dpll_hw_state *hw_state)
1210 static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
1211 .enable = hsw_ddi_lcpll_enable,
1212 .disable = hsw_ddi_lcpll_disable,
1213 .get_hw_state = hsw_ddi_lcpll_get_hw_state,
1214 .get_freq = hsw_ddi_lcpll_get_freq,
1217 static const struct dpll_info hsw_plls[] = {
1218 { "WRPLL 1", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 },
1219 { "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 },
1220 { "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 },
1221 { "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON },
1222 { "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
1223 { "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
1227 static const struct intel_dpll_mgr hsw_pll_mgr = {
1228 .dpll_info = hsw_plls,
1229 .compute_dplls = hsw_compute_dpll,
1230 .get_dplls = hsw_get_dpll,
1231 .put_dplls = intel_put_dpll,
1232 .update_ref_clks = hsw_update_dpll_ref_clks,
1233 .dump_hw_state = hsw_dump_hw_state,
1236 struct skl_dpll_regs {
1237 i915_reg_t ctl, cfgcr1, cfgcr2;
1240 /* this array is indexed by the *shared* pll id */
1241 static const struct skl_dpll_regs skl_dpll_regs[4] = {
1245 /* DPLL 0 doesn't support HDMI mode */
1250 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
1251 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
1255 .ctl = WRPLL_CTL(0),
1256 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
1257 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
1261 .ctl = WRPLL_CTL(1),
1262 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
1263 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
1267 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
1268 struct intel_shared_dpll *pll)
1270 const enum intel_dpll_id id = pll->info->id;
1272 intel_de_rmw(dev_priv, DPLL_CTRL1,
1273 DPLL_CTRL1_HDMI_MODE(id) | DPLL_CTRL1_SSC(id) | DPLL_CTRL1_LINK_RATE_MASK(id),
1274 pll->state.hw_state.ctrl1 << (id * 6));
1275 intel_de_posting_read(dev_priv, DPLL_CTRL1);
1278 static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
1279 struct intel_shared_dpll *pll)
1281 const struct skl_dpll_regs *regs = skl_dpll_regs;
1282 const enum intel_dpll_id id = pll->info->id;
1284 skl_ddi_pll_write_ctrl1(dev_priv, pll);
1286 intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
1287 intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
1288 intel_de_posting_read(dev_priv, regs[id].cfgcr1);
1289 intel_de_posting_read(dev_priv, regs[id].cfgcr2);
1291 /* the enable bit is always bit 31 */
1292 intel_de_rmw(dev_priv, regs[id].ctl, 0, LCPLL_PLL_ENABLE);
1294 if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
1295 drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
1298 static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
1299 struct intel_shared_dpll *pll)
1301 skl_ddi_pll_write_ctrl1(dev_priv, pll);
1304 static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
1305 struct intel_shared_dpll *pll)
1307 const struct skl_dpll_regs *regs = skl_dpll_regs;
1308 const enum intel_dpll_id id = pll->info->id;
1310 /* the enable bit is always bit 31 */
1311 intel_de_rmw(dev_priv, regs[id].ctl, LCPLL_PLL_ENABLE, 0);
1312 intel_de_posting_read(dev_priv, regs[id].ctl);
1315 static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
1316 struct intel_shared_dpll *pll)
1320 static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1321 struct intel_shared_dpll *pll,
1322 struct intel_dpll_hw_state *hw_state)
1325 const struct skl_dpll_regs *regs = skl_dpll_regs;
1326 const enum intel_dpll_id id = pll->info->id;
1327 intel_wakeref_t wakeref;
1330 wakeref = intel_display_power_get_if_enabled(dev_priv,
1331 POWER_DOMAIN_DISPLAY_CORE);
1337 val = intel_de_read(dev_priv, regs[id].ctl);
1338 if (!(val & LCPLL_PLL_ENABLE))
1341 val = intel_de_read(dev_priv, DPLL_CTRL1);
1342 hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1344 /* avoid reading back stale values if HDMI mode is not enabled */
1345 if (val & DPLL_CTRL1_HDMI_MODE(id)) {
1346 hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
1347 hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
1352 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1357 static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
1358 struct intel_shared_dpll *pll,
1359 struct intel_dpll_hw_state *hw_state)
1361 const struct skl_dpll_regs *regs = skl_dpll_regs;
1362 const enum intel_dpll_id id = pll->info->id;
1363 intel_wakeref_t wakeref;
1367 wakeref = intel_display_power_get_if_enabled(dev_priv,
1368 POWER_DOMAIN_DISPLAY_CORE);
1374 /* DPLL0 is always enabled since it drives CDCLK */
1375 val = intel_de_read(dev_priv, regs[id].ctl);
1376 if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
1379 val = intel_de_read(dev_priv, DPLL_CTRL1);
1380 hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1385 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1390 struct skl_wrpll_context {
1391 u64 min_deviation; /* current minimal deviation */
1392 u64 central_freq; /* chosen central freq */
1393 u64 dco_freq; /* chosen dco freq */
1394 unsigned int p; /* chosen divider */
1397 /* DCO freq must be within +1%/-6% of the DCO central freq */
1398 #define SKL_DCO_MAX_PDEVIATION 100
1399 #define SKL_DCO_MAX_NDEVIATION 600
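/*
 * Illustrative example of the check below: with a central frequency of
 * 8400 MHz, a candidate DCO of 8480 MHz gives a deviation of
 * 10000 * 80 / 8400 = 95 (within the +1% / 100 limit), while 8100 MHz
 * gives 10000 * 300 / 8400 = 357 (within the -6% / 600 limit).
 */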
1401 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1404 unsigned int divider)
1408 deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1411 /* positive deviation */
1412 if (dco_freq >= central_freq) {
1413 if (deviation < SKL_DCO_MAX_PDEVIATION &&
1414 deviation < ctx->min_deviation) {
1415 ctx->min_deviation = deviation;
1416 ctx->central_freq = central_freq;
1417 ctx->dco_freq = dco_freq;
1420 /* negative deviation */
1421 } else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1422 deviation < ctx->min_deviation) {
1423 ctx->min_deviation = deviation;
1424 ctx->central_freq = central_freq;
1425 ctx->dco_freq = dco_freq;
1430 static void skl_wrpll_get_multipliers(unsigned int p,
1431 unsigned int *p0 /* out */,
1432 unsigned int *p1 /* out */,
1433 unsigned int *p2 /* out */)
1437 unsigned int half = p / 2;
1439 if (half == 1 || half == 2 || half == 3 || half == 5) {
1443 } else if (half % 2 == 0) {
1447 } else if (half % 3 == 0) {
1451 } else if (half % 7 == 0) {
1456 } else if (p == 3 || p == 9) { /* 3, 5, 7, 9, 15, 21, 35 */
1460 } else if (p == 5 || p == 7) {
1464 } else if (p == 15) {
1468 } else if (p == 21) {
1472 } else if (p == 35) {
1479 struct skl_wrpll_params {
1489 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1493 u32 p0, u32 p1, u32 p2)
1497 switch (central_freq) {
1499 params->central_freq = 0;
1502 params->central_freq = 1;
1505 params->central_freq = 3;
1522 WARN(1, "Incorrect PDiv\n");
1539 WARN(1, "Incorrect KDiv\n");
1542 params->qdiv_ratio = p1;
1543 params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1545 dco_freq = p0 * p1 * p2 * afe_clock;
1548 * Intermediate values are in Hz.
1549 * Divide by MHz to match bspec
1551 params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
1552 params->dco_fraction =
1553 div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
1554 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
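/*
 * Illustrative example of the dco_integer/dco_fraction split above
 * (numbers chosen here, not from bspec): with a 24000 kHz reference and
 * dco_freq = 8100000000 Hz, dco_integer is 8100000000 / 24000000 = 337
 * and the 0.5 fractional remainder maps to dco_fraction = 0x8000 / 2 = 0x4000.
 */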
1558 skl_ddi_calculate_wrpll(int clock /* in Hz */,
1560 struct skl_wrpll_params *wrpll_params)
1562 static const u64 dco_central_freq[3] = { 8400000000ULL,
1565 static const u8 even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
1566 24, 28, 30, 32, 36, 40, 42, 44,
1567 48, 52, 54, 56, 60, 64, 66, 68,
1568 70, 72, 76, 78, 80, 84, 88, 90,
1570 static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1571 static const struct {
1575 { even_dividers, ARRAY_SIZE(even_dividers) },
1576 { odd_dividers, ARRAY_SIZE(odd_dividers) },
1578 struct skl_wrpll_context ctx = {
1579 .min_deviation = U64_MAX,
1581 unsigned int dco, d, i;
1582 unsigned int p0, p1, p2;
1583 u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
1585 for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1586 for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1587 for (i = 0; i < dividers[d].n_dividers; i++) {
1588 unsigned int p = dividers[d].list[i];
1589 u64 dco_freq = p * afe_clock;
1591 skl_wrpll_try_divider(&ctx,
1592 dco_central_freq[dco],
1596 * Skip the remaining dividers if we're sure to
1597 * have found the definitive divider, we can't
1598 * improve a 0 deviation.
1600 if (ctx.min_deviation == 0)
1601 goto skip_remaining_dividers;
1605 skip_remaining_dividers:
1607 * If a solution is found with an even divider, prefer
1608 * this one.
1610 if (d == 0 && ctx.p)
1618 * gcc incorrectly analyses that these can be used without being
1619 * initialized. To be fair, it's hard to guess.
1622 skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1623 skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
1624 ctx.central_freq, p0, p1, p2);
1629 static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
1630 const struct intel_shared_dpll *pll,
1631 const struct intel_dpll_hw_state *pll_state)
1633 int ref_clock = i915->display.dpll.ref_clks.nssc;
1634 u32 p0, p1, p2, dco_freq;
1636 p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
1637 p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
1639 if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
1640 p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
1646 case DPLL_CFGCR2_PDIV_1:
1649 case DPLL_CFGCR2_PDIV_2:
1652 case DPLL_CFGCR2_PDIV_3:
1655 case DPLL_CFGCR2_PDIV_7_INVALID:
1657 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
1658 * handling it the same way as PDIV_7.
1660 drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
1662 case DPLL_CFGCR2_PDIV_7:
1671 case DPLL_CFGCR2_KDIV_5:
1674 case DPLL_CFGCR2_KDIV_2:
1677 case DPLL_CFGCR2_KDIV_3:
1680 case DPLL_CFGCR2_KDIV_1:
1688 dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
1691 dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
1694 if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
1697 return dco_freq / (p0 * p1 * p2 * 5);
1700 static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1702 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1703 struct skl_wrpll_params wrpll_params = {};
1704 u32 ctrl1, cfgcr1, cfgcr2;
1708 * See comment in intel_dpll_hw_state to understand why we always use 0
1709 * as the DPLL id in this function.
1711 ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1713 ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
1715 ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
1716 i915->display.dpll.ref_clks.nssc, &wrpll_params);
1720 cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
1721 DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1722 wrpll_params.dco_integer;
1724 cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1725 DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1726 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1727 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1728 wrpll_params.central_freq;
1730 crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1731 crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
1732 crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
1734 crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
1735 &crtc_state->dpll_hw_state);
1741 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1746 * See comment in intel_dpll_hw_state to understand why we always use 0
1747 * as the DPLL id in this function.
1749 ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1750 switch (crtc_state->port_clock / 2) {
1752 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1755 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1758 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1762 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1765 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1768 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1772 crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1777 static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1778 const struct intel_shared_dpll *pll,
1779 const struct intel_dpll_hw_state *pll_state)
1783 switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1784 DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1785 case DPLL_CTRL1_LINK_RATE_810:
1788 case DPLL_CTRL1_LINK_RATE_1080:
1789 link_clock = 108000;
1791 case DPLL_CTRL1_LINK_RATE_1350:
1792 link_clock = 135000;
1794 case DPLL_CTRL1_LINK_RATE_1620:
1795 link_clock = 162000;
1797 case DPLL_CTRL1_LINK_RATE_2160:
1798 link_clock = 216000;
1800 case DPLL_CTRL1_LINK_RATE_2700:
1801 link_clock = 270000;
1804 drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1808 return link_clock * 2;
1811 static int skl_compute_dpll(struct intel_atomic_state *state,
1812 struct intel_crtc *crtc,
1813 struct intel_encoder *encoder)
1815 struct intel_crtc_state *crtc_state =
1816 intel_atomic_get_new_crtc_state(state, crtc);
1818 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1819 return skl_ddi_hdmi_pll_dividers(crtc_state);
1820 else if (intel_crtc_has_dp_encoder(crtc_state))
1821 return skl_ddi_dp_set_dpll_hw_state(crtc_state);
1826 static int skl_get_dpll(struct intel_atomic_state *state,
1827 struct intel_crtc *crtc,
1828 struct intel_encoder *encoder)
1830 struct intel_crtc_state *crtc_state =
1831 intel_atomic_get_new_crtc_state(state, crtc);
1832 struct intel_shared_dpll *pll;
1834 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1835 pll = intel_find_shared_dpll(state, crtc,
1836 &crtc_state->dpll_hw_state,
1837 BIT(DPLL_ID_SKL_DPLL0));
1839 pll = intel_find_shared_dpll(state, crtc,
1840 &crtc_state->dpll_hw_state,
1841 BIT(DPLL_ID_SKL_DPLL3) |
1842 BIT(DPLL_ID_SKL_DPLL2) |
1843 BIT(DPLL_ID_SKL_DPLL1));
1847 intel_reference_shared_dpll(state, crtc,
1848 pll, &crtc_state->dpll_hw_state);
1850 crtc_state->shared_dpll = pll;
1855 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1856 const struct intel_shared_dpll *pll,
1857 const struct intel_dpll_hw_state *pll_state)
1860 * ctrl1 register is already shifted for each pll, just use 0 to get
1861 * the internal shift for each field
1863 if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1864 return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
1865 else
1866 return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
1869 static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
1872 i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
1875 static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
1876 const struct intel_dpll_hw_state *hw_state)
1878 drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
1879 "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
1885 static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
1886 .enable = skl_ddi_pll_enable,
1887 .disable = skl_ddi_pll_disable,
1888 .get_hw_state = skl_ddi_pll_get_hw_state,
1889 .get_freq = skl_ddi_pll_get_freq,
1892 static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
1893 .enable = skl_ddi_dpll0_enable,
1894 .disable = skl_ddi_dpll0_disable,
1895 .get_hw_state = skl_ddi_dpll0_get_hw_state,
1896 .get_freq = skl_ddi_pll_get_freq,
1899 static const struct dpll_info skl_plls[] = {
1900 { "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
1901 { "DPLL 1", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
1902 { "DPLL 2", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
1903 { "DPLL 3", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL3, 0 },
1907 static const struct intel_dpll_mgr skl_pll_mgr = {
1908 .dpll_info = skl_plls,
1909 .compute_dplls = skl_compute_dpll,
1910 .get_dplls = skl_get_dpll,
1911 .put_dplls = intel_put_dpll,
1912 .update_ref_clks = skl_update_dpll_ref_clks,
1913 .dump_hw_state = skl_dump_hw_state,
1916 static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
1917 struct intel_shared_dpll *pll)
1920 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1922 enum dpio_channel ch;
1924 bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1926 /* Non-SSC reference */
1927 intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);
1929 if (IS_GEMINILAKE(dev_priv)) {
1930 intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port),
1931 0, PORT_PLL_POWER_ENABLE);
1933 if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
1934 PORT_PLL_POWER_STATE), 200))
1935 drm_err(&dev_priv->drm,
1936 "Power state not set for PLL:%d\n", port);
1939 /* Disable 10 bit clock */
1940 intel_de_rmw(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch),
1941 PORT_PLL_10BIT_CLK_ENABLE, 0);
1944 intel_de_rmw(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch),
1945 PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, pll->state.hw_state.ebb0);
1947 /* Write M2 integer */
1948 intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 0),
1949 PORT_PLL_M2_INT_MASK, pll->state.hw_state.pll0);
1952 intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 1),
1953 PORT_PLL_N_MASK, pll->state.hw_state.pll1);
1955 /* Write M2 fraction */
1956 intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 2),
1957 PORT_PLL_M2_FRAC_MASK, pll->state.hw_state.pll2);
1959 /* Write M2 fraction enable */
1960 intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 3),
1961 PORT_PLL_M2_FRAC_ENABLE, pll->state.hw_state.pll3);
1964 temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
1965 temp &= ~PORT_PLL_PROP_COEFF_MASK;
1966 temp &= ~PORT_PLL_INT_COEFF_MASK;
1967 temp &= ~PORT_PLL_GAIN_CTL_MASK;
1968 temp |= pll->state.hw_state.pll6;
1969 intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);
1971 /* Write calibration val */
1972 intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 8),
1973 PORT_PLL_TARGET_CNT_MASK, pll->state.hw_state.pll8);
1975 intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 9),
1976 PORT_PLL_LOCK_THRESHOLD_MASK, pll->state.hw_state.pll9);
1978 temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
1979 temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
1980 temp &= ~PORT_PLL_DCO_AMP_MASK;
1981 temp |= pll->state.hw_state.pll10;
1982 intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);
1984 /* Recalibrate with new settings */
1985 temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
1986 temp |= PORT_PLL_RECALIBRATE;
1987 intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
1988 temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1989 temp |= pll->state.hw_state.ebb4;
1990 intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
1993 intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
1994 intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1996 if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
1998 drm_err(&dev_priv->drm, "PLL %d not locked\n", port);
2000 if (IS_GEMINILAKE(dev_priv)) {
2001 temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
2002 temp |= DCC_DELAY_RANGE_2;
2003 intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
2007 * While we write to the group register to program all lanes at once we
2008 * can read only lane registers and we pick lanes 0/1 for that.
2010 temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
2011 temp &= ~LANE_STAGGER_MASK;
2012 temp &= ~LANESTAGGER_STRAP_OVRD;
2013 temp |= pll->state.hw_state.pcsdw12;
2014 intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
2017 static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
2018 struct intel_shared_dpll *pll)
2020 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2022 intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
2023 intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
2025 if (IS_GEMINILAKE(dev_priv)) {
2026 intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port),
2027 PORT_PLL_POWER_ENABLE, 0);
2029 if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
2030 PORT_PLL_POWER_STATE), 200))
2031 drm_err(&dev_priv->drm,
2032 "Power state not reset for PLL:%d\n", port);
2036 static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2037 struct intel_shared_dpll *pll,
2038 struct intel_dpll_hw_state *hw_state)
2040 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2041 intel_wakeref_t wakeref;
2043 enum dpio_channel ch;
2047 bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
2049 wakeref = intel_display_power_get_if_enabled(dev_priv,
2050 POWER_DOMAIN_DISPLAY_CORE);
2056 val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
2057 if (!(val & PORT_PLL_ENABLE))
2060 hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
2061 hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
2063 hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
2064 hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
2066 hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
2067 hw_state->pll0 &= PORT_PLL_M2_INT_MASK;
2069 hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
2070 hw_state->pll1 &= PORT_PLL_N_MASK;
2072 hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
2073 hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
2075 hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
2076 hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
2078 hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
2079 hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
2080 PORT_PLL_INT_COEFF_MASK |
2081 PORT_PLL_GAIN_CTL_MASK;
2083 hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
2084 hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
2086 hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
2087 hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
2089 hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
2090 hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
2091 PORT_PLL_DCO_AMP_MASK;
2094 * While we write to the group register to program all lanes at once we
2095 * can read only lane registers. We configure all lanes the same way, so
2096 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
2098 hw_state->pcsdw12 = intel_de_read(dev_priv,
2099 BXT_PORT_PCS_DW12_LN01(phy, ch));
2100 if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
2101 drm_dbg(&dev_priv->drm,
2102 "lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
2104 intel_de_read(dev_priv,
2105 BXT_PORT_PCS_DW12_LN23(phy, ch)));
2106 hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
2111 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
2116 /* pre-calculated values for DP linkrates */
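/*
 * As a sanity check on the .22 fixed-point m2 values below:
 * 0x6c00000 / 2^22 = 27.0 and 0x819999a / 2^22 ~= 32.4, matching the
 * 27.0 and 32.4 annotations in the table.
 */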
2117 static const struct dpll bxt_dp_clk_val[] = {
2118 /* m2 is .22 binary fixed point */
2119 { .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2120 { .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2121 { .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2122 { .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2123 { .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
2124 { .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2125 { .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2129 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2130 struct dpll *clk_div)
2132 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2134 /* Calculate HDMI div */
2136 * FIXME: tie the following calculation into
2137 * i9xx_crtc_compute_clock
2139 if (!bxt_find_best_dpll(crtc_state, clk_div))
2142 drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
2147 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2148 struct dpll *clk_div)
2150 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2153 *clk_div = bxt_dp_clk_val[0];
2154 for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2155 if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2156 *clk_div = bxt_dp_clk_val[i];
2161 chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div);
2163 drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
2164 clk_div->dot != crtc_state->port_clock);
2167 static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
2168 const struct dpll *clk_div)
2170 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2171 struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
2172 int clock = crtc_state->port_clock;
2173 int vco = clk_div->vco;
2174 u32 prop_coef, int_coef, gain_ctl, targ_cnt;
2177 if (vco >= 6200000 && vco <= 6700000) {
2182 } else if ((vco > 5400000 && vco < 6200000) ||
2183 (vco >= 4800000 && vco < 5400000)) {
2188 } else if (vco == 5400000) {
2194 drm_err(&i915->drm, "Invalid VCO\n");
2200 else if (clock > 135000)
2202 else if (clock > 67000)
2204 else if (clock > 33000)
2209 dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
2210 dpll_hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
2211 dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
2212 dpll_hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);
2214 if (clk_div->m2 & 0x3fffff)
2215 dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
2217 dpll_hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
2218 PORT_PLL_INT_COEFF(int_coef) |
2219 PORT_PLL_GAIN_CTL(gain_ctl);
2221 dpll_hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);
2223 dpll_hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);
2225 dpll_hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
2226 PORT_PLL_DCO_AMP_OVR_EN_H;
2228 dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
2230 dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
2235 static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
2236 const struct intel_shared_dpll *pll,
2237 const struct intel_dpll_hw_state *pll_state)
2242 clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
2243 if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2244 clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
2245 clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
2246 clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
2247 clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);
2249 return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
2253 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2255 struct dpll clk_div = {};
2257 bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2259 return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2263 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2265 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2266 struct dpll clk_div = {};
2269 bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2271 ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2275 crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL,
2276 &crtc_state->dpll_hw_state);
2281 static int bxt_compute_dpll(struct intel_atomic_state *state,
2282 struct intel_crtc *crtc,
2283 struct intel_encoder *encoder)
2285 struct intel_crtc_state *crtc_state =
2286 intel_atomic_get_new_crtc_state(state, crtc);
2288 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2289 return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
2290 else if (intel_crtc_has_dp_encoder(crtc_state))
2291 return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
2296 static int bxt_get_dpll(struct intel_atomic_state *state,
2297 struct intel_crtc *crtc,
2298 struct intel_encoder *encoder)
2300 struct intel_crtc_state *crtc_state =
2301 intel_atomic_get_new_crtc_state(state, crtc);
2302 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2303 struct intel_shared_dpll *pll;
2304 enum intel_dpll_id id;
2306 /* 1:1 mapping between ports and PLLs */
2307 id = (enum intel_dpll_id) encoder->port;
2308 pll = intel_get_shared_dpll_by_id(dev_priv, id);
2310 drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
2311 crtc->base.base.id, crtc->base.name, pll->info->name);
2313 intel_reference_shared_dpll(state, crtc,
2314 pll, &crtc_state->dpll_hw_state);
2316 crtc_state->shared_dpll = pll;
2321 static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
2323 i915->display.dpll.ref_clks.ssc = 100000;
2324 i915->display.dpll.ref_clks.nssc = 100000;
2325 /* DSI non-SSC ref 19.2MHz */
2328 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
2329 const struct intel_dpll_hw_state *hw_state)
2331 drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
2332 "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2333 "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2347 static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
2348 .enable = bxt_ddi_pll_enable,
2349 .disable = bxt_ddi_pll_disable,
2350 .get_hw_state = bxt_ddi_pll_get_hw_state,
2351 .get_freq = bxt_ddi_pll_get_freq,
2354 static const struct dpll_info bxt_plls[] = {
2355 { "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
2356 { "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
2357 { "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
2361 static const struct intel_dpll_mgr bxt_pll_mgr = {
2362 .dpll_info = bxt_plls,
2363 .compute_dplls = bxt_compute_dpll,
2364 .get_dplls = bxt_get_dpll,
2365 .put_dplls = intel_put_dpll,
2366 .update_ref_clks = bxt_update_dpll_ref_clks,
2367 .dump_hw_state = bxt_dump_hw_state,
2370 static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
2371 int *qdiv, int *kdiv)
2374 if (bestdiv % 2 == 0) {
2379 } else if (bestdiv % 4 == 0) {
2381 *qdiv = bestdiv / 4;
2383 } else if (bestdiv % 6 == 0) {
2385 *qdiv = bestdiv / 6;
2387 } else if (bestdiv % 5 == 0) {
2389 *qdiv = bestdiv / 10;
2391 } else if (bestdiv % 14 == 0) {
2393 *qdiv = bestdiv / 14;
2397 if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
2401 } else { /* 9, 15, 21 */
2402 *pdiv = bestdiv / 3;
2409 static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
2410 u32 dco_freq, u32 ref_freq,
2411 int pdiv, int qdiv, int kdiv)
2426 WARN(1, "Incorrect KDiv\n");
2443 WARN(1, "Incorrect PDiv\n");
2446 WARN_ON(kdiv != 2 && qdiv != 1);
2448 params->qdiv_ratio = qdiv;
2449 params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2451 dco = div_u64((u64)dco_freq << 15, ref_freq);
2453 params->dco_integer = dco >> 15;
2454 params->dco_fraction = dco & 0x7fff;
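/*
* Worked example (illustrative): with an 8,100,000 kHz DCO and a
* 19,200 kHz reference, dco = 8100000 * 2^15 / 19200 = 421.875 * 2^15,
* so dco_integer = 421 (0x1A5) and dco_fraction = 0.875 * 2^15 = 28672
* (0x7000), matching entry [0] of icl_dp_combo_pll_19_2MHz_values below.
*/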
2458 * Display WA #22010492432: ehl, tgl, adl-s, adl-p
2459 * Program half of the nominal DCO divider fraction value.
2462 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2464 return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
2465 IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2466 IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
2467 i915->display.dpll.ref_clks.nssc == 38400;
2470 struct icl_combo_pll_params {
2472 struct skl_wrpll_params wrpll;
2476 * These values are already adjusted: they're the bits we write to the
2477 * registers, not the logical values.
2479 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2481 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [0]: 5.4 */
2482 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2484 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [1]: 2.7 */
2485 .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2487 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [2]: 1.62 */
2488 .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2490 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [3]: 3.24 */
2491 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2493 { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [4]: 2.16 */
2494 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2496 { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [5]: 4.32 */
2497 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2499 { .dco_integer = 0x195, .dco_fraction = 0x0000, /* [6]: 6.48 */
2500 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2502 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [7]: 8.1 */
2503 .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2507 /* Also used for 38.4 MHz values. */
2508 static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2510 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [0]: 5.4 */
2511 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2513 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [1]: 2.7 */
2514 .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2516 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [2]: 1.62 */
2517 .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2519 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [3]: 3.24 */
2520 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2522 { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [4]: 2.16 */
2523 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2525 { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [5]: 4.32 */
2526 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2528 { .dco_integer = 0x1FA, .dco_fraction = 0x2000, /* [6]: 6.48 */
2529 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2531 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [7]: 8.1 */
2532 .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2535 static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2536 .dco_integer = 0x151, .dco_fraction = 0x4000,
2537 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2540 static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
2541 .dco_integer = 0x1A5, .dco_fraction = 0x7000,
2542 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2545 static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
2546 .dco_integer = 0x54, .dco_fraction = 0x3000,
2547 /* the following params are unused */
2548 .pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
2551 static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
2552 .dco_integer = 0x43, .dco_fraction = 0x4000,
2553 /* the following params are unused */
2556 static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2557 struct skl_wrpll_params *pll_params)
2559 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2560 const struct icl_combo_pll_params *params =
2561 dev_priv->display.dpll.ref_clks.nssc == 24000 ?
2562 icl_dp_combo_pll_24MHz_values :
2563 icl_dp_combo_pll_19_2MHz_values;
2564 int clock = crtc_state->port_clock;
2567 for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2568 if (clock == params[i].clock) {
2569 *pll_params = params[i].wrpll;
2574 MISSING_CASE(clock);
2578 static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2579 struct skl_wrpll_params *pll_params)
2581 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2583 if (DISPLAY_VER(dev_priv) >= 12) {
2584 switch (dev_priv->display.dpll.ref_clks.nssc) {
2586 MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
2590 *pll_params = tgl_tbt_pll_19_2MHz_values;
2593 *pll_params = tgl_tbt_pll_24MHz_values;
2597 switch (dev_priv->display.dpll.ref_clks.nssc) {
2599 MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
2603 *pll_params = icl_tbt_pll_19_2MHz_values;
2606 *pll_params = icl_tbt_pll_24MHz_values;
2614 static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
2615 const struct intel_shared_dpll *pll,
2616 const struct intel_dpll_hw_state *pll_state)
2619 * The PLL outputs multiple frequencies at the same time; the selection is
2620 * made at the DDI clock mux level.
2622 drm_WARN_ON(&i915->drm, 1);
2627 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2629 int ref_clock = i915->display.dpll.ref_clks.nssc;
2632 * For ICL+, the spec states: if reference frequency is 38.4,
2633 * use 19.2 because the DPLL automatically divides that by 2.
2635 if (ref_clock == 38400)
2642 icl_calc_wrpll(struct intel_crtc_state *crtc_state,
2643 struct skl_wrpll_params *wrpll_params)
2645 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2646 int ref_clock = icl_wrpll_ref_clock(i915);
2647 u32 afe_clock = crtc_state->port_clock * 5;
2648 u32 dco_min = 7998000;
2649 u32 dco_max = 10000000;
2650 u32 dco_mid = (dco_min + dco_max) / 2;
2651 static const int dividers[] = { 2, 4, 6, 8, 10, 12, 14, 16,
2652 18, 20, 24, 28, 30, 32, 36, 40,
2653 42, 44, 48, 50, 52, 54, 56, 60,
2654 64, 66, 68, 70, 72, 76, 78, 80,
2655 84, 88, 90, 92, 96, 98, 100, 102,
2656 3, 5, 7, 9, 15, 21 };
2657 u32 dco, best_dco = 0, dco_centrality = 0;
2658 u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2659 int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
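/*
* Illustrative example: for a 148,500 kHz HDMI port clock the AFE clock
* is 742,500 kHz and only divider 12 lands the DCO in range
* (742500 * 12 = 8,910,000), so best_div = 12 and the multipliers come
* from the "% 4 == 0" branch of icl_wrpll_get_multipliers()
* (qdiv = 12 / 4 = 3).
*/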
2661 for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2662 dco = afe_clock * dividers[d];
2664 if (dco <= dco_max && dco >= dco_min) {
2665 dco_centrality = abs(dco - dco_mid);
2667 if (dco_centrality < best_dco_centrality) {
2668 best_dco_centrality = dco_centrality;
2669 best_div = dividers[d];
2678 icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2679 icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2685 static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
2686 const struct intel_shared_dpll *pll,
2687 const struct intel_dpll_hw_state *pll_state)
2689 int ref_clock = icl_wrpll_ref_clock(i915);
2691 u32 p0, p1, p2, dco_freq;
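/*
* The link clock follows from DCO / (5 * p0 * p1 * p2), with p0/p1/p2
* being the pdiv/qdiv/kdiv ratios read back below and the factor of 5
* being the AFE clock to symbol clock ratio.
*/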
2693 p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
2694 p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
2696 if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
2697 p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
2698 DPLL_CFGCR1_QDIV_RATIO_SHIFT;
2703 case DPLL_CFGCR1_PDIV_2:
2706 case DPLL_CFGCR1_PDIV_3:
2709 case DPLL_CFGCR1_PDIV_5:
2712 case DPLL_CFGCR1_PDIV_7:
2718 case DPLL_CFGCR1_KDIV_1:
2721 case DPLL_CFGCR1_KDIV_2:
2724 case DPLL_CFGCR1_KDIV_3:
2729 dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
2732 dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
2733 DPLL_CFGCR0_DCO_FRACTION_SHIFT;
2735 if (ehl_combo_pll_div_frac_wa_needed(i915))
2738 dco_freq += (dco_fraction * ref_clock) / 0x8000;
2740 if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
2743 return dco_freq / (p0 * p1 * p2 * 5);
2746 static void icl_calc_dpll_state(struct drm_i915_private *i915,
2747 const struct skl_wrpll_params *pll_params,
2748 struct intel_dpll_hw_state *pll_state)
2750 u32 dco_fraction = pll_params->dco_fraction;
2752 if (ehl_combo_pll_div_frac_wa_needed(i915))
2753 dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
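/*
* E.g. a nominal fraction of 0x7000 (28672) is programmed as
* DIV_ROUND_CLOSEST(28672, 2) = 14336 (0x3800) when the workaround
* applies; icl_ddi_combo_pll_get_freq() checks the same predicate on
* readout.
*/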
2755 pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
2756 pll_params->dco_integer;
2758 pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
2759 DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
2760 DPLL_CFGCR1_KDIV(pll_params->kdiv) |
2761 DPLL_CFGCR1_PDIV(pll_params->pdiv);
2763 if (DISPLAY_VER(i915) >= 12)
2764 pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
2766 pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
2768 if (i915->display.vbt.override_afc_startup)
2769 pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
2772 static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2773 u32 *target_dco_khz,
2774 struct intel_dpll_hw_state *state,
2777 static const u8 div1_vals[] = { 7, 5, 3, 2 };
2778 u32 dco_min_freq, dco_max_freq;
2782 dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2783 dco_max_freq = is_dp ? 8100000 : 10000000;
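/*
* Example: for a 270,000 kHz DP port clock the DCO must hit exactly
* 8,100,000 kHz; the first match in the search order below is
* div1 = 3, div2 = 2 (3 * 2 * 270000 * 5 = 8,100,000).
*/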
2785 for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2786 int div1 = div1_vals[i];
2788 for (div2 = 10; div2 > 0; div2--) {
2789 int dco = div1 * div2 * clock_khz * 5;
2790 int a_divratio, tlinedrv, inputsel;
2793 if (dco < dco_min_freq || dco > dco_max_freq)
2798 * Note: a_divratio does not match the TGL BSpec
2799 * algorithm, but it matches the hardcoded values and
2800 * works on HW, at least for DP alt-mode
2802 a_divratio = is_dp ? 10 : 5;
2803 tlinedrv = is_dkl ? 1 : 2;
2808 inputsel = is_dp ? 0 : 1;
2815 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2818 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2821 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2824 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2828 *target_dco_khz = dco;
2830 state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2832 state->mg_clktop2_coreclkctl1 =
2833 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2835 state->mg_clktop2_hsclkctl =
2836 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2837 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2839 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
2849 * The specification for this function uses real numbers, so the math had to be
2850 * adapted to integer-only calculation, which is why it looks so different.
2852 static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2853 struct intel_dpll_hw_state *pll_state)
2855 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2856 int refclk_khz = dev_priv->display.dpll.ref_clks.nssc;
2857 int clock = crtc_state->port_clock;
2858 u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2859 u32 iref_ndiv, iref_trim, iref_pulse_w;
2860 u32 prop_coeff, int_coeff;
2861 u32 tdc_targetcnt, feedfwgain;
2862 u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2864 bool use_ssc = false;
2865 bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2866 bool is_dkl = DISPLAY_VER(dev_priv) >= 12;
2869 ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2875 m2div_int = dco_khz / (refclk_khz * m1div);
2876 if (m2div_int > 255) {
2879 m2div_int = dco_khz / (refclk_khz * m1div);
2882 if (m2div_int > 255)
2885 m2div_rem = dco_khz % (refclk_khz * m1div);
2887 tmp = (u64)m2div_rem * (1 << 22);
2888 do_div(tmp, refclk_khz * m1div);
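/*
* tmp now holds the fractional part of the feedback divider scaled by
* 2^22, i.e. (up to truncation) the DCO equals
* refclk * m1div * (m2div_int + m2div_frac / 2^22).
*/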
2891 switch (refclk_khz) {
2908 MISSING_CASE(refclk_khz);
2913 * tdc_res = 0.000003
2914 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2916 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
2917 * was supposed to be a division, but we rearranged the operations of
2918 * the formula to avoid early divisions so we don't multiply the
2921 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
2922 * we also rearrange to work with integers.
2924 * The 0.5 is transformed to 5, which results in a multiplication by 10 and the
2925 * last division by 10.
2927 tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
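/*
* E.g. with a 38,400 kHz reference: 2 * 10^9 / (132 * 38400) = 394 and
* (394 + 5) / 10 = 39, matching the real-number formula above
* (1515.15 / 38.4 + 0.5 ~= 39.96 -> 39).
*/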
2930 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
2931 * 32 bits. That's not a problem since we round the division down
2934 feedfwgain = (use_ssc || m2div_rem > 0) ?
2935 m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
2937 if (dco_khz >= 9000000) {
2946 tmp = mul_u32_u32(dco_khz, 47 * 32);
2947 do_div(tmp, refclk_khz * m1div * 10000);
2950 tmp = mul_u32_u32(dco_khz, 1000);
2951 ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
2958 /* write pll_state calculations */
2960 pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
2961 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
2962 DKL_PLL_DIV0_FBPREDIV(m1div) |
2963 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
2964 if (dev_priv->display.vbt.override_afc_startup) {
2965 u8 val = dev_priv->display.vbt.override_afc_startup_val;
2967 pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
2970 pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
2971 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
2973 pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
2974 DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
2975 DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
2976 (use_ssc ? DKL_PLL_SSC_EN : 0);
2978 pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
2979 DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
2981 pll_state->mg_pll_tdc_coldst_bias =
2982 DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
2983 DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
2986 pll_state->mg_pll_div0 =
2987 (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
2988 MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
2989 MG_PLL_DIV0_FBDIV_INT(m2div_int);
2991 pll_state->mg_pll_div1 =
2992 MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
2993 MG_PLL_DIV1_DITHER_DIV_2 |
2994 MG_PLL_DIV1_NDIVRATIO(1) |
2995 MG_PLL_DIV1_FBPREDIV(m1div);
2997 pll_state->mg_pll_lf =
2998 MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
2999 MG_PLL_LF_AFCCNTSEL_512 |
3000 MG_PLL_LF_GAINCTRL(1) |
3001 MG_PLL_LF_INT_COEFF(int_coeff) |
3002 MG_PLL_LF_PROP_COEFF(prop_coeff);
3004 pll_state->mg_pll_frac_lock =
3005 MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3006 MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3007 MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3008 MG_PLL_FRAC_LOCK_DCODITHEREN |
3009 MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3010 if (use_ssc || m2div_rem > 0)
3011 pll_state->mg_pll_frac_lock |=
3012 MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3014 pll_state->mg_pll_ssc =
3015 (use_ssc ? MG_PLL_SSC_EN : 0) |
3016 MG_PLL_SSC_TYPE(2) |
3017 MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3018 MG_PLL_SSC_STEPNUM(ssc_steplog) |
3020 MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3022 pll_state->mg_pll_tdc_coldst_bias =
3023 MG_PLL_TDC_COLDST_COLDSTART |
3024 MG_PLL_TDC_COLDST_IREFINT_EN |
3025 MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3026 MG_PLL_TDC_TDCOVCCORR_EN |
3027 MG_PLL_TDC_TDCSEL(3);
3029 pll_state->mg_pll_bias =
3030 MG_PLL_BIAS_BIAS_GB_SEL(3) |
3031 MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3032 MG_PLL_BIAS_BIAS_BONUS(10) |
3033 MG_PLL_BIAS_BIASCAL_EN |
3034 MG_PLL_BIAS_CTRIM(12) |
3035 MG_PLL_BIAS_VREF_RDAC(4) |
3036 MG_PLL_BIAS_IREFTRIM(iref_trim);
3038 if (refclk_khz == 38400) {
3039 pll_state->mg_pll_tdc_coldst_bias_mask =
3040 MG_PLL_TDC_COLDST_COLDSTART;
3041 pll_state->mg_pll_bias_mask = 0;
3043 pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
3044 pll_state->mg_pll_bias_mask = -1U;
3047 pll_state->mg_pll_tdc_coldst_bias &=
3048 pll_state->mg_pll_tdc_coldst_bias_mask;
3049 pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
3055 static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
3056 const struct intel_shared_dpll *pll,
3057 const struct intel_dpll_hw_state *pll_state)
3059 u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
3062 ref_clock = dev_priv->display.dpll.ref_clks.nssc;
3064 if (DISPLAY_VER(dev_priv) >= 12) {
3065 m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
3066 m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
3067 m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
3069 if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
3070 m2_frac = pll_state->mg_pll_bias &
3071 DKL_PLL_BIAS_FBDIV_FRAC_MASK;
3072 m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
3077 m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
3078 m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
3080 if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
3081 m2_frac = pll_state->mg_pll_div0 &
3082 MG_PLL_DIV0_FBDIV_FRAC_MASK;
3083 m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
3089 switch (pll_state->mg_clktop2_hsclkctl &
3090 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
3091 case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
3094 case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
3097 case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
3100 case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
3104 MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
3108 div2 = (pll_state->mg_clktop2_hsclkctl &
3109 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
3110 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
3112 /* a div2 value of 0 is the same as 1, i.e. no divider */
3117 * Adjust the original formula to delay the division by 2^22 in order to
3118 * minimize possible rounding errors.
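*
* The computed value is equivalent (up to rounding) to
*   ref_clock * m1 * (m2_int + m2_frac / 2^22) / (5 * div1 * div2).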
3120 tmp = (u64)m1 * m2_int * ref_clock +
3121 (((u64)m1 * m2_frac * ref_clock) >> 22);
3122 tmp = div_u64(tmp, 5 * div1 * div2);
3128 * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3129 * @crtc_state: state for the CRTC to select the DPLL for
3130 * @port_dpll_id: the active @port_dpll_id to select
3132 * Select the given @port_dpll_id instance from the DPLLs reserved for the
3135 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3136 enum icl_port_dpll_id port_dpll_id)
3138 struct icl_port_dpll *port_dpll =
3139 &crtc_state->icl_port_dplls[port_dpll_id];
3141 crtc_state->shared_dpll = port_dpll->pll;
3142 crtc_state->dpll_hw_state = port_dpll->hw_state;
3145 static void icl_update_active_dpll(struct intel_atomic_state *state,
3146 struct intel_crtc *crtc,
3147 struct intel_encoder *encoder)
3149 struct intel_crtc_state *crtc_state =
3150 intel_atomic_get_new_crtc_state(state, crtc);
3151 struct intel_digital_port *primary_port;
3152 enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3154 primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3155 enc_to_mst(encoder)->primary :
3156 enc_to_dig_port(encoder);
3159 (intel_tc_port_in_dp_alt_mode(primary_port) ||
3160 intel_tc_port_in_legacy_mode(primary_port)))
3161 port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3163 icl_set_active_port_dpll(crtc_state, port_dpll_id);
3166 static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
3167 struct intel_crtc *crtc)
3169 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3170 struct intel_crtc_state *crtc_state =
3171 intel_atomic_get_new_crtc_state(state, crtc);
3172 struct icl_port_dpll *port_dpll =
3173 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3174 struct skl_wrpll_params pll_params = {};
3177 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3178 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3179 ret = icl_calc_wrpll(crtc_state, &pll_params);
3181 ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3186 icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3188 /* this is mainly for the fastset check */
3189 icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3191 crtc_state->port_clock = icl_ddi_combo_pll_get_freq(dev_priv, NULL,
3192 &port_dpll->hw_state);
3197 static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3198 struct intel_crtc *crtc,
3199 struct intel_encoder *encoder)
3201 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3202 struct intel_crtc_state *crtc_state =
3203 intel_atomic_get_new_crtc_state(state, crtc);
3204 struct icl_port_dpll *port_dpll =
3205 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3206 enum port port = encoder->port;
3207 unsigned long dpll_mask;
3209 if (IS_ALDERLAKE_S(dev_priv)) {
3211 BIT(DPLL_ID_DG1_DPLL3) |
3212 BIT(DPLL_ID_DG1_DPLL2) |
3213 BIT(DPLL_ID_ICL_DPLL1) |
3214 BIT(DPLL_ID_ICL_DPLL0);
3215 } else if (IS_DG1(dev_priv)) {
3216 if (port == PORT_D || port == PORT_E) {
3218 BIT(DPLL_ID_DG1_DPLL2) |
3219 BIT(DPLL_ID_DG1_DPLL3);
3222 BIT(DPLL_ID_DG1_DPLL0) |
3223 BIT(DPLL_ID_DG1_DPLL1);
3225 } else if (IS_ROCKETLAKE(dev_priv)) {
3227 BIT(DPLL_ID_EHL_DPLL4) |
3228 BIT(DPLL_ID_ICL_DPLL1) |
3229 BIT(DPLL_ID_ICL_DPLL0);
3230 } else if (IS_JSL_EHL(dev_priv) && port != PORT_A) {
3232 BIT(DPLL_ID_EHL_DPLL4) |
3233 BIT(DPLL_ID_ICL_DPLL1) |
3234 BIT(DPLL_ID_ICL_DPLL0);
3236 dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3239 /* Eliminate DPLLs from consideration if reserved by HTI */
3240 dpll_mask &= ~intel_hti_dpll_mask(dev_priv);
3242 port_dpll->pll = intel_find_shared_dpll(state, crtc,
3243 &port_dpll->hw_state,
3245 if (!port_dpll->pll)
3248 intel_reference_shared_dpll(state, crtc,
3249 port_dpll->pll, &port_dpll->hw_state);
3251 icl_update_active_dpll(state, crtc, encoder);
3256 static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
3257 struct intel_crtc *crtc)
3259 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3260 struct intel_crtc_state *crtc_state =
3261 intel_atomic_get_new_crtc_state(state, crtc);
3262 struct icl_port_dpll *port_dpll =
3263 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3264 struct skl_wrpll_params pll_params = {};
3267 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3268 ret = icl_calc_tbt_pll(crtc_state, &pll_params);
3272 icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3274 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3275 ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
3279 /* this is mainly for the fastset check */
3280 icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
3282 crtc_state->port_clock = icl_ddi_mg_pll_get_freq(dev_priv, NULL,
3283 &port_dpll->hw_state);
3288 static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3289 struct intel_crtc *crtc,
3290 struct intel_encoder *encoder)
3292 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3293 struct intel_crtc_state *crtc_state =
3294 intel_atomic_get_new_crtc_state(state, crtc);
3295 struct icl_port_dpll *port_dpll =
3296 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3297 enum intel_dpll_id dpll_id;
3300 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3301 port_dpll->pll = intel_find_shared_dpll(state, crtc,
3302 &port_dpll->hw_state,
3303 BIT(DPLL_ID_ICL_TBTPLL));
3304 if (!port_dpll->pll)
3306 intel_reference_shared_dpll(state, crtc,
3307 port_dpll->pll, &port_dpll->hw_state);
3310 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3311 dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
3313 port_dpll->pll = intel_find_shared_dpll(state, crtc,
3314 &port_dpll->hw_state,
3316 if (!port_dpll->pll) {
3318 goto err_unreference_tbt_pll;
3320 intel_reference_shared_dpll(state, crtc,
3321 port_dpll->pll, &port_dpll->hw_state);
3323 icl_update_active_dpll(state, crtc, encoder);
3327 err_unreference_tbt_pll:
3328 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3329 intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
3334 static int icl_compute_dplls(struct intel_atomic_state *state,
3335 struct intel_crtc *crtc,
3336 struct intel_encoder *encoder)
3338 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3339 enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3341 if (intel_phy_is_combo(dev_priv, phy))
3342 return icl_compute_combo_phy_dpll(state, crtc);
3343 else if (intel_phy_is_tc(dev_priv, phy))
3344 return icl_compute_tc_phy_dplls(state, crtc);
3351 static int icl_get_dplls(struct intel_atomic_state *state,
3352 struct intel_crtc *crtc,
3353 struct intel_encoder *encoder)
3355 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3356 enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3358 if (intel_phy_is_combo(dev_priv, phy))
3359 return icl_get_combo_phy_dpll(state, crtc, encoder);
3360 else if (intel_phy_is_tc(dev_priv, phy))
3361 return icl_get_tc_phy_dplls(state, crtc, encoder);
3368 static void icl_put_dplls(struct intel_atomic_state *state,
3369 struct intel_crtc *crtc)
3371 const struct intel_crtc_state *old_crtc_state =
3372 intel_atomic_get_old_crtc_state(state, crtc);
3373 struct intel_crtc_state *new_crtc_state =
3374 intel_atomic_get_new_crtc_state(state, crtc);
3375 enum icl_port_dpll_id id;
3377 new_crtc_state->shared_dpll = NULL;
3379 for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3380 const struct icl_port_dpll *old_port_dpll =
3381 &old_crtc_state->icl_port_dplls[id];
3382 struct icl_port_dpll *new_port_dpll =
3383 &new_crtc_state->icl_port_dplls[id];
3385 new_port_dpll->pll = NULL;
3387 if (!old_port_dpll->pll)
3390 intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3394 static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
3395 struct intel_shared_dpll *pll,
3396 struct intel_dpll_hw_state *hw_state)
3398 const enum intel_dpll_id id = pll->info->id;
3399 enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3400 intel_wakeref_t wakeref;
3404 i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);
3406 wakeref = intel_display_power_get_if_enabled(dev_priv,
3407 POWER_DOMAIN_DISPLAY_CORE);
3411 val = intel_de_read(dev_priv, enable_reg);
3412 if (!(val & PLL_ENABLE))
3415 hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
3416 MG_REFCLKIN_CTL(tc_port));
3417 hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3419 hw_state->mg_clktop2_coreclkctl1 =
3420 intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
3421 hw_state->mg_clktop2_coreclkctl1 &=
3422 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3424 hw_state->mg_clktop2_hsclkctl =
3425 intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
3426 hw_state->mg_clktop2_hsclkctl &=
3427 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3428 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3429 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3430 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3432 hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
3433 hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
3434 hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
3435 hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
3436 MG_PLL_FRAC_LOCK(tc_port));
3437 hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));
3439 hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
3440 hw_state->mg_pll_tdc_coldst_bias =
3441 intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
3443 if (dev_priv->display.dpll.ref_clks.nssc == 38400) {
3444 hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
3445 hw_state->mg_pll_bias_mask = 0;
3447 hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3448 hw_state->mg_pll_bias_mask = -1U;
3451 hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
3452 hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3456 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3460 static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
3461 struct intel_shared_dpll *pll,
3462 struct intel_dpll_hw_state *hw_state)
3464 const enum intel_dpll_id id = pll->info->id;
3465 enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3466 intel_wakeref_t wakeref;
3470 wakeref = intel_display_power_get_if_enabled(dev_priv,
3471 POWER_DOMAIN_DISPLAY_CORE);
3475 val = intel_de_read(dev_priv, intel_tc_pll_enable_reg(dev_priv, pll));
3476 if (!(val & PLL_ENABLE))
3480 * All registers read here have the same HIP_INDEX_REG even though
3481 * they are on different building blocks
3483 hw_state->mg_refclkin_ctl = intel_dkl_phy_read(dev_priv,
3484 DKL_REFCLKIN_CTL(tc_port));
3485 hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3487 hw_state->mg_clktop2_hsclkctl =
3488 intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
3489 hw_state->mg_clktop2_hsclkctl &=
3490 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3491 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3492 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3493 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3495 hw_state->mg_clktop2_coreclkctl1 =
3496 intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3497 hw_state->mg_clktop2_coreclkctl1 &=
3498 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3500 hw_state->mg_pll_div0 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV0(tc_port));
3501 val = DKL_PLL_DIV0_MASK;
3502 if (dev_priv->display.vbt.override_afc_startup)
3503 val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3504 hw_state->mg_pll_div0 &= val;
3506 hw_state->mg_pll_div1 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port));
3507 hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
3508 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3510 hw_state->mg_pll_ssc = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port));
3511 hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3512 DKL_PLL_SSC_STEP_LEN_MASK |
3513 DKL_PLL_SSC_STEP_NUM_MASK |
3516 hw_state->mg_pll_bias = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port));
3517 hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
3518 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3520 hw_state->mg_pll_tdc_coldst_bias =
3521 intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3522 hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3523 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3527 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3531 static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
3532 struct intel_shared_dpll *pll,
3533 struct intel_dpll_hw_state *hw_state,
3534 i915_reg_t enable_reg)
3536 const enum intel_dpll_id id = pll->info->id;
3537 intel_wakeref_t wakeref;
3541 wakeref = intel_display_power_get_if_enabled(dev_priv,
3542 POWER_DOMAIN_DISPLAY_CORE);
3546 val = intel_de_read(dev_priv, enable_reg);
3547 if (!(val & PLL_ENABLE))
3550 if (IS_ALDERLAKE_S(dev_priv)) {
3551 hw_state->cfgcr0 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR0(id));
3552 hw_state->cfgcr1 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR1(id));
3553 } else if (IS_DG1(dev_priv)) {
3554 hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id));
3555 hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id));
3556 } else if (IS_ROCKETLAKE(dev_priv)) {
3557 hw_state->cfgcr0 = intel_de_read(dev_priv,
3558 RKL_DPLL_CFGCR0(id));
3559 hw_state->cfgcr1 = intel_de_read(dev_priv,
3560 RKL_DPLL_CFGCR1(id));
3561 } else if (DISPLAY_VER(dev_priv) >= 12) {
3562 hw_state->cfgcr0 = intel_de_read(dev_priv,
3563 TGL_DPLL_CFGCR0(id));
3564 hw_state->cfgcr1 = intel_de_read(dev_priv,
3565 TGL_DPLL_CFGCR1(id));
3566 if (dev_priv->display.vbt.override_afc_startup) {
3567 hw_state->div0 = intel_de_read(dev_priv, TGL_DPLL0_DIV0(id));
3568 hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
3571 if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3572 hw_state->cfgcr0 = intel_de_read(dev_priv,
3573 ICL_DPLL_CFGCR0(4));
3574 hw_state->cfgcr1 = intel_de_read(dev_priv,
3575 ICL_DPLL_CFGCR1(4));
3577 hw_state->cfgcr0 = intel_de_read(dev_priv,
3578 ICL_DPLL_CFGCR0(id));
3579 hw_state->cfgcr1 = intel_de_read(dev_priv,
3580 ICL_DPLL_CFGCR1(id));
3586 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3590 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3591 struct intel_shared_dpll *pll,
3592 struct intel_dpll_hw_state *hw_state)
3594 i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3596 return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3599 static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
3600 struct intel_shared_dpll *pll,
3601 struct intel_dpll_hw_state *hw_state)
3603 return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
3606 static void icl_dpll_write(struct drm_i915_private *dev_priv,
3607 struct intel_shared_dpll *pll)
3609 struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3610 const enum intel_dpll_id id = pll->info->id;
3611 i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;
3613 if (IS_ALDERLAKE_S(dev_priv)) {
3614 cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
3615 cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
3616 } else if (IS_DG1(dev_priv)) {
3617 cfgcr0_reg = DG1_DPLL_CFGCR0(id);
3618 cfgcr1_reg = DG1_DPLL_CFGCR1(id);
3619 } else if (IS_ROCKETLAKE(dev_priv)) {
3620 cfgcr0_reg = RKL_DPLL_CFGCR0(id);
3621 cfgcr1_reg = RKL_DPLL_CFGCR1(id);
3622 } else if (DISPLAY_VER(dev_priv) >= 12) {
3623 cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3624 cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3625 div0_reg = TGL_DPLL0_DIV0(id);
3627 if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3628 cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3629 cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3631 cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3632 cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3636 intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
3637 intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
3638 drm_WARN_ON_ONCE(&dev_priv->drm, dev_priv->display.vbt.override_afc_startup &&
3639 !i915_mmio_reg_valid(div0_reg));
3640 if (dev_priv->display.vbt.override_afc_startup &&
3641 i915_mmio_reg_valid(div0_reg))
3642 intel_de_rmw(dev_priv, div0_reg,
3643 TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0);
3644 intel_de_posting_read(dev_priv, cfgcr1_reg);
3647 static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
3648 struct intel_shared_dpll *pll)
3650 struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3651 enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3654 * Some of the following registers have reserved fields, so program
3655 * these with RMW based on a mask. The mask can be fixed or generated
3656 * during the calc/readout phase if the mask depends on some other HW
3657 * state like refclk, see icl_calc_mg_pll_state().
3659 intel_de_rmw(dev_priv, MG_REFCLKIN_CTL(tc_port),
3660 MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl);
3662 intel_de_rmw(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port),
3663 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK,
3664 hw_state->mg_clktop2_coreclkctl1);
3666 intel_de_rmw(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port),
3667 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3668 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3669 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3670 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK,
3671 hw_state->mg_clktop2_hsclkctl);
3673 intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
3674 intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
3675 intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
3676 intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
3677 hw_state->mg_pll_frac_lock);
3678 intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
3680 intel_de_rmw(dev_priv, MG_PLL_BIAS(tc_port),
3681 hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias);
3683 intel_de_rmw(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port),
3684 hw_state->mg_pll_tdc_coldst_bias_mask,
3685 hw_state->mg_pll_tdc_coldst_bias);
3687 intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
3690 static void dkl_pll_write(struct drm_i915_private *dev_priv,
3691 struct intel_shared_dpll *pll)
3693 struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3694 enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3698 * All registers programmed here have the same HIP_INDEX_REG even
3699 * though they are on different building blocks
3701 /* All the registers are RMW */
3702 val = intel_dkl_phy_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
3703 val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3704 val |= hw_state->mg_refclkin_ctl;
3705 intel_dkl_phy_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);
3707 val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3708 val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3709 val |= hw_state->mg_clktop2_coreclkctl1;
3710 intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
3712 val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
3713 val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3714 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3715 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3716 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3717 val |= hw_state->mg_clktop2_hsclkctl;
3718 intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
3720 val = DKL_PLL_DIV0_MASK;
3721 if (dev_priv->display.vbt.override_afc_startup)
3722 val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3723 intel_dkl_phy_rmw(dev_priv, DKL_PLL_DIV0(tc_port), val,
3724 hw_state->mg_pll_div0);
3726 val = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port));
3727 val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
3728 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3729 val |= hw_state->mg_pll_div1;
3730 intel_dkl_phy_write(dev_priv, DKL_PLL_DIV1(tc_port), val);
3732 val = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port));
3733 val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3734 DKL_PLL_SSC_STEP_LEN_MASK |
3735 DKL_PLL_SSC_STEP_NUM_MASK |
3737 val |= hw_state->mg_pll_ssc;
3738 intel_dkl_phy_write(dev_priv, DKL_PLL_SSC(tc_port), val);
3740 val = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port));
3741 val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
3742 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3743 val |= hw_state->mg_pll_bias;
3744 intel_dkl_phy_write(dev_priv, DKL_PLL_BIAS(tc_port), val);
3746 val = intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3747 val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3748 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3749 val |= hw_state->mg_pll_tdc_coldst_bias;
3750 intel_dkl_phy_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
3752 intel_dkl_phy_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3755 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
3756 struct intel_shared_dpll *pll,
3757 i915_reg_t enable_reg)
3759 intel_de_rmw(dev_priv, enable_reg, 0, PLL_POWER_ENABLE);
3762 * The spec says we need to "wait" but it also says it should be
3765 if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
3766 drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
3770 static void icl_pll_enable(struct drm_i915_private *dev_priv,
3771 struct intel_shared_dpll *pll,
3772 i915_reg_t enable_reg)
3774 intel_de_rmw(dev_priv, enable_reg, 0, PLL_ENABLE);
3776 /* Timeout is actually 600us. */
3777 if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
3778 drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
3781 static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
3785 if (!IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0) ||
3786 pll->info->id != DPLL_ID_ICL_DPLL0)
3789 * Wa_16011069516:adl-p[a0]
3791 * All CMTG regs are unreliable until CMTG clock gating is disabled,
3792 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
3793 * sanity check this assumption with a double read, which presumably
3794 * returns the correct value even with clock gating on.
3796 * Instead of the usual place for workarounds we apply this one here,
3797 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
3799 val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
3800 val = intel_de_rmw(i915, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING);
3801 if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
3802 drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
3805 static void combo_pll_enable(struct drm_i915_private *dev_priv,
3806 struct intel_shared_dpll *pll)
3808 i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3810 if (IS_JSL_EHL(dev_priv) &&
3811 pll->info->id == DPLL_ID_EHL_DPLL4) {
3814 * We need to disable DC states when this DPLL is enabled.
3815 * This can be done by taking a reference on DPLL4 power
3818 pll->wakeref = intel_display_power_get(dev_priv,
3819 POWER_DOMAIN_DC_OFF);
3822 icl_pll_power_enable(dev_priv, pll, enable_reg);
3824 icl_dpll_write(dev_priv, pll);
3827 * DVFS pre sequence would be here, but in our driver the cdclk code
3828 * paths should already be setting the appropriate voltage, hence we do
3832 icl_pll_enable(dev_priv, pll, enable_reg);
3834 adlp_cmtg_clock_gating_wa(dev_priv, pll);
3836 /* DVFS post sequence would be here. See the comment above. */
3839 static void tbt_pll_enable(struct drm_i915_private *dev_priv,
3840 struct intel_shared_dpll *pll)
3842 icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
3844 icl_dpll_write(dev_priv, pll);
3847 * DVFS pre sequence would be here, but in our driver the cdclk code
3848 * paths should already be setting the appropriate voltage, hence we do
3852 icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
3854 /* DVFS post sequence would be here. See the comment above. */
3857 static void mg_pll_enable(struct drm_i915_private *dev_priv,
3858 struct intel_shared_dpll *pll)
3860 i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);
3862 icl_pll_power_enable(dev_priv, pll, enable_reg);
3864 if (DISPLAY_VER(dev_priv) >= 12)
3865 dkl_pll_write(dev_priv, pll);
3867 icl_mg_pll_write(dev_priv, pll);
3870 * DVFS pre sequence would be here, but in our driver the cdclk code
3871 * paths should already be setting the appropriate voltage, hence we do
3875 icl_pll_enable(dev_priv, pll, enable_reg);
3877 /* DVFS post sequence would be here. See the comment above. */
3880 static void icl_pll_disable(struct drm_i915_private *dev_priv,
3881 struct intel_shared_dpll *pll,
3882 i915_reg_t enable_reg)
3884 /* The first steps are done by intel_ddi_post_disable(). */
3887 * DVFS pre sequence would be here, but in our driver the cdclk code
3888 * paths should already be setting the appropriate voltage, hence we do
3892 intel_de_rmw(dev_priv, enable_reg, PLL_ENABLE, 0);
3894 /* Timeout is actually 1us. */
3895 if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
3896 drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);
3898 /* DVFS post sequence would be here. See the comment above. */
3900 intel_de_rmw(dev_priv, enable_reg, PLL_POWER_ENABLE, 0);
3903 * The spec says we need to "wait" but it also says it should be
3906 if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
3907 drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
3911 static void combo_pll_disable(struct drm_i915_private *dev_priv,
3912 struct intel_shared_dpll *pll)
3914 i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3916 icl_pll_disable(dev_priv, pll, enable_reg);
3918 if (IS_JSL_EHL(dev_priv) &&
3919 pll->info->id == DPLL_ID_EHL_DPLL4)
3920 intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF,
3924 static void tbt_pll_disable(struct drm_i915_private *dev_priv,
3925 struct intel_shared_dpll *pll)
3927 icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
3930 static void mg_pll_disable(struct drm_i915_private *dev_priv,
3931 struct intel_shared_dpll *pll)
3933 i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);
3935 icl_pll_disable(dev_priv, pll, enable_reg);
3938 static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
3941 i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
3944 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
3945 const struct intel_dpll_hw_state *hw_state)
3947 drm_dbg_kms(&dev_priv->drm,
3948 "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
3949 "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
3950 "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3951 "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
3952 "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3953 "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3954 hw_state->cfgcr0, hw_state->cfgcr1,
3956 hw_state->mg_refclkin_ctl,
3957 hw_state->mg_clktop2_coreclkctl1,
3958 hw_state->mg_clktop2_hsclkctl,
3959 hw_state->mg_pll_div0,
3960 hw_state->mg_pll_div1,
3961 hw_state->mg_pll_lf,
3962 hw_state->mg_pll_frac_lock,
3963 hw_state->mg_pll_ssc,
3964 hw_state->mg_pll_bias,
3965 hw_state->mg_pll_tdc_coldst_bias);
3968 static const struct intel_shared_dpll_funcs combo_pll_funcs = {
3969 .enable = combo_pll_enable,
3970 .disable = combo_pll_disable,
3971 .get_hw_state = combo_pll_get_hw_state,
3972 .get_freq = icl_ddi_combo_pll_get_freq,
3975 static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
3976 .enable = tbt_pll_enable,
3977 .disable = tbt_pll_disable,
3978 .get_hw_state = tbt_pll_get_hw_state,
3979 .get_freq = icl_ddi_tbt_pll_get_freq,
3982 static const struct intel_shared_dpll_funcs mg_pll_funcs = {
3983 .enable = mg_pll_enable,
3984 .disable = mg_pll_disable,
3985 .get_hw_state = mg_pll_get_hw_state,
3986 .get_freq = icl_ddi_mg_pll_get_freq,
3989 static const struct dpll_info icl_plls[] = {
3990 { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
3991 { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
3992 { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
3993 { "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
3994 { "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
3995 { "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
3996 { "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
4000 static const struct intel_dpll_mgr icl_pll_mgr = {
4001 .dpll_info = icl_plls,
4002 .compute_dplls = icl_compute_dplls,
4003 .get_dplls = icl_get_dplls,
4004 .put_dplls = icl_put_dplls,
4005 .update_active_dpll = icl_update_active_dpll,
4006 .update_ref_clks = icl_update_dpll_ref_clks,
4007 .dump_hw_state = icl_dump_hw_state,
4010 static const struct dpll_info ehl_plls[] = {
4011 { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4012 { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4013 { "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
4017 static const struct intel_dpll_mgr ehl_pll_mgr = {
4018 .dpll_info = ehl_plls,
4019 .compute_dplls = icl_compute_dplls,
4020 .get_dplls = icl_get_dplls,
4021 .put_dplls = icl_put_dplls,
4022 .update_ref_clks = icl_update_dpll_ref_clks,
4023 .dump_hw_state = icl_dump_hw_state,
4026 static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
4027 .enable = mg_pll_enable,
4028 .disable = mg_pll_disable,
4029 .get_hw_state = dkl_pll_get_hw_state,
4030 .get_freq = icl_ddi_mg_pll_get_freq,
4033 static const struct dpll_info tgl_plls[] = {
4034 { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4035 { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4036 { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
4037 { "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
4038 { "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
4039 { "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
4040 { "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
4041 { "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
4042 { "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
4046 static const struct intel_dpll_mgr tgl_pll_mgr = {
4047 .dpll_info = tgl_plls,
4048 .compute_dplls = icl_compute_dplls,
4049 .get_dplls = icl_get_dplls,
4050 .put_dplls = icl_put_dplls,
4051 .update_active_dpll = icl_update_active_dpll,
4052 .update_ref_clks = icl_update_dpll_ref_clks,
4053 .dump_hw_state = icl_dump_hw_state,
4056 static const struct dpll_info rkl_plls[] = {
4057 { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4058 { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4059 { "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
4063 static const struct intel_dpll_mgr rkl_pll_mgr = {
4064 .dpll_info = rkl_plls,
4065 .compute_dplls = icl_compute_dplls,
4066 .get_dplls = icl_get_dplls,
4067 .put_dplls = icl_put_dplls,
4068 .update_ref_clks = icl_update_dpll_ref_clks,
4069 .dump_hw_state = icl_dump_hw_state,
4072 static const struct dpll_info dg1_plls[] = {
4073 { "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 },
4074 { "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 },
4075 { "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
4076 { "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
4080 static const struct intel_dpll_mgr dg1_pll_mgr = {
4081 .dpll_info = dg1_plls,
4082 .compute_dplls = icl_compute_dplls,
4083 .get_dplls = icl_get_dplls,
4084 .put_dplls = icl_put_dplls,
4085 .update_ref_clks = icl_update_dpll_ref_clks,
4086 .dump_hw_state = icl_dump_hw_state,
4089 static const struct dpll_info adls_plls[] = {
4090 { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4091 { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4092 { "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
4093 { "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
4097 static const struct intel_dpll_mgr adls_pll_mgr = {
4098 .dpll_info = adls_plls,
4099 .compute_dplls = icl_compute_dplls,
4100 .get_dplls = icl_get_dplls,
4101 .put_dplls = icl_put_dplls,
4102 .update_ref_clks = icl_update_dpll_ref_clks,
4103 .dump_hw_state = icl_dump_hw_state,
4106 static const struct dpll_info adlp_plls[] = {
4107 { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4108 { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4109 { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
4110 { "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
4111 { "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
4112 { "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
4113 { "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
4117 static const struct intel_dpll_mgr adlp_pll_mgr = {
4118 .dpll_info = adlp_plls,
4119 .compute_dplls = icl_compute_dplls,
4120 .get_dplls = icl_get_dplls,
4121 .put_dplls = icl_put_dplls,
4122 .update_active_dpll = icl_update_active_dpll,
4123 .update_ref_clks = icl_update_dpll_ref_clks,
4124 .dump_hw_state = icl_dump_hw_state,
4128 * intel_shared_dpll_init - Initialize shared DPLLs
4129 * @dev_priv: i915 device
4131 * Initialize shared DPLLs for @dev_priv.
4133 void intel_shared_dpll_init(struct drm_i915_private *dev_priv)
4135 const struct intel_dpll_mgr *dpll_mgr = NULL;
4136 const struct dpll_info *dpll_info;
4139 mutex_init(&dev_priv->display.dpll.lock);
4141 if (DISPLAY_VER(dev_priv) >= 14 || IS_DG2(dev_priv))
4142 /* No shared DPLLs on DG2; port PLLs are part of the PHY */
4144 else if (IS_ALDERLAKE_P(dev_priv))
4145 dpll_mgr = &adlp_pll_mgr;
4146 else if (IS_ALDERLAKE_S(dev_priv))
4147 dpll_mgr = &adls_pll_mgr;
4148 else if (IS_DG1(dev_priv))
4149 dpll_mgr = &dg1_pll_mgr;
4150 else if (IS_ROCKETLAKE(dev_priv))
4151 dpll_mgr = &rkl_pll_mgr;
4152 else if (DISPLAY_VER(dev_priv) >= 12)
4153 dpll_mgr = &tgl_pll_mgr;
4154 else if (IS_JSL_EHL(dev_priv))
4155 dpll_mgr = &ehl_pll_mgr;
4156 else if (DISPLAY_VER(dev_priv) >= 11)
4157 dpll_mgr = &icl_pll_mgr;
4158 else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
4159 dpll_mgr = &bxt_pll_mgr;
4160 else if (DISPLAY_VER(dev_priv) == 9)
4161 dpll_mgr = &skl_pll_mgr;
4162 else if (HAS_DDI(dev_priv))
4163 dpll_mgr = &hsw_pll_mgr;
4164 else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
4165 dpll_mgr = &pch_pll_mgr;
4168 dev_priv->display.dpll.num_shared_dpll = 0;
4172 dpll_info = dpll_mgr->dpll_info;
4174 for (i = 0; dpll_info[i].name; i++) {
4175 if (drm_WARN_ON(&dev_priv->drm,
4176 i >= ARRAY_SIZE(dev_priv->display.dpll.shared_dplls)))
4179 drm_WARN_ON(&dev_priv->drm, i != dpll_info[i].id);
4180 dev_priv->display.dpll.shared_dplls[i].info = &dpll_info[i];
4183 dev_priv->display.dpll.mgr = dpll_mgr;
4184 dev_priv->display.dpll.num_shared_dpll = i;
4188 * intel_compute_shared_dplls - compute DPLL state for a CRTC and encoder combination
4189 * @state: atomic state
4190 * @crtc: CRTC to compute DPLLs for
4193 * This function computes the DPLL state for the given CRTC and encoder.
4195 * The new configuration in the atomic commit @state is made effective by
4196 * calling intel_shared_dpll_swap_state().
4199 * 0 on success, negative error code on failure.
4201 int intel_compute_shared_dplls(struct intel_atomic_state *state,
4202 struct intel_crtc *crtc,
4203 struct intel_encoder *encoder)
4205 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4206 const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4208 if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4211 return dpll_mgr->compute_dplls(state, crtc, encoder);
4215 * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4216 * @state: atomic state
4217 * @crtc: CRTC to reserve DPLLs for
4220 * This function reserves all required DPLLs for the given CRTC and encoder
4221 * combination in the current atomic commit @state and the new @crtc atomic
4224 * The new configuration in the atomic commit @state is made effective by
4225 * calling intel_shared_dpll_swap_state().
4227 * The reserved DPLLs should be released by calling
4228 * intel_release_shared_dplls().
4231 * 0 if all required DPLLs were successfully reserved,
4232 * negative error code otherwise.
4234 int intel_reserve_shared_dplls(struct intel_atomic_state *state,
4235 struct intel_crtc *crtc,
4236 struct intel_encoder *encoder)
4238 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4239 const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4241 if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4244 return dpll_mgr->get_dplls(state, crtc, encoder);
4248 * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4249 * @state: atomic state
4250 * @crtc: crtc from which the DPLLs are to be released
4252 * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4253 * from the current atomic commit @state and the old @crtc atomic state.
4255 * The new configuration in the atomic commit @state is made effective by
4256 * calling intel_shared_dpll_swap_state().
void intel_release_shared_dplls(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;

	/*
	 * FIXME: this function is called for every platform having a
	 * compute_clock hook, even though the platform doesn't yet support
	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
	 * called on those.
	 */
	if (!dpll_mgr)
		return;

	dpll_mgr->put_dplls(state, crtc);
}
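
/*
 * Illustrative sketch only: a CRTC that is being disabled, or that is about
 * to switch to a different PLL, drops its old reservation from the same
 * atomic state, e.g.:
 *
 *	if (!new_crtc_state->hw.enable ||
 *	    new_crtc_state->shared_dpll != old_crtc_state->shared_dpll)
 *		intel_release_shared_dplls(state, crtc);
 *
 * The condition above is a simplified placeholder for the caller's actual
 * modeset bookkeeping, not code from this file.
 */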
/**
 * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
 * @state: atomic state
 * @crtc: the CRTC for which to update the active DPLL
 * @encoder: encoder determining the type of port DPLL
 *
 * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
 * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
 * DPLL selected will be based on the current mode of the encoder's port.
 */
void intel_update_active_dpll(struct intel_atomic_state *state,
			      struct intel_crtc *crtc,
			      struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;

	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
		return;

	dpll_mgr->update_active_dpll(state, crtc, encoder);
}
/**
 * intel_dpll_get_freq - calculate the DPLL's output frequency
 * @i915: i915 device
 * @pll: DPLL for which to calculate the output frequency
 * @pll_state: DPLL state from which to calculate the output frequency
 *
 * Return the output frequency corresponding to @pll's passed in @pll_state.
 */
int intel_dpll_get_freq(struct drm_i915_private *i915,
			const struct intel_shared_dpll *pll,
			const struct intel_dpll_hw_state *pll_state)
{
	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
		return 0;

	return pll->info->funcs->get_freq(i915, pll, pll_state);
}
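
/*
 * Illustrative sketch only: state readout code can use this to recover the
 * port clock from the programmed dividers, e.g. (field names assume the
 * usual intel_crtc_state layout):
 *
 *	crtc_state->port_clock =
 *		intel_dpll_get_freq(i915, crtc_state->shared_dpll,
 *				    &crtc_state->dpll_hw_state);
 */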
/**
 * intel_dpll_get_hw_state - readout the DPLL's hardware state
 * @i915: i915 device
 * @pll: DPLL for which to read out the hardware state
 * @hw_state: DPLL's hardware state
 *
 * Read out @pll's hardware state into @hw_state.
 */
bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state)
{
	return pll->info->funcs->get_hw_state(i915, pll, hw_state);
}
static void readout_dpll_hw_state(struct drm_i915_private *i915,
				  struct intel_shared_dpll *pll)
{
	struct intel_crtc *crtc;

	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);

	if (IS_JSL_EHL(i915) && pll->on &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {
		pll->wakeref = intel_display_power_get(i915,
						       POWER_DOMAIN_DC_OFF);
	}

	pll->state.pipe_mask = 0;
	for_each_intel_crtc(&i915->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
			intel_reference_shared_dpll_crtc(crtc, pll, &pll->state);
	}
	pll->active_mask = pll->state.pipe_mask;

	drm_dbg_kms(&i915->drm,
		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
		    pll->info->name, pll->state.pipe_mask, pll->on);
}
void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
{
	if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks)
		i915->display.dpll.mgr->update_ref_clks(i915);
}
void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
		readout_dpll_hw_state(i915, &i915->display.dpll.shared_dplls[i]);
}
static void sanitize_dpll_state(struct drm_i915_private *i915,
				struct intel_shared_dpll *pll)
{
	if (!pll->on)
		return;

	adlp_cmtg_clock_gating_wa(i915, pll);

	if (pll->active_mask)
		return;

	drm_dbg_kms(&i915->drm,
		    "%s enabled but not in use, disabling\n",
		    pll->info->name);

	pll->info->funcs->disable(i915, pll);
	pll->on = false;
}
void intel_dpll_sanitize_state(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
		sanitize_dpll_state(i915, &i915->display.dpll.shared_dplls[i]);
}
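
/*
 * Illustrative note on call order only: during driver load and resume the
 * helpers above are expected to run roughly in this sequence,
 *
 *	intel_dpll_update_ref_clks(i915);
 *	intel_dpll_readout_hw_state(i915);
 *	...
 *	intel_dpll_sanitize_state(i915);
 *
 * so that software tracking matches the hardware before any PLL left enabled
 * by firmware, but unused by an active pipe, is turned off.
 */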
/**
 * intel_dpll_dump_hw_state - write hw_state to dmesg
 * @dev_priv: i915 drm device
 * @hw_state: hw state to be written to the log
 *
 * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
 */
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	if (dev_priv->display.dpll.mgr) {
		dev_priv->display.dpll.mgr->dump_hw_state(dev_priv, hw_state);
	} else {
		/* fallback for platforms that don't use the shared dpll
		 * infrastructure
		 */
		drm_dbg_kms(&dev_priv->drm,
			    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
			    "fp0: 0x%x, fp1: 0x%x\n",
			    hw_state->dpll,
			    hw_state->dpll_md,
			    hw_state->fp0,
			    hw_state->fp1);
	}
}
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	u8 pipe_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);

	active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);

	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(dev_priv, !pll->on && pll->active_mask,
				"pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(dev_priv, pll->on && !pll->active_mask,
				"pll is on but not used by any active pipe\n");
		I915_STATE_WARN(dev_priv, pll->on != active,
				"pll on state mismatch (expected %i, found %i)\n",
				pll->on, active);
	}

	if (!crtc) {
		I915_STATE_WARN(dev_priv,
				pll->active_mask & ~pll->state.pipe_mask,
				"more active pll users than references: 0x%x vs 0x%x\n",
				pll->active_mask, pll->state.pipe_mask);

		return;
	}

	pipe_mask = BIT(crtc->pipe);

	if (new_crtc_state->hw.active)
		I915_STATE_WARN(dev_priv, !(pll->active_mask & pipe_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);
	else
		I915_STATE_WARN(dev_priv, pll->active_mask & pipe_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);

	I915_STATE_WARN(dev_priv, !(pll->state.pipe_mask & pipe_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
			pipe_mask, pll->state.pipe_mask);

	I915_STATE_WARN(dev_priv,
			pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
void intel_shared_dpll_state_verify(struct intel_crtc *crtc,
				    struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (new_crtc_state->shared_dpll)
		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll,
					 crtc, new_crtc_state);

	if (old_crtc_state->shared_dpll &&
	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
		u8 pipe_mask = BIT(crtc->pipe);
		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;

		I915_STATE_WARN(dev_priv, pll->active_mask & pipe_mask,
				"pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
				pipe_name(crtc->pipe), pll->active_mask);
		I915_STATE_WARN(dev_priv, pll->state.pipe_mask & pipe_mask,
				"pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n",
				pipe_name(crtc->pipe), pll->state.pipe_mask);
	}
}
void intel_shared_dpll_verify_disabled(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
		verify_single_dpll_state(i915, &i915->display.dpll.shared_dplls[i],
					 NULL, NULL);
}