1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2020 Marvell International Ltd.
14 #include <asm/sections.h>
17 #include <mach/octeon_ddr.h>
19 #define CONFIG_REF_HERTZ 50000000
21 DECLARE_GLOBAL_DATA_PTR;
23 /* Sign of an integer */
24 static s64 _sign(s64 v)
/*
 * Look up a DDR tuning parameter in the debug environment.
 *
 * The variable name is built from @format and the variadic arguments.
 * Returns the environment string when present (after logging its parsed
 * numeric value), or NULL when the variable is not set.
 */
char *lookup_env(struct ddr_priv *priv, const char *format, ...)
{
	char buffer[64];
	unsigned long value;
	va_list args;
	char *s;

	/* Compose the environment variable name from the format string */
	va_start(args, format);
	vsnprintf(buffer, sizeof(buffer), format, args);
	va_end(args);

	s = ddr_getenv_debug(priv, buffer);
	if (s) {
		/* Log both hex and decimal forms of the override value */
		value = simple_strtoul(s, NULL, 0);
		printf("Parameter found in environment %s=\"%s\" 0x%lx (%ld)\n",
		       buffer, s, value, value);
	}

	return s;
}
51 char *lookup_env_ull(struct ddr_priv *priv, const char *format, ...)
58 va_start(args, format);
59 vsnprintf(buffer, sizeof(buffer), format, args);
62 s = ddr_getenv_debug(priv, buffer);
64 value = simple_strtoull(s, NULL, 0);
65 printf("Parameter found in environment. %s = 0x%016llx\n",
/* Stub used when DDR debug environment lookups are compiled out */
char *lookup_env(struct ddr_priv *priv, const char *format, ...)
{
	return NULL;
}
/* Stub used when DDR debug environment lookups are compiled out */
char *lookup_env_ull(struct ddr_priv *priv, const char *format, ...)
{
	return NULL;
}
83 /* Number of L2C Tag-and-data sections (TADs) that are connected to LMC. */
84 #define CVMX_L2C_TADS ((OCTEON_IS_MODEL(OCTEON_CN68XX) || \
85 OCTEON_IS_MODEL(OCTEON_CN73XX) || \
86 OCTEON_IS_MODEL(OCTEON_CNF75XX)) ? 4 : \
87 (OCTEON_IS_MODEL(OCTEON_CN78XX)) ? 8 : 1)
89 /* Number of L2C IOBs connected to LMC. */
90 #define CVMX_L2C_IOBS ((OCTEON_IS_MODEL(OCTEON_CN68XX) || \
91 OCTEON_IS_MODEL(OCTEON_CN78XX) || \
92 OCTEON_IS_MODEL(OCTEON_CN73XX) || \
93 OCTEON_IS_MODEL(OCTEON_CNF75XX)) ? 2 : 1)
95 #define CVMX_L2C_MAX_MEMSZ_ALLOWED (OCTEON_IS_OCTEON2() ? \
96 (32 * CVMX_L2C_TADS) : \
97 (OCTEON_IS_MODEL(OCTEON_CN70XX) ? \
98 512 : (OCTEON_IS_OCTEON3() ? 1024 : 0)))
101 * Initialize the BIG address in L2C+DRAM to generate proper error
102 * on reading/writing to a non-existent memory location.
104 * @param node OCX CPU node number
105 * @param mem_size Amount of DRAM configured in MB.
106 * @param mode Allow/Disallow reporting errors L2C_INT_SUM[BIGRD,BIGWR].
108 static void cvmx_l2c_set_big_size(struct ddr_priv *priv, u64 mem_size, int mode)
110 if ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) &&
111 !OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X)) {
112 union cvmx_l2c_big_ctl big_ctl;
113 int bits = 0, zero_bits = 0;
116 if (mem_size > (CVMX_L2C_MAX_MEMSZ_ALLOWED * 1024ull)) {
117 printf("WARNING: Invalid memory size(%lld) requested, should be <= %lld\n",
119 (u64)CVMX_L2C_MAX_MEMSZ_ALLOWED * 1024);
120 mem_size = CVMX_L2C_MAX_MEMSZ_ALLOWED * 1024;
131 if ((bits - zero_bits) != 1 || (bits - 9) <= 0) {
132 printf("ERROR: Invalid DRAM size (%lld) requested, refer to L2C_BIG_CTL[maxdram] for valid options.\n",
138 * The BIG/HOLE is logic is not supported in pass1 as per
141 if (mode == 0 && OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
145 big_ctl.s.maxdram = bits - 9;
146 big_ctl.cn61xx.disable = mode;
147 l2c_wr(priv, CVMX_L2C_BIG_CTL, big_ctl.u64);
151 static u32 octeon3_refclock(u32 alt_refclk, u32 ddr_hertz,
152 struct dimm_config *dimm_config)
154 u32 ddr_ref_hertz = CONFIG_REF_HERTZ;
158 debug("%s(%u, %u, %p)\n", __func__, alt_refclk, ddr_hertz, dimm_config);
160 /* Octeon 3 case... */
162 /* we know whether alternate refclk is always wanted
163 * we also know already if we want 2133 MT/s
164 * if alt refclk not always wanted, then probe DDR and
165 * DIMM type if DDR4 and RDIMMs, then set desired refclk
166 * to 100MHz, otherwise to default (50MHz)
167 * depend on ddr_initialize() to do the refclk selection
172 * If alternate refclk was specified, let it override
175 ddr_ref_hertz = alt_refclk * 1000000;
176 printf("%s: DRAM init: %d MHz refclk is REQUESTED ALWAYS\n",
177 __func__, alt_refclk);
178 } else if (ddr_hertz > 1000000000) {
179 ddr_type = get_ddr_type(dimm_config, 0);
180 spd_dimm_type = get_dimm_module_type(dimm_config, 0, ddr_type);
182 debug("ddr type: 0x%x, dimm type: 0x%x\n", ddr_type,
184 /* Is DDR4 and RDIMM just to be sure. */
185 if (ddr_type == DDR4_DRAM &&
186 (spd_dimm_type == 1 || spd_dimm_type == 5 ||
187 spd_dimm_type == 8)) {
188 /* Yes, we require 100MHz refclk, so set it. */
189 ddr_ref_hertz = 100000000;
190 puts("DRAM init: 100 MHz refclk is REQUIRED\n");
194 debug("%s: speed: %u\n", __func__, ddr_ref_hertz);
195 return ddr_ref_hertz;
198 int encode_row_lsb_ddr3(int row_lsb)
200 int row_lsb_start = 14;
202 /* Decoding for row_lsb */
203 /* 000: row_lsb = mem_adr[14] */
204 /* 001: row_lsb = mem_adr[15] */
205 /* 010: row_lsb = mem_adr[16] */
206 /* 011: row_lsb = mem_adr[17] */
207 /* 100: row_lsb = mem_adr[18] */
208 /* 101: row_lsb = mem_adr[19] */
209 /* 110: row_lsb = mem_adr[20] */
212 if (octeon_is_cpuid(OCTEON_CN6XXX) ||
213 octeon_is_cpuid(OCTEON_CNF7XXX) || octeon_is_cpuid(OCTEON_CN7XXX))
216 printf("ERROR: Unsupported Octeon model: 0x%x\n",
219 return row_lsb - row_lsb_start;
222 int encode_pbank_lsb_ddr3(int pbank_lsb)
224 /* Decoding for pbank_lsb */
225 /* 0000:DIMM = mem_adr[28] / rank = mem_adr[27] (if RANK_ENA) */
226 /* 0001:DIMM = mem_adr[29] / rank = mem_adr[28] " */
227 /* 0010:DIMM = mem_adr[30] / rank = mem_adr[29] " */
228 /* 0011:DIMM = mem_adr[31] / rank = mem_adr[30] " */
229 /* 0100:DIMM = mem_adr[32] / rank = mem_adr[31] " */
230 /* 0101:DIMM = mem_adr[33] / rank = mem_adr[32] " */
231 /* 0110:DIMM = mem_adr[34] / rank = mem_adr[33] " */
232 /* 0111:DIMM = 0 / rank = mem_adr[34] " */
233 /* 1000-1111: RESERVED */
235 int pbank_lsb_start = 0;
237 if (octeon_is_cpuid(OCTEON_CN6XXX) ||
238 octeon_is_cpuid(OCTEON_CNF7XXX) || octeon_is_cpuid(OCTEON_CN7XXX))
239 pbank_lsb_start = 28;
241 printf("ERROR: Unsupported Octeon model: 0x%x\n",
244 return pbank_lsb - pbank_lsb_start;
247 static void set_ddr_clock_initialized(struct ddr_priv *priv, int if_num,
250 priv->ddr_clock_initialized[if_num] = inited_flag;
253 static int ddr_clock_initialized(struct ddr_priv *priv, int if_num)
255 return priv->ddr_clock_initialized[if_num];
258 static void set_ddr_memory_preserved(struct ddr_priv *priv)
260 priv->ddr_memory_preserved = true;
263 bool ddr_memory_preserved(struct ddr_priv *priv)
265 return priv->ddr_memory_preserved;
268 static void cn78xx_lmc_dreset_init(struct ddr_priv *priv, int if_num)
270 union cvmx_lmcx_dll_ctl2 dll_ctl2;
273 * The remainder of this section describes the sequence for LMCn.
275 * 1. If not done already, write LMC(0..3)_DLL_CTL2 to its reset value
276 * (except without changing the LMC(0..3)_DLL_CTL2[INTF_EN] value from
277 * that set in the prior Step 3), including
278 * LMC(0..3)_DLL_CTL2[DRESET] = 1.
280 * 2. Without changing any other LMC(0..3)_DLL_CTL2 fields, write
281 * LMC(0..3)_DLL_CTL2[DLL_BRINGUP] = 1.
284 dll_ctl2.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL2(if_num));
285 dll_ctl2.cn78xx.dll_bringup = 1;
286 lmc_wr(priv, CVMX_LMCX_DLL_CTL2(if_num), dll_ctl2.u64);
289 * 3. Read LMC(0..3)_DLL_CTL2 and wait for the result.
292 lmc_rd(priv, CVMX_LMCX_DLL_CTL2(if_num));
295 * 4. Wait for a minimum of 10 LMC CK cycles.
301 * 5. Without changing any other fields in LMC(0..3)_DLL_CTL2, write
302 * LMC(0..3)_DLL_CTL2[QUAD_DLL_ENA] = 1.
303 * LMC(0..3)_DLL_CTL2[QUAD_DLL_ENA] must not change after this point
304 * without restarting the LMCn DRESET initialization sequence.
307 dll_ctl2.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL2(if_num));
308 dll_ctl2.cn78xx.quad_dll_ena = 1;
309 lmc_wr(priv, CVMX_LMCX_DLL_CTL2(if_num), dll_ctl2.u64);
312 * 6. Read LMC(0..3)_DLL_CTL2 and wait for the result.
315 lmc_rd(priv, CVMX_LMCX_DLL_CTL2(if_num));
318 * 7. Wait a minimum of 10 us.
324 * 8. Without changing any other fields in LMC(0..3)_DLL_CTL2, write
325 * LMC(0..3)_DLL_CTL2[DLL_BRINGUP] = 0.
326 * LMC(0..3)_DLL_CTL2[DLL_BRINGUP] must not change after this point
327 * without restarting the LMCn DRESET initialization sequence.
330 dll_ctl2.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL2(if_num));
331 dll_ctl2.cn78xx.dll_bringup = 0;
332 lmc_wr(priv, CVMX_LMCX_DLL_CTL2(if_num), dll_ctl2.u64);
335 * 9. Read LMC(0..3)_DLL_CTL2 and wait for the result.
338 lmc_rd(priv, CVMX_LMCX_DLL_CTL2(if_num));
341 * 10. Without changing any other fields in LMC(0..3)_DLL_CTL2, write
342 * LMC(0..3)_DLL_CTL2[DRESET] = 0.
343 * LMC(0..3)_DLL_CTL2[DRESET] must not change after this point without
344 * restarting the LMCn DRESET initialization sequence.
346 * After completing LMCn DRESET initialization, all LMC CSRs may be
347 * accessed. Prior to completing LMC DRESET initialization, only
348 * LMC(0..3)_DDR_PLL_CTL, LMC(0..3)_DLL_CTL2, LMC(0..3)_RESET_CTL, and
349 * LMC(0..3)_COMP_CTL2 LMC CSRs can be accessed.
352 dll_ctl2.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL2(if_num));
353 dll_ctl2.cn78xx.dreset = 0;
354 lmc_wr(priv, CVMX_LMCX_DLL_CTL2(if_num), dll_ctl2.u64);
357 int initialize_ddr_clock(struct ddr_priv *priv, struct ddr_conf *ddr_conf,
358 u32 cpu_hertz, u32 ddr_hertz, u32 ddr_ref_hertz,
359 int if_num, u32 if_mask)
363 if (ddr_clock_initialized(priv, if_num))
366 if (!ddr_clock_initialized(priv, 0)) { /* Do this once */
367 union cvmx_lmcx_reset_ctl reset_ctl;
371 * Check to see if memory is to be preserved and set global
374 for (i = 3; i >= 0; --i) {
375 if ((if_mask & (1 << i)) == 0)
378 reset_ctl.u64 = lmc_rd(priv, CVMX_LMCX_RESET_CTL(i));
379 if (reset_ctl.s.ddr3psv == 1) {
380 debug("LMC%d Preserving memory\n", i);
381 set_ddr_memory_preserved(priv);
383 /* Re-initialize flags */
384 reset_ctl.s.ddr3pwarm = 0;
385 reset_ctl.s.ddr3psoft = 0;
386 reset_ctl.s.ddr3psv = 0;
387 lmc_wr(priv, CVMX_LMCX_RESET_CTL(i),
394 * ToDo: Add support for these SoCs:
396 * if (octeon_is_cpuid(OCTEON_CN63XX) ||
397 * octeon_is_cpuid(OCTEON_CN66XX) ||
398 * octeon_is_cpuid(OCTEON_CN61XX) || octeon_is_cpuid(OCTEON_CNF71XX))
402 * if (octeon_is_cpuid(OCTEON_CN68XX))
406 * if (octeon_is_cpuid(OCTEON_CN70XX))
410 if (octeon_is_cpuid(OCTEON_CN78XX) || octeon_is_cpuid(OCTEON_CN73XX) ||
411 octeon_is_cpuid(OCTEON_CNF75XX)) {
412 union cvmx_lmcx_dll_ctl2 dll_ctl2;
413 union cvmx_lmcx_dll_ctl3 ddr_dll_ctl3;
414 union cvmx_lmcx_ddr_pll_ctl ddr_pll_ctl;
415 struct dimm_config *dimm_config_table =
416 ddr_conf->dimm_config_table;
417 int en_idx, save_en_idx, best_en_idx = 0;
418 u64 clkf, clkr, max_clkf = 127;
419 u64 best_clkf = 0, best_clkr = 0;
420 u64 best_pll_MHz = 0;
422 u64 min_pll_MHz = 800;
423 u64 max_pll_MHz = 5000;
426 u64 best_calculated_ddr_hertz = 0;
427 u64 calculated_ddr_hertz = 0;
428 u64 orig_ddr_hertz = ddr_hertz;
429 const int _en[] = { 1, 2, 3, 4, 5, 6, 7, 8, 10, 12 };
430 int override_pll_settings;
435 /* ddr_type only indicates DDR4 or DDR3 */
436 ddr_type = (read_spd(&dimm_config_table[0], 0,
437 DDR4_SPD_KEY_BYTE_DEVICE_TYPE) ==
438 0x0C) ? DDR4_DRAM : DDR3_DRAM;
441 * 5.9 LMC Initialization Sequence
443 * There are 13 parts to the LMC initialization procedure:
445 * 1. DDR PLL initialization
447 * 2. LMC CK initialization
449 * 3. LMC interface enable initialization
451 * 4. LMC DRESET initialization
453 * 5. LMC CK local initialization
455 * 6. LMC RESET initialization
457 * 7. Early LMC initialization
459 * 8. LMC offset training
461 * 9. LMC internal Vref training
463 * 10. LMC deskew training
465 * 11. LMC write leveling
467 * 12. LMC read leveling
469 * 13. Final LMC initialization
471 * CN78XX supports two modes:
473 * - two-LMC mode: both LMCs 2/3 must not be enabled
474 * (LMC2/3_DLL_CTL2[DRESET] must be set to 1 and
475 * LMC2/3_DLL_CTL2[INTF_EN]
476 * must be set to 0) and both LMCs 0/1 must be enabled).
478 * - four-LMC mode: all four LMCs 0..3 must be enabled.
480 * Steps 4 and 6..13 should each be performed for each
481 * enabled LMC (either twice or four times). Steps 1..3 and
482 * 5 are more global in nature and each must be executed
483 * exactly once (not once per LMC) each time the DDR PLL
484 * changes or is first brought up. Steps 1..3 and 5 need
485 * not be performed if the DDR PLL is stable.
487 * Generally, the steps are performed in order. The exception
488 * is that the CK local initialization (step 5) must be
489 * performed after some DRESET initializations (step 4) and
490 * before other DRESET initializations when the DDR PLL is
491 * brought up or changed. (The CK local initialization uses
492 * information from some LMCs to bring up the other local
493 * CKs.) The following text describes these ordering
494 * requirements in more detail.
496 * Following any chip reset, the DDR PLL must be brought up,
497 * and all 13 steps should be executed. Subsequently, it is
498 * possible to execute only steps 4 and 6..13, or to execute
501 * The remainder of this section covers these initialization
505 /* Do the following init only once */
509 /* Only for interface #0 ... */
512 * 5.9.3 LMC Interface-Enable Initialization
514 * LMC interface-enable initialization (Step 3) must be
515 * performed after Step 2 for each chip reset and whenever
516 * the DDR clock speed changes. This step needs to be
517 * performed only once, not once per LMC. Perform the
518 * following three substeps for the LMC interface-enable
521 * 1. Without changing any other LMC2_DLL_CTL2 fields
522 * (LMC(0..3)_DLL_CTL2 should be at their reset values after
523 * Step 1), write LMC2_DLL_CTL2[INTF_EN] = 1 if four-LMC
526 * 2. Without changing any other LMC3_DLL_CTL2 fields, write
527 * LMC3_DLL_CTL2[INTF_EN] = 1 if four-LMC mode is desired.
529 * 3. Read LMC2_DLL_CTL2 and wait for the result.
531 * The LMC2_DLL_CTL2[INTF_EN] and LMC3_DLL_CTL2[INTF_EN]
532 * values should not be changed by software from this point.
535 for (i = 0; i < 4; ++i) {
536 if ((if_mask & (1 << i)) == 0)
539 dll_ctl2.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL2(i));
541 dll_ctl2.cn78xx.byp_setting = 0;
542 dll_ctl2.cn78xx.byp_sel = 0;
543 dll_ctl2.cn78xx.quad_dll_ena = 0;
544 dll_ctl2.cn78xx.dreset = 1;
545 dll_ctl2.cn78xx.dll_bringup = 0;
546 dll_ctl2.cn78xx.intf_en = 0;
548 lmc_wr(priv, CVMX_LMCX_DLL_CTL2(i), dll_ctl2.u64);
552 * ###### Interface enable (intf_en) deferred until after
553 * DDR_DIV_RESET=0 #######
557 * 5.9.1 DDR PLL Initialization
559 * DDR PLL initialization (Step 1) must be performed for each
560 * chip reset and whenever the DDR clock speed changes. This
561 * step needs to be performed only once, not once per LMC.
563 * Perform the following eight substeps to initialize the
566 * 1. If not done already, write all fields in
567 * LMC(0..3)_DDR_PLL_CTL and
568 * LMC(0..1)_DLL_CTL2 to their reset values, including:
570 * .. LMC0_DDR_PLL_CTL[DDR_DIV_RESET] = 1
571 * .. LMC0_DLL_CTL2[DRESET] = 1
573 * This substep is not necessary after a chip reset.
577 ddr_pll_ctl.u64 = lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(0));
579 ddr_pll_ctl.cn78xx.reset_n = 0;
580 ddr_pll_ctl.cn78xx.ddr_div_reset = 1;
581 ddr_pll_ctl.cn78xx.phy_dcok = 0;
584 * 73XX pass 1.3 has LMC0 DCLK_INVERT tied to 1; earlier
585 * 73xx passes are tied to 0
587 * 75XX needs LMC0 DCLK_INVERT set to 1 to minimize duty
588 * cycle falling points
590 * and we default all other chips LMC0 to DCLK_INVERT=0
592 ddr_pll_ctl.cn78xx.dclk_invert =
593 !!(octeon_is_cpuid(OCTEON_CN73XX_PASS1_3) ||
594 octeon_is_cpuid(OCTEON_CNF75XX));
597 * allow override of LMC0 desired setting for DCLK_INVERT,
599 * we cannot change LMC0 DCLK_INVERT on 73XX any pass
601 if (!(octeon_is_cpuid(OCTEON_CN73XX))) {
602 s = lookup_env(priv, "ddr0_set_dclk_invert");
604 ddr_pll_ctl.cn78xx.dclk_invert =
605 !!simple_strtoul(s, NULL, 0);
606 debug("LMC0: override DDR_PLL_CTL[dclk_invert] to %d\n",
607 ddr_pll_ctl.cn78xx.dclk_invert);
611 lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(0), ddr_pll_ctl.u64);
612 debug("%-45s : 0x%016llx\n", "LMC0: DDR_PLL_CTL",
615 // only when LMC1 is active
618 * For CNF75XX, both LMC0 and LMC1 use the same PLL,
619 * so we use the LMC0 setting of DCLK_INVERT for LMC1.
621 if (!octeon_is_cpuid(OCTEON_CNF75XX)) {
625 * by default, for non-CNF75XX, we want
628 int lmc0_dclk_invert =
629 ddr_pll_ctl.cn78xx.dclk_invert;
632 * FIXME: work-around for DDR3 UDIMM problems
633 * is to use LMC0 setting on LMC1 and if
634 * 73xx pass 1.3, we want to default LMC1
635 * DCLK_INVERT to LMC0, not the invert of LMC0
637 int lmc1_dclk_invert;
640 ((ddr_type == DDR4_DRAM) &&
641 !octeon_is_cpuid(OCTEON_CN73XX_PASS1_3))
642 ? lmc0_dclk_invert ^ 1 :
646 * allow override of LMC1 desired setting for
649 s = lookup_env(priv, "ddr1_set_dclk_invert");
652 !!simple_strtoul(s, NULL, 0);
655 debug("LMC1: %s DDR_PLL_CTL[dclk_invert] to %d (LMC0 %d)\n",
656 (override) ? "override" :
657 "default", lmc1_dclk_invert,
660 ddr_pll_ctl.cn78xx.dclk_invert =
664 // but always write LMC1 CSR if it is active
665 lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(1), ddr_pll_ctl.u64);
666 debug("%-45s : 0x%016llx\n",
667 "LMC1: DDR_PLL_CTL", ddr_pll_ctl.u64);
671 * 2. If the current DRAM contents are not preserved (see
672 * LMC(0..3)_RESET_ CTL[DDR3PSV]), this is also an appropriate
673 * time to assert the RESET# pin of the DDR3/DDR4 DRAM parts.
675 * LMC0_RESET_ CTL[DDR3RST] = 0 without modifying any other
676 * LMC0_RESET_CTL fields to assert the DDR_RESET_L pin.
677 * No action is required here to assert DDR_RESET_L
678 * following a chip reset. Refer to Section 5.9.6. Do this
679 * for all enabled LMCs.
682 for (i = 0; (!ddr_memory_preserved(priv)) && i < 4; ++i) {
683 union cvmx_lmcx_reset_ctl reset_ctl;
685 if ((if_mask & (1 << i)) == 0)
688 reset_ctl.u64 = lmc_rd(priv, CVMX_LMCX_RESET_CTL(i));
689 reset_ctl.cn78xx.ddr3rst = 0; /* Reset asserted */
690 debug("LMC%d Asserting DDR_RESET_L\n", i);
691 lmc_wr(priv, CVMX_LMCX_RESET_CTL(i), reset_ctl.u64);
692 lmc_rd(priv, CVMX_LMCX_RESET_CTL(i));
696 * 3. Without changing any other LMC0_DDR_PLL_CTL values,
697 * write LMC0_DDR_PLL_CTL[CLKF] with a value that gives a
698 * desired DDR PLL speed. The LMC0_DDR_PLL_CTL[CLKF] value
699 * should be selected in conjunction with the post-scalar
700 * divider values for LMC (LMC0_DDR_PLL_CTL[DDR_PS_EN]) so
701 * that the desired LMC CK speeds are produced (all
702 * enabled LMCs must run the same speed). Section 5.14
703 * describes LMC0_DDR_PLL_CTL[CLKF] and
704 * LMC0_DDR_PLL_CTL[DDR_PS_EN] programmings that produce
705 * the desired LMC CK speed. Section 5.9.2 describes LMC CK
706 * initialization, which can be done separately from the DDR
707 * PLL initialization described in this section.
709 * The LMC0_DDR_PLL_CTL[CLKF] value must not change after
710 * this point without restarting this SDRAM PLL
711 * initialization sequence.
714 /* Init to max error */
716 best_error = ddr_hertz;
718 debug("DDR Reference Hertz = %d\n", ddr_ref_hertz);
720 while (best_error == ddr_hertz) {
721 for (clkr = 0; clkr < 4; ++clkr) {
723 sizeof(_en) / sizeof(int) -
724 1; en_idx >= 0; --en_idx) {
725 save_en_idx = en_idx;
728 (clkr + 1) * (_en[save_en_idx]));
729 clkf = divide_nint(clkf, ddr_ref_hertz)
733 (clkf + 1) / (clkr + 1) / 1000000;
734 calculated_ddr_hertz =
738 1) * (_en[save_en_idx]));
740 ddr_hertz - calculated_ddr_hertz;
742 if (pll_MHz < min_pll_MHz ||
743 pll_MHz > max_pll_MHz)
745 if (clkf > max_clkf) {
747 * PLL requires clkf to be
752 if (abs(error) > abs(best_error))
755 debug("clkr: %2llu, en[%d]: %2d, clkf: %4llu, pll_MHz: %4llu, ddr_hertz: %8llu, error: %8lld\n",
757 _en[save_en_idx], clkf, pll_MHz,
758 calculated_ddr_hertz, error);
760 /* Favor the highest PLL frequency. */
761 if (abs(error) < abs(best_error) ||
762 pll_MHz > best_pll_MHz) {
763 best_pll_MHz = pll_MHz;
764 best_calculated_ddr_hertz =
765 calculated_ddr_hertz;
769 best_en_idx = save_en_idx;
774 override_pll_settings = 0;
776 s = lookup_env(priv, "ddr_pll_clkr");
778 best_clkr = simple_strtoul(s, NULL, 0);
779 override_pll_settings = 1;
782 s = lookup_env(priv, "ddr_pll_clkf");
784 best_clkf = simple_strtoul(s, NULL, 0);
785 override_pll_settings = 1;
788 s = lookup_env(priv, "ddr_pll_en_idx");
790 best_en_idx = simple_strtoul(s, NULL, 0);
791 override_pll_settings = 1;
794 if (override_pll_settings) {
796 ddr_ref_hertz * (best_clkf +
798 (best_clkr + 1) / 1000000;
799 best_calculated_ddr_hertz =
800 ddr_ref_hertz * (best_clkf +
802 ((best_clkr + 1) * (_en[best_en_idx]));
804 ddr_hertz - best_calculated_ddr_hertz;
807 debug("clkr: %2llu, en[%d]: %2d, clkf: %4llu, pll_MHz: %4llu, ddr_hertz: %8llu, error: %8lld <==\n",
808 best_clkr, best_en_idx, _en[best_en_idx],
809 best_clkf, best_pll_MHz,
810 best_calculated_ddr_hertz, best_error);
813 * Try lowering the frequency if we can't get a
814 * working configuration
816 if (best_error == ddr_hertz) {
817 if (ddr_hertz < orig_ddr_hertz - 10000000)
819 ddr_hertz -= 1000000;
820 best_error = ddr_hertz;
824 if (best_error == ddr_hertz) {
825 printf("ERROR: Can not compute a legal DDR clock speed configuration.\n");
829 new_bwadj = (best_clkf + 1) / 10;
830 debug("bwadj: %2d\n", new_bwadj);
832 s = lookup_env(priv, "ddr_pll_bwadj");
834 new_bwadj = strtoul(s, NULL, 0);
835 debug("bwadj: %2d\n", new_bwadj);
838 for (i = 0; i < 2; ++i) {
839 if ((if_mask & (1 << i)) == 0)
843 lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));
844 debug("LMC%d: DDR_PLL_CTL : 0x%016llx\n",
847 ddr_pll_ctl.cn78xx.ddr_ps_en = best_en_idx;
848 ddr_pll_ctl.cn78xx.clkf = best_clkf;
849 ddr_pll_ctl.cn78xx.clkr = best_clkr;
850 ddr_pll_ctl.cn78xx.reset_n = 0;
851 ddr_pll_ctl.cn78xx.bwadj = new_bwadj;
853 lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(i), ddr_pll_ctl.u64);
854 debug("LMC%d: DDR_PLL_CTL : 0x%016llx\n",
858 * For cnf75xx LMC0 and LMC1 use the same PLL so
859 * only program LMC0 PLL.
861 if (octeon_is_cpuid(OCTEON_CNF75XX))
865 for (i = 0; i < 4; ++i) {
866 if ((if_mask & (1 << i)) == 0)
870 * 4. Read LMC0_DDR_PLL_CTL and wait for the result.
873 lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));
876 * 5. Wait a minimum of 3 us.
879 udelay(3); /* Wait 3 us */
882 * 6. Write LMC0_DDR_PLL_CTL[RESET_N] = 1 without
883 * changing any other LMC0_DDR_PLL_CTL values.
887 lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));
888 ddr_pll_ctl.cn78xx.reset_n = 1;
889 lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(i), ddr_pll_ctl.u64);
892 * 7. Read LMC0_DDR_PLL_CTL and wait for the result.
895 lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));
898 * 8. Wait a minimum of 25 us.
901 udelay(25); /* Wait 25 us */
904 * For cnf75xx LMC0 and LMC1 use the same PLL so
905 * only program LMC0 PLL.
907 if (octeon_is_cpuid(OCTEON_CNF75XX))
911 for (i = 0; i < 4; ++i) {
912 if ((if_mask & (1 << i)) == 0)
916 * 5.9.2 LMC CK Initialization
918 * DDR PLL initialization must be completed prior to
919 * starting LMC CK initialization.
921 * Perform the following substeps to initialize the
924 * 1. Without changing any other LMC(0..3)_DDR_PLL_CTL
926 * LMC(0..3)_DDR_PLL_CTL[DDR_DIV_RESET] = 1 and
927 * LMC(0..3)_DDR_PLL_CTL[DDR_PS_EN] with the
928 * appropriate value to get the desired LMC CK speed.
929 * Section 5.14 discusses CLKF and DDR_PS_EN
930 * programmings. The LMC(0..3)_DDR_PLL_CTL[DDR_PS_EN]
931 * must not change after this point without restarting
932 * this LMC CK initialization sequence.
935 ddr_pll_ctl.u64 = lmc_rd(priv,
936 CVMX_LMCX_DDR_PLL_CTL(i));
937 ddr_pll_ctl.cn78xx.ddr_div_reset = 1;
938 lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(i), ddr_pll_ctl.u64);
941 * 2. Without changing any other fields in
942 * LMC(0..3)_DDR_PLL_CTL, write
943 * LMC(0..3)_DDR_PLL_CTL[DDR4_MODE] = 0.
947 lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));
948 ddr_pll_ctl.cn78xx.ddr4_mode =
949 (ddr_type == DDR4_DRAM) ? 1 : 0;
950 lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(i), ddr_pll_ctl.u64);
953 * 3. Read LMC(0..3)_DDR_PLL_CTL and wait for the
957 lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));
960 * 4. Wait a minimum of 1 us.
963 udelay(1); /* Wait 1 us */
966 * ###### Steps 5 through 7 deferred until after
967 * DDR_DIV_RESET=0 #######
971 * 8. Without changing any other LMC(0..3)_COMP_CTL2
973 * LMC(0..3)_COMP_CTL2[CK_CTL,CONTROL_CTL,CMD_CTL]
974 * to the desired DDR*_CK_*_P control and command
975 * signals drive strength.
978 union cvmx_lmcx_comp_ctl2 comp_ctl2;
979 const struct ddr3_custom_config *custom_lmc_config =
980 &ddr_conf->custom_lmc_config;
982 comp_ctl2.u64 = lmc_rd(priv, CVMX_LMCX_COMP_CTL2(i));
984 /* Default 4=34.3 ohm */
985 comp_ctl2.cn78xx.dqx_ctl =
986 (custom_lmc_config->dqx_ctl ==
987 0) ? 4 : custom_lmc_config->dqx_ctl;
988 /* Default 4=34.3 ohm */
989 comp_ctl2.cn78xx.ck_ctl =
990 (custom_lmc_config->ck_ctl ==
991 0) ? 4 : custom_lmc_config->ck_ctl;
992 /* Default 4=34.3 ohm */
993 comp_ctl2.cn78xx.cmd_ctl =
994 (custom_lmc_config->cmd_ctl ==
995 0) ? 4 : custom_lmc_config->cmd_ctl;
997 comp_ctl2.cn78xx.rodt_ctl = 0x4; /* 60 ohm */
999 comp_ctl2.cn70xx.ptune_offset =
1000 (abs(custom_lmc_config->ptune_offset) & 0x7)
1001 | (_sign(custom_lmc_config->ptune_offset) << 3);
1002 comp_ctl2.cn70xx.ntune_offset =
1003 (abs(custom_lmc_config->ntune_offset) & 0x7)
1004 | (_sign(custom_lmc_config->ntune_offset) << 3);
1006 s = lookup_env(priv, "ddr_clk_ctl");
1008 comp_ctl2.cn78xx.ck_ctl =
1009 simple_strtoul(s, NULL, 0);
1012 s = lookup_env(priv, "ddr_ck_ctl");
1014 comp_ctl2.cn78xx.ck_ctl =
1015 simple_strtoul(s, NULL, 0);
1018 s = lookup_env(priv, "ddr_cmd_ctl");
1020 comp_ctl2.cn78xx.cmd_ctl =
1021 simple_strtoul(s, NULL, 0);
1024 s = lookup_env(priv, "ddr_dqx_ctl");
1026 comp_ctl2.cn78xx.dqx_ctl =
1027 simple_strtoul(s, NULL, 0);
1030 s = lookup_env(priv, "ddr_ptune_offset");
1032 comp_ctl2.cn78xx.ptune_offset =
1033 simple_strtoul(s, NULL, 0);
1036 s = lookup_env(priv, "ddr_ntune_offset");
1038 comp_ctl2.cn78xx.ntune_offset =
1039 simple_strtoul(s, NULL, 0);
1042 lmc_wr(priv, CVMX_LMCX_COMP_CTL2(i), comp_ctl2.u64);
1045 * 9. Read LMC(0..3)_DDR_PLL_CTL and wait for the
1049 lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));
1052 * 10. Wait a minimum of 200 ns.
1055 udelay(1); /* Wait 1 us */
1058 * 11. Without changing any other
1059 * LMC(0..3)_DDR_PLL_CTL values, write
1060 * LMC(0..3)_DDR_PLL_CTL[DDR_DIV_RESET] = 0.
1063 ddr_pll_ctl.u64 = lmc_rd(priv,
1064 CVMX_LMCX_DDR_PLL_CTL(i));
1065 ddr_pll_ctl.cn78xx.ddr_div_reset = 0;
1066 lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(i), ddr_pll_ctl.u64);
1069 * 12. Read LMC(0..3)_DDR_PLL_CTL and wait for the
1073 lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));
1076 * 13. Wait a minimum of 200 ns.
1079 udelay(1); /* Wait 1 us */
1083 * Relocated Interface Enable (intf_en) Step
1085 for (i = (octeon_is_cpuid(OCTEON_CN73XX) ||
1086 octeon_is_cpuid(OCTEON_CNF75XX)) ? 1 : 2;
1089 * This step is only necessary for LMC 2 and 3 in
1090 * 4-LMC mode. The mask will cause the unpopulated
1091 * interfaces to be skipped.
1093 if ((if_mask & (1 << i)) == 0)
1096 dll_ctl2.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL2(i));
1097 dll_ctl2.cn78xx.intf_en = 1;
1098 lmc_wr(priv, CVMX_LMCX_DLL_CTL2(i), dll_ctl2.u64);
1099 lmc_rd(priv, CVMX_LMCX_DLL_CTL2(i));
1103 * Relocated PHY_DCOK Step
1105 for (i = 0; i < 4; ++i) {
1106 if ((if_mask & (1 << i)) == 0)
1109 * 5. Without changing any other fields in
1110 * LMC(0..3)_DDR_PLL_CTL, write
1111 * LMC(0..3)_DDR_PLL_CTL[PHY_DCOK] = 1.
1114 ddr_pll_ctl.u64 = lmc_rd(priv,
1115 CVMX_LMCX_DDR_PLL_CTL(i));
1116 ddr_pll_ctl.cn78xx.phy_dcok = 1;
1117 lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(i), ddr_pll_ctl.u64);
1119 * 6. Read LMC(0..3)_DDR_PLL_CTL and wait for
1123 lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));
1126 * 7. Wait a minimum of 20 us.
1129 udelay(20); /* Wait 20 us */
1133 * 5.9.4 LMC DRESET Initialization
1135 * All of the DDR PLL, LMC global CK, and LMC interface
1136 * enable initializations must be completed prior to starting
1137 * this LMC DRESET initialization (Step 4).
1139 * This LMC DRESET step is done for all enabled LMCs.
1141 * There are special constraints on the ordering of DRESET
1142 * initialization (Steps 4) and CK local initialization
1143 * (Step 5) whenever CK local initialization must be executed.
1144 * CK local initialization must be executed whenever the DDR
1145 * PLL is being brought up (for each chip reset* and whenever
1146 * the DDR clock speed changes).
1148 * When Step 5 must be executed in the two-LMC mode case:
1149 * - LMC0 DRESET initialization must occur before Step 5.
1150 * - LMC1 DRESET initialization must occur after Step 5.
1152 * When Step 5 must be executed in the four-LMC mode case:
1153 * - LMC2 and LMC3 DRESET initialization must occur before
1155 * - LMC0 and LMC1 DRESET initialization must occur after
1159 if (octeon_is_cpuid(OCTEON_CN73XX)) {
1160 /* ONE-LMC or TWO-LMC MODE BEFORE STEP 5 for cn73xx */
1161 cn78xx_lmc_dreset_init(priv, 0);
1162 } else if (octeon_is_cpuid(OCTEON_CNF75XX)) {
1163 if (if_mask == 0x3) {
1165 * 2-LMC Mode: LMC1 DRESET must occur
1168 cn78xx_lmc_dreset_init(priv, 1);
1171 /* TWO-LMC MODE DRESET BEFORE STEP 5 */
1173 cn78xx_lmc_dreset_init(priv, 0);
1175 /* FOUR-LMC MODE BEFORE STEP 5 */
1176 if (if_mask == 0xf) {
1177 cn78xx_lmc_dreset_init(priv, 2);
1178 cn78xx_lmc_dreset_init(priv, 3);
1183 * 5.9.5 LMC CK Local Initialization
1185 * All of DDR PLL, LMC global CK, and LMC interface-enable
1186 * initializations must be completed prior to starting this
1187 * LMC CK local initialization (Step 5).
1189 * LMC CK Local initialization must be performed for each
1190 * chip reset and whenever the DDR clock speed changes. This
1191 * step needs to be performed only once, not once per LMC.
1193 * There are special constraints on the ordering of DRESET
1194 * initialization (Steps 4) and CK local initialization
1195 * (Step 5) whenever CK local initialization must be executed.
1196 * CK local initialization must be executed whenever the
1197 * DDR PLL is being brought up (for each chip reset and
1198 * whenever the DDR clock speed changes).
1200 * When Step 5 must be executed in the two-LMC mode case:
1201 * - LMC0 DRESET initialization must occur before Step 5.
1202 * - LMC1 DRESET initialization must occur after Step 5.
1204 * When Step 5 must be executed in the four-LMC mode case:
1205 * - LMC2 and LMC3 DRESET initialization must occur before
1207 * - LMC0 and LMC1 DRESET initialization must occur after
1210 * LMC CK local initialization is different depending on
1211 * whether two-LMC or four-LMC modes are desired.
1214 if (if_mask == 0x3) {
1215 int temp_lmc_if_num = octeon_is_cpuid(OCTEON_CNF75XX) ?
1219 * 5.9.5.1 LMC CK Local Initialization for Two-LMC
1222 * 1. Write LMC0_DLL_CTL3 to its reset value. (Note
1223 * that LMC0_DLL_CTL3[DLL_90_BYTE_SEL] = 0x2 .. 0x8
1224 * should also work.)
1227 ddr_dll_ctl3.u64 = 0;
1228 ddr_dll_ctl3.cn78xx.dclk90_recal_dis = 1;
1230 if (octeon_is_cpuid(OCTEON_CNF75XX))
1231 ddr_dll_ctl3.cn78xx.dll90_byte_sel = 7;
1233 ddr_dll_ctl3.cn78xx.dll90_byte_sel = 1;
1236 CVMX_LMCX_DLL_CTL3(temp_lmc_if_num),
1240 * 2. Read LMC0_DLL_CTL3 and wait for the result.
1243 lmc_rd(priv, CVMX_LMCX_DLL_CTL3(temp_lmc_if_num));
1246 * 3. Without changing any other fields in
1247 * LMC0_DLL_CTL3, write
1248 * LMC0_DLL_CTL3[DCLK90_FWD] = 1. Writing
1249 * LMC0_DLL_CTL3[DCLK90_FWD] = 1
1250 * causes clock-delay information to be forwarded
1251 * from LMC0 to LMC1.
1254 ddr_dll_ctl3.cn78xx.dclk90_fwd = 1;
1256 CVMX_LMCX_DLL_CTL3(temp_lmc_if_num),
1260 * 4. Read LMC0_DLL_CTL3 and wait for the result.
1263 lmc_rd(priv, CVMX_LMCX_DLL_CTL3(temp_lmc_if_num));
1266 if (if_mask == 0xf) {
1268 * 5.9.5.2 LMC CK Local Initialization for Four-LMC
1271 * 1. Write LMC2_DLL_CTL3 to its reset value except
1272 * LMC2_DLL_CTL3[DLL90_BYTE_SEL] = 0x7.
1275 ddr_dll_ctl3.u64 = 0;
1276 ddr_dll_ctl3.cn78xx.dclk90_recal_dis = 1;
1277 ddr_dll_ctl3.cn78xx.dll90_byte_sel = 7;
1278 lmc_wr(priv, CVMX_LMCX_DLL_CTL3(2), ddr_dll_ctl3.u64);
1281 * 2. Write LMC3_DLL_CTL3 to its reset value except
1282 * LMC3_DLL_CTL3[DLL90_BYTE_SEL] = 0x2.
1285 ddr_dll_ctl3.u64 = 0;
1286 ddr_dll_ctl3.cn78xx.dclk90_recal_dis = 1;
1287 ddr_dll_ctl3.cn78xx.dll90_byte_sel = 2;
1288 lmc_wr(priv, CVMX_LMCX_DLL_CTL3(3), ddr_dll_ctl3.u64);
1291 * 3. Read LMC3_DLL_CTL3 and wait for the result.
1294 lmc_rd(priv, CVMX_LMCX_DLL_CTL3(3));
1297 * 4. Without changing any other fields in
1298 * LMC2_DLL_CTL3, write LMC2_DLL_CTL3[DCLK90_FWD] = 1
1299 * and LMC2_DLL_CTL3[DCLK90_RECAL_ DIS] = 1.
1300 * Writing LMC2_DLL_CTL3[DCLK90_FWD] = 1 causes LMC 2
1301 * to forward clockdelay information to LMC0. Setting
1302 * LMC2_DLL_CTL3[DCLK90_RECAL_DIS] to 1 prevents LMC2
1303 * from periodically recalibrating this delay
1307 ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(2));
1308 ddr_dll_ctl3.cn78xx.dclk90_fwd = 1;
1309 ddr_dll_ctl3.cn78xx.dclk90_recal_dis = 1;
1310 lmc_wr(priv, CVMX_LMCX_DLL_CTL3(2), ddr_dll_ctl3.u64);
1313 * 5. Without changing any other fields in
1314 * LMC3_DLL_CTL3, write LMC3_DLL_CTL3[DCLK90_FWD] = 1
1315 * and LMC3_DLL_CTL3[DCLK90_RECAL_ DIS] = 1.
1316 * Writing LMC3_DLL_CTL3[DCLK90_FWD] = 1 causes LMC3
1317 * to forward clockdelay information to LMC1. Setting
1318 * LMC3_DLL_CTL3[DCLK90_RECAL_DIS] to 1 prevents LMC3
1319 * from periodically recalibrating this delay
1323 ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(3));
1324 ddr_dll_ctl3.cn78xx.dclk90_fwd = 1;
1325 ddr_dll_ctl3.cn78xx.dclk90_recal_dis = 1;
1326 lmc_wr(priv, CVMX_LMCX_DLL_CTL3(3), ddr_dll_ctl3.u64);
1329 * 6. Read LMC3_DLL_CTL3 and wait for the result.
1332 lmc_rd(priv, CVMX_LMCX_DLL_CTL3(3));
1335 if (octeon_is_cpuid(OCTEON_CNF75XX)) {
1337 * cnf75xx 2-LMC Mode: LMC0 DRESET must occur after
1338 * Step 5, Do LMC0 for 1-LMC Mode here too
1340 cn78xx_lmc_dreset_init(priv, 0);
1343 /* TWO-LMC MODE AFTER STEP 5 */
1344 if (if_mask == 0x3) {
1345 if (octeon_is_cpuid(OCTEON_CNF75XX)) {
1347 * cnf75xx 2-LMC Mode: LMC0 DRESET must
1348 * occur after Step 5
1350 cn78xx_lmc_dreset_init(priv, 0);
1352 cn78xx_lmc_dreset_init(priv, 1);
1356 /* FOUR-LMC MODE AFTER STEP 5 */
1357 if (if_mask == 0xf) {
1358 cn78xx_lmc_dreset_init(priv, 0);
1359 cn78xx_lmc_dreset_init(priv, 1);
1362 * Enable periodic recalibration of DDR90 delay
1365 ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(0));
1366 ddr_dll_ctl3.cn78xx.dclk90_recal_dis = 0;
1367 lmc_wr(priv, CVMX_LMCX_DLL_CTL3(0), ddr_dll_ctl3.u64);
1368 ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(1));
1369 ddr_dll_ctl3.cn78xx.dclk90_recal_dis = 0;
1370 lmc_wr(priv, CVMX_LMCX_DLL_CTL3(1), ddr_dll_ctl3.u64);
1373 /* Enable fine tune mode for all LMCs */
1374 for (i = 0; i < 4; ++i) {
1375 if ((if_mask & (1 << i)) == 0)
1377 ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(i));
1378 ddr_dll_ctl3.cn78xx.fine_tune_mode = 1;
1379 lmc_wr(priv, CVMX_LMCX_DLL_CTL3(i), ddr_dll_ctl3.u64);
1383 * Enable the trim circuit on the appropriate channels to
1384 * adjust the DDR clock duty cycle for chips that support
1387 if (octeon_is_cpuid(OCTEON_CN78XX_PASS2_X) ||
1388 octeon_is_cpuid(OCTEON_CN73XX) ||
1389 octeon_is_cpuid(OCTEON_CNF75XX)) {
1390 union cvmx_lmcx_phy_ctl lmc_phy_ctl;
1393 for (i = 0; i < 4; ++i) {
1394 if ((if_mask & (1 << i)) == 0)
1398 lmc_rd(priv, CVMX_LMCX_PHY_CTL(i));
1400 if (octeon_is_cpuid(OCTEON_CNF75XX) ||
1401 octeon_is_cpuid(OCTEON_CN73XX_PASS1_3)) {
1403 lmc_phy_ctl.s.lv_mode = 0;
1405 /* Odd LMCs = 0, Even LMCs = 1 */
1406 lmc_phy_ctl.s.lv_mode = (~i) & 1;
1409 debug("LMC%d: PHY_CTL : 0x%016llx\n",
1410 i, lmc_phy_ctl.u64);
1411 lmc_wr(priv, CVMX_LMCX_PHY_CTL(i),
1418 * 5.9.6 LMC RESET Initialization
1420 * NOTE: this is now done as the first step in
1421 * init_octeon3_ddr3_interface, rather than the last step in clock
1422 * init. This reorg allows restarting per-LMC initialization should
1423 * problems be encountered, rather than being forced to resort to
1424 * resetting the chip and starting all over.
1426 * Look for the code in octeon3_lmc.c: perform_lmc_reset().
1429 /* Fallthrough for all interfaces... */
1433 * Start the DDR clock so that its frequency can be measured.
1434 * For some chips we must activate the memory controller with
1435 * init_start to make the DDR clock start to run.
1437 if ((!octeon_is_cpuid(OCTEON_CN6XXX)) &&
1438 (!octeon_is_cpuid(OCTEON_CNF7XXX)) &&
1439 (!octeon_is_cpuid(OCTEON_CN7XXX))) {
1440 union cvmx_lmcx_mem_cfg0 mem_cfg0;
1443 mem_cfg0.s.init_start = 1;
1444 lmc_wr(priv, CVMX_LMCX_MEM_CFG0(if_num), mem_cfg0.u64);
1445 lmc_rd(priv, CVMX_LMCX_MEM_CFG0(if_num));
1448 set_ddr_clock_initialized(priv, if_num, 1);
/*
 * Busy-wait for @cycles ticks of the free-running IPD clock counter
 * (Octeon 1/2 era).  Used as the timing reference when measuring the
 * DDR clock rate.
 */
static void octeon_ipd_delay_cycles(u64 cycles)
	u64 start = csr_rd(CVMX_IPD_CLK_COUNT);
	/* Spin until the counter has advanced by 'cycles' */
	while (start + cycles > csr_rd(CVMX_IPD_CLK_COUNT))
/*
 * Octeon 3 variant of the busy-wait above: Octeon 3 exposes the
 * free-running counter through FPA_CLK_COUNT instead of IPD_CLK_COUNT.
 */
static void octeon_ipd_delay_cycles_o3(u64 cycles)
	u64 start = csr_rd(CVMX_FPA_CLK_COUNT);
	/* Spin until the counter has advanced by 'cycles' */
	while (start + cycles > csr_rd(CVMX_FPA_CLK_COUNT))
/*
 * Measure the actual DDR clock rate on interface @if_num.
 *
 * After initialize_ddr_clock() has the DCLK running, count LMC DCLK
 * ticks against a fixed window of core/bus clock ticks and scale by the
 * known core clock to get the real DDR frequency.  Resets the board if
 * the result is implausibly low (clock misconfigured).
 */
static u32 measure_octeon_ddr_clock(struct ddr_priv *priv,
				    struct ddr_conf *ddr_conf, u32 cpu_hertz,
				    u32 ddr_hertz, u32 ddr_ref_hertz,
				    int if_num, u32 if_mask)
	if (initialize_ddr_clock(priv, ddr_conf, cpu_hertz,
				 ddr_hertz, ddr_ref_hertz, if_num,
	/* Dynamically determine the DDR clock speed */
	if (OCTEON_IS_OCTEON2() || octeon_is_cpuid(OCTEON_CN70XX)) {
		core_clocks = csr_rd(CVMX_IPD_CLK_COUNT);
		ddr_clocks = lmc_rd(priv, CVMX_LMCX_DCLK_CNT(if_num));
		/* How many cpu cycles to measure over */
		octeon_ipd_delay_cycles(100000000);
		core_clocks = csr_rd(CVMX_IPD_CLK_COUNT) - core_clocks;
			lmc_rd(priv, CVMX_LMCX_DCLK_CNT(if_num)) - ddr_clocks;
		/* IPD counter runs at bus clock, so scale by bus_clk */
		calc_ddr_hertz = ddr_clocks * gd->bus_clk / core_clocks;
	} else if (octeon_is_cpuid(OCTEON_CN7XXX)) {
		/* Octeon 3: same method but via the FPA clock counter */
		core_clocks = csr_rd(CVMX_FPA_CLK_COUNT);
		ddr_clocks = lmc_rd(priv, CVMX_LMCX_DCLK_CNT(if_num));
		/* How many cpu cycles to measure over */
		octeon_ipd_delay_cycles_o3(100000000);
		core_clocks = csr_rd(CVMX_FPA_CLK_COUNT) - core_clocks;
			lmc_rd(priv, CVMX_LMCX_DCLK_CNT(if_num)) - ddr_clocks;
		calc_ddr_hertz = ddr_clocks * gd->bus_clk / core_clocks;
		/* Legacy chips: 32-bit DCLK_CNT_LO counter */
		core_clocks = csr_rd(CVMX_IPD_CLK_COUNT);
		 * ignore overflow, starts counting when we enable the
		ddr_clocks = lmc_rd(priv, CVMX_LMCX_DCLK_CNT_LO(if_num));
		/* How many cpu cycles to measure over */
		octeon_ipd_delay_cycles(100000000);
		core_clocks = csr_rd(CVMX_IPD_CLK_COUNT) - core_clocks;
			lmc_rd(priv, CVMX_LMCX_DCLK_CNT_LO(if_num)) - ddr_clocks;
		calc_ddr_hertz = ddr_clocks * cpu_hertz / core_clocks;
	debug("core clocks: %llu, ddr clocks: %llu, calc rate: %llu\n",
	      core_clocks, ddr_clocks, calc_ddr_hertz);
	debug("LMC%d: Measured DDR clock: %lld, cpu clock: %u, ddr clocks: %llu\n",
	      if_num, calc_ddr_hertz, cpu_hertz, ddr_clocks);
	/* Check for unreasonable settings. */
	if (calc_ddr_hertz < 10000) {
		/* Long delay so the message is visible before reset */
		udelay(8000000 * 100);
		printf("DDR clock misconfigured on interface %d. Resetting...\n",
		do_reset(NULL, 0, 0, NULL);
	return calc_ddr_hertz;
/*
 * Read the raw read-leveling response bitmask for byte lane @idx on
 * interface @if_num.  Selects the lane via RLEVEL_CTL[BYTE], reads the
 * register back to flush the write, then returns RLEVEL_DBG[BITMASK].
 */
u64 lmc_ddr3_rl_dbg_read(struct ddr_priv *priv, int if_num, int idx)
	union cvmx_lmcx_rlevel_dbg rlevel_dbg;
	union cvmx_lmcx_rlevel_ctl rlevel_ctl;
	rlevel_ctl.u64 = lmc_rd(priv, CVMX_LMCX_RLEVEL_CTL(if_num));
	rlevel_ctl.s.byte = idx;
	lmc_wr(priv, CVMX_LMCX_RLEVEL_CTL(if_num), rlevel_ctl.u64);
	/* Read back to ensure the byte select took effect */
	lmc_rd(priv, CVMX_LMCX_RLEVEL_CTL(if_num));
	rlevel_dbg.u64 = lmc_rd(priv, CVMX_LMCX_RLEVEL_DBG(if_num));
	return rlevel_dbg.s.bitmask;
/*
 * Read the raw write-leveling response bitmask for byte lane @idx on
 * interface @if_num, analogous to lmc_ddr3_rl_dbg_read() but using the
 * WLEVEL_DBG register (byte select and bitmask share that register).
 */
u64 lmc_ddr3_wl_dbg_read(struct ddr_priv *priv, int if_num, int idx)
	union cvmx_lmcx_wlevel_dbg wlevel_dbg;
	wlevel_dbg.s.byte = idx;
	lmc_wr(priv, CVMX_LMCX_WLEVEL_DBG(if_num), wlevel_dbg.u64);
	/* Read back to ensure the byte select took effect */
	lmc_rd(priv, CVMX_LMCX_WLEVEL_DBG(if_num));
	wlevel_dbg.u64 = lmc_rd(priv, CVMX_LMCX_WLEVEL_DBG(if_num));
	return wlevel_dbg.s.bitmask;
/*
 * Score a read-leveling response bitmask.
 *
 * Finds the widest run of contiguous 1s inside rlevel_bitmask_p->bm and
 * accumulates penalty points for defects: an all-zero (blank) mask,
 * bubbles (0s inside the run), a too-narrow run, trailing stray 1 bits,
 * and (DDR4) extra 1s beyond the desired mask width.  The chosen run's
 * start and width are passed back through @rlevel_bitmask_p; the
 * accumulated error score is the result (0 = clean response).
 */
int validate_ddr3_rlevel_bitmask(struct rlevel_bitmask *rlevel_bitmask_p,
	u64 mask = 0;		/* Used in 64-bit comparisons */
	u64 bitmask = rlevel_bitmask_p->bm;
	/* An empty bitmask means the lane gave no response at all */
	blank += RLEVEL_BITMASK_BLANK_ERROR;
	/* Look for fb, the first bit */
	while (!(temp & 1)) {
	/* Look for lb, the last bit */
	while ((temp >>= 1))
	/*
	 * Start with the max range to try to find the largest mask
	 * within the bitmask data
	 */
	width = MASKRANGE_BITS;
	for (mask = MASKRANGE; mask > 0; mask >>= 1, --width) {
		for (mstart = lastbit - width + 1; mstart >= firstbit;
			temp = mask << mstart;
			if ((bitmask & temp) == temp)
	/* look for any more contiguous 1's to the right of mstart */
	if (width == MASKRANGE_BITS) {	// only when maximum mask
		while ((bitmask >> (mstart - 1)) & 1) {
			// slide right over more 1's
		// count the number of extra bits only for DDR4
		if (ddr_type == DDR4_DRAM)
	/* Penalize any extra 1's beyond the maximum desired mask */
		RLEVEL_BITMASK_TOOLONG_ERROR * ((1 << extras) - 1);
	/* Detect if bitmask is too narrow. */
	narrow = (4 - width) * RLEVEL_BITMASK_NARROW_ERROR;
	/*
	 * detect leading bubble bits, that is, any 0's between first
	 */
	temp = bitmask >> (firstbit + 1);
	i = mstart - firstbit - 1;
		if ((temp & 1) == 0)
			bubble += RLEVEL_BITMASK_BUBBLE_BITS_ERROR;
	/* Now scan the region after the selected mask */
	temp = bitmask >> (mstart + width + extras);
	i = lastbit - (mstart + width + extras - 1);
		/*
		 * Detect 1 bits after the trailing end of
		 * the mask, including last.
		 */
		trailing += RLEVEL_BITMASK_TRAILING_BITS_ERROR;
		/*
		 * Detect trailing bubble bits, that is,
		 * any 0's between end-of-mask and last
		 */
		tbubble += RLEVEL_BITMASK_BUBBLE_BITS_ERROR;
	errors = bubble + tbubble + blank + narrow + trailing + toolong;
	/* Pass out useful statistics */
	rlevel_bitmask_p->mstart = mstart;
	rlevel_bitmask_p->width = width;
	debug_bitmask_print("bm:%08lx mask:%02lx, width:%2u, mstart:%2d, fb:%2u, lb:%2u (bu:%2d, tb:%2d, bl:%2d, n:%2d, t:%2d, x:%2d) errors:%3d %s\n",
			    (unsigned long)bitmask, mask, width, mstart,
			    firstbit, lastbit, bubble, tbubble, blank,
			    narrow, trailing, toolong, errors,
			    (errors) ? "=> invalid" : "");
/*
 * Convert a validated read-leveling mask (start @mstart, width @width)
 * into a single delay setting.  With RLEVEL_CTL[OFFSET_EN] set, the
 * delay is biased toward the end of the window by OFFSET (clamped to
 * the window); otherwise the middle of the window is used.
 */
int compute_ddr3_rlevel_delay(u8 mstart, u8 width,
			      union cvmx_lmcx_rlevel_ctl rlevel_ctl)
	debug_bitmask_print(" offset_en:%d", rlevel_ctl.s.offset_en);
	if (rlevel_ctl.s.offset_en) {
		delay = max((int)mstart,
			    (int)(mstart + width - 1 - rlevel_ctl.s.offset));
	/* if (rlevel_ctl.s.offset) { *//* Experimental */
		delay = max(mstart + rlevel_ctl.s.offset, mstart + 1);
		/*
		 * Ensure that the offset delay falls within the
		 */
		delay = min(delay, mstart + width - 1);
		/* No offset: pick the center of the passing window */
		delay = (width - 1) / 2 + mstart;
/* Default ODT config must disable ODT */
/* Must be const (read only) so that the structure is in flash */
/* NOTE(review): appears to hold one all-zero entry per rank count (1-4)
 * as labeled below - confirm indexing against struct dimm_odt_config users
 */
const struct dimm_odt_config disable_odt_config[] = {
	/* 1 */ { 0, 0x0000, {.u64 = 0x0000}, {.u64 = 0x0000}, 0, 0x0000, 0 },
	/* 2 */ { 0, 0x0000, {.u64 = 0x0000}, {.u64 = 0x0000}, 0, 0x0000, 0 },
	/* 3 */ { 0, 0x0000, {.u64 = 0x0000}, {.u64 = 0x0000}, 0, 0x0000, 0 },
	/* 4 */ { 0, 0x0000, {.u64 = 0x0000}, {.u64 = 0x0000}, 0, 0x0000, 0 },
/* Memory controller setup function */
/*
 * Bring up one DRAM interface and return its size in MB.
 *
 * On Octeon 3 a failed init (helper returns 0) is retried up to
 * ddr_restart_retries_limit times via the (elided) restart_lmc_init
 * label; if retries are exhausted the node is reset.  Returns -1 when
 * no restart is possible.
 */
static int init_octeon_dram_interface(struct ddr_priv *priv,
				      struct ddr_conf *ddr_conf,
				      u32 ddr_hertz, u32 cpu_hertz,
				      u32 ddr_ref_hertz, int if_num,
	u32 mem_size_mbytes = 0;
	/* Environment override for the DDR timing frequency */
	s = lookup_env(priv, "ddr_timing_hertz");
		ddr_hertz = simple_strtoul(s, NULL, 0);
	if (OCTEON_IS_OCTEON3()) {
		int lmc_restart_retries = 0;
#define DEFAULT_RESTART_RETRIES 3
		int lmc_restart_retries_limit = DEFAULT_RESTART_RETRIES;
		s = lookup_env(priv, "ddr_restart_retries_limit");
			lmc_restart_retries_limit = simple_strtoul(s, NULL, 0);
		mem_size_mbytes = init_octeon3_ddr3_interface(priv, ddr_conf,
		if (mem_size_mbytes == 0) {	// 0 means restart is possible
			if (lmc_restart_retries < lmc_restart_retries_limit) {
				lmc_restart_retries++;
				printf("N0.LMC%d Configuration problem: attempting LMC reset and init restart %d\n",
				       if_num, lmc_restart_retries);
				goto restart_lmc_init;
				if (lmc_restart_retries_limit > 0) {
					printf("INFO: N0.LMC%d Configuration: fatal problem remains after %d LMC init retries - Resetting node...\n",
					       if_num, lmc_restart_retries);
					do_reset(NULL, 0, 0, NULL);
				// return an error, no restart
				mem_size_mbytes = -1;
	debug("N0.LMC%d Configuration Completed: %d MB\n",
	      if_num, mem_size_mbytes);
	return mem_size_mbytes;
/* Each byte lane's write-leveling delay is a 5-bit field in WLEVEL_RANKx */
#define WLEVEL_BYTE_BITS 5
#define WLEVEL_BYTE_MSK ((1ULL << 5) - 1)
/*
 * Set the write-leveling delay for one byte lane (0-8) in the cached
 * WLEVEL_RANKx image: clear the lane's 5-bit field, then OR in @delay.
 * Out-of-range @byte values are silently ignored.
 */
void upd_wl_rank(union cvmx_lmcx_wlevel_rankx *lmc_wlevel_rank,
		 int byte, int delay)
	union cvmx_lmcx_wlevel_rankx temp_wlevel_rank;
	if (byte >= 0 && byte <= 8) {
		temp_wlevel_rank.u64 = lmc_wlevel_rank->u64;
		temp_wlevel_rank.u64 &=
		    ~(WLEVEL_BYTE_MSK << (WLEVEL_BYTE_BITS * byte));
		temp_wlevel_rank.u64 |=
		    ((delay & WLEVEL_BYTE_MSK) << (WLEVEL_BYTE_BITS * byte));
		lmc_wlevel_rank->u64 = temp_wlevel_rank.u64;
/*
 * Extract the 5-bit write-leveling delay for byte lane @byte (0-8)
 * from the cached WLEVEL_RANKx image.
 */
int get_wl_rank(union cvmx_lmcx_wlevel_rankx *lmc_wlevel_rank, int byte)
	if (byte >= 0 && byte <= 8)
		((lmc_wlevel_rank->u64) >> (WLEVEL_BYTE_BITS *
					    byte)) & WLEVEL_BYTE_MSK;
/*
 * Set the read-leveling delay for one byte lane (0-8) in the cached
 * RLEVEL_RANKx image - read-leveling counterpart of upd_wl_rank().
 */
void upd_rl_rank(union cvmx_lmcx_rlevel_rankx *lmc_rlevel_rank,
		 int byte, int delay)
	union cvmx_lmcx_rlevel_rankx temp_rlevel_rank;
	if (byte >= 0 && byte <= 8) {
		temp_rlevel_rank.u64 =
		    lmc_rlevel_rank->u64 & ~(RLEVEL_BYTE_MSK <<
					     (RLEVEL_BYTE_BITS * byte));
		temp_rlevel_rank.u64 |=
		    ((delay & RLEVEL_BYTE_MSK) << (RLEVEL_BYTE_BITS * byte));
		lmc_rlevel_rank->u64 = temp_rlevel_rank.u64;
/*
 * Extract the read-leveling delay for byte lane @byte (0-8) from the
 * cached RLEVEL_RANKx image.
 */
int get_rl_rank(union cvmx_lmcx_rlevel_rankx *lmc_rlevel_rank, int byte)
	if (byte >= 0 && byte <= 8)
		((lmc_rlevel_rank->u64) >> (RLEVEL_BYTE_BITS *
					    byte)) & RLEVEL_BYTE_MSK;
/*
 * Derive an initial write-leveling delay for @byte from its
 * read-leveling result: halve it (rounding up) and force it even
 * (& 0x1e), then store it into the write-leveling image.
 */
void rlevel_to_wlevel(union cvmx_lmcx_rlevel_rankx *lmc_rlevel_rank,
		      union cvmx_lmcx_wlevel_rankx *lmc_wlevel_rank, int byte)
	int byte_delay = get_rl_rank(lmc_rlevel_rank, byte);
	debug("Estimating Wlevel delay byte %d: ", byte);
	debug("Rlevel=%d => ", byte_delay);
	byte_delay = divide_roundup(byte_delay, 2) & 0x1e;
	debug("Wlevel=%d\n", byte_delay);
	upd_wl_rank(lmc_wlevel_rank, byte, byte_delay);
/*
 * Delay trend: constant=0, decreasing=-1, increasing=1
 * (i.e. maps a delay difference to its sign, for nonseq_del() below)
 */
static s64 calc_delay_trend(s64 v)
1853 * Evaluate delay sequence across the whole range of byte delays while
1854 * keeping track of the overall delay trend, increasing or decreasing.
1855 * If the trend changes charge an error amount to the score.
1858 // NOTE: "max_adj_delay_inc" argument is, by default, 1 for DDR3 and 2 for DDR4
/*
 * Walk byte delays [start..end] and score non-monotonic behavior:
 * charge RLEVEL_NONSEQUENTIAL_DELAY_ERROR on each trend reversal and
 * RLEVEL_ADJACENT_DELAY_ERROR scaled by how far an adjacent step
 * exceeds @max_adj_delay_inc.  Per-byte penalties are recorded in
 * rlevel_byte[].sqerrs; the accumulated total is the result.
 */
int nonseq_del(struct rlevel_byte_data *rlevel_byte, int start, int end,
	       int max_adj_delay_inc)
	s64 delay_trend, prev_trend = 0;
	for (byte_idx = start; byte_idx < end; ++byte_idx) {
		delay_diff = rlevel_byte[byte_idx + 1].delay -
			rlevel_byte[byte_idx].delay;
		delay_trend = calc_delay_trend(delay_diff);
		/*
		 * Increment error each time the trend changes to the
		 * opposite direction.
		 */
		if (prev_trend != 0 && delay_trend != 0 &&
		    prev_trend != delay_trend) {
			seq_err = RLEVEL_NONSEQUENTIAL_DELAY_ERROR;
		// how big was the delay change, if any
		delay_inc = abs(delay_diff);
		/*
		 * Even if the trend did not change to the opposite direction,
		 * check for the magnitude of the change, and scale the
		 * penalty by the amount that the size is larger than the
		 */
		if (max_adj_delay_inc != 0 && delay_inc > max_adj_delay_inc) {
			adj_err = (delay_inc - max_adj_delay_inc) *
				RLEVEL_ADJACENT_DELAY_ERROR;
		rlevel_byte[byte_idx + 1].sqerrs = seq_err + adj_err;
		error += seq_err + adj_err;
		debug_bitmask_print("Byte %d: %d, Byte %d: %d, delay_trend: %ld, prev_trend: %ld, [%ld/%ld]%s%s\n",
				    rlevel_byte[byte_idx + 0].delay,
				    rlevel_byte[byte_idx + 1].delay,
				    prev_trend, seq_err, adj_err,
				    " => Nonsequential byte delay" : "",
				    " => Adjacent delay error" : "");
		/* Remember the last non-flat trend for reversal detection */
		if (delay_trend != 0)
			prev_trend = delay_trend;
/*
 * Round a write-leveling response bitmask up to an even delay setting.
 *
 * Skip past the run of 1 bits starting at bit 0, then find the next
 * set bit, wrapping around the low byte (positions 8-15 alias back to
 * bits 0-7).  An odd bit position is rounded up to the next even value,
 * since write-leveling delays are programmed in even steps.
 */
int roundup_ddr3_wlevel_bitmask(int bitmask)
{
	int pos = 0;
	int delay;

	/* Step over the leading run of 1s (at most the low 8 bits) */
	while (pos < 8 && ((bitmask >> pos) & 1))
		++pos;

	/* Find the next 1 bit, wrapping within the low byte */
	while (pos < 16 && !((bitmask >> (pos % 8)) & 1))
		++pos;

	/* Round an odd position up to the next even delay */
	delay = (pos & 1) ? pos + 1 : pos;

	return delay;
}
/*
 * Run one LMC init sequence on Octeon 2 for the ranks in @rank_mask.
 * Temporarily forces 2T command timing on CN63XX pass 1 (errata
 * LMC-14548), kicks the sequence via CONFIG[INIT_START/SEQUENCE],
 * waits, then restores the saved 2T setting.
 */
static void oct2_ddr3_seq(struct ddr_priv *priv, int rank_mask, int if_num,
#ifdef DEBUG_PERFORM_DDR3_SEQUENCE
	static const char * const sequence_str[] = {
		"self-refresh entry",
		"self-refresh exit",
		"precharge power-down entry",
		"precharge power-down exit",
	union cvmx_lmcx_control lmc_control;
	union cvmx_lmcx_config lmc_config;
	lmc_control.u64 = lmc_rd(priv, CVMX_LMCX_CONTROL(if_num));
	save_ddr2t = lmc_control.s.ddr2t;
	if (save_ddr2t == 0 && octeon_is_cpuid(OCTEON_CN63XX_PASS1_X)) {
		/* Some register parts (IDT and TI included) do not like
		 * the sequence that LMC generates for an MRS register
		 * write in 1T mode. In this case, the register part does
		 * not properly forward the MRS register write to the DRAM
		 * parts. See errata (LMC-14548) Issues with registered
		 */
		debug("Forcing DDR 2T during init seq. Re: Pass 1 LMC-14548\n");
		lmc_control.s.ddr2t = 1;
	/* Environment override of the 2T setting for debug */
	s = lookup_env(priv, "ddr_init_2t");
		lmc_control.s.ddr2t = simple_strtoul(s, NULL, 0);
	lmc_wr(priv, CVMX_LMCX_CONTROL(if_num), lmc_control.u64);
	lmc_config.u64 = lmc_rd(priv, CVMX_LMCX_CONFIG(if_num));
	lmc_config.s.init_start = 1;
	if (OCTEON_IS_OCTEON2())
		lmc_config.cn63xx.sequence = sequence;
	lmc_config.s.rankmask = rank_mask;
#ifdef DEBUG_PERFORM_DDR3_SEQUENCE
	debug("Performing LMC sequence: rank_mask=0x%02x, sequence=%d, %s\n",
	      rank_mask, sequence, sequence_str[sequence]);
	lmc_wr(priv, CVMX_LMCX_CONFIG(if_num), lmc_config.u64);
	/* Read back to flush, then let the sequence run */
	lmc_rd(priv, CVMX_LMCX_CONFIG(if_num));
	udelay(600);		/* Wait a while */
	/* Restore the caller's 2T timing mode */
	lmc_control.s.ddr2t = save_ddr2t;
	lmc_wr(priv, CVMX_LMCX_CONTROL(if_num), lmc_control.u64);
	lmc_rd(priv, CVMX_LMCX_CONTROL(if_num));
/*
 * Check to see if any custom DLL offset values are used.
 *
 * Returns 1 when @dll_offset_table is non-NULL and at least one of its
 * nine per-byte entries is non-zero, 0 otherwise.
 */
static int is_dll_offset_provided(const int8_t *dll_offset_table)
{
	int idx;

	/* No table at all means no custom offsets */
	if (!dll_offset_table)
		return 0;

	for (idx = 0; idx < 9; ++idx) {
		if (dll_offset_table[idx])
			return 1;
	}

	return 0;
}
/*
 * Enable (@change != 0) or disable (@change == 0) the DLL offset
 * machinery via DLL_CTL3[OFFSET_ENA] on interface @if_num.  The final
 * read-back flushes the write before the caller proceeds.
 */
void change_dll_offset_enable(struct ddr_priv *priv, int if_num, int change)
	union cvmx_lmcx_dll_ctl3 ddr_dll_ctl3;
	ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(if_num));
	SET_DDR_DLL_CTL3(offset_ena, !!change);
	lmc_wr(priv, CVMX_LMCX_DLL_CTL3(if_num), ddr_dll_ctl3.u64);
	ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(if_num));
/*
 * Program one sign-magnitude DLL offset into DLL_CTL3 for the given
 * byte lane and mode (read/write), pulsing LOAD_OFFSET 0 -> 1 so the
 * hardware latches it.  Returns the OFFSET field as written back.
 */
unsigned short load_dll_offset(struct ddr_priv *priv, int if_num,
			       int dll_offset_mode, int byte_offset, int byte)
	union cvmx_lmcx_dll_ctl3 ddr_dll_ctl3;
	int field_width = 6;
	/*
	 * BYTE_SEL encoding: 0x1 = byte 0, ..., 0x9 = byte 8
	 * (byte == 10 is passed through unmodified)
	 */
	int byte_sel = (byte == 10) ? byte : byte + 1;
	/* CN6XXX uses a narrower offset field - width adjusted here */
	if (octeon_is_cpuid(OCTEON_CN6XXX))
	/* Step 1: deassert LOAD_OFFSET before changing the fields */
	ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(if_num));
	SET_DDR_DLL_CTL3(load_offset, 0);
	lmc_wr(priv, CVMX_LMCX_DLL_CTL3(if_num), ddr_dll_ctl3.u64);
	ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(if_num));
	/* Step 2: set mode, sign-magnitude offset, and byte select */
	SET_DDR_DLL_CTL3(mode_sel, dll_offset_mode);
	SET_DDR_DLL_CTL3(offset,
			 (abs(byte_offset) & (~(-1 << field_width))) |
			 (_sign(byte_offset) << field_width));
	SET_DDR_DLL_CTL3(byte_sel, byte_sel);
	lmc_wr(priv, CVMX_LMCX_DLL_CTL3(if_num), ddr_dll_ctl3.u64);
	ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(if_num));
	/* Step 3: assert LOAD_OFFSET to latch the new value */
	SET_DDR_DLL_CTL3(load_offset, 1);
	lmc_wr(priv, CVMX_LMCX_DLL_CTL3(if_num), ddr_dll_ctl3.u64);
	ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(if_num));
	return (unsigned short)GET_DDR_DLL_CTL3(offset);
/*
 * Apply per-byte DLL offsets from a board-configured table and/or
 * environment overrides.  @enable_str gates overrides; @offsets is the
 * configured table (may be NULL); @byte_str is the per-byte env name
 * template.  Offsets are loaded with the enable dropped, then re-enabled.
 */
void process_custom_dll_offsets(struct ddr_priv *priv, int if_num,
				const char *enable_str,
				const int8_t *offsets, const char *byte_str,
	unsigned short offset[9] = { 0 };
	s = lookup_env(priv, enable_str);
		enabled = !!simple_strtol(s, NULL, 0);
	/*
	 * enabled == -1: no override, do only configured offsets if provided
	 * enabled == 0: override OFF, do NOT do it even if configured
	 * enabled == 1: override ON, do it for overrides plus configured
	 */
	provided = is_dll_offset_provided(offsets);
	/* Nothing configured and no override requested: leave HW alone */
	if (enabled < 0 && !provided)
	/* Disable offsets while (re)loading them */
	change_dll_offset_enable(priv, if_num, 0);
	for (byte = 0; byte < 9; ++byte) {
		// always take the provided, if available
		byte_offset = (provided) ? offsets[byte] : 0;
		// then, if enabled, use any overrides present
			s = lookup_env(priv, byte_str, if_num, byte);
				byte_offset = simple_strtol(s, NULL, 0);
		    load_dll_offset(priv, if_num, mode, byte_offset, byte);
	/* Re-enable offsets now that they are all loaded */
	change_dll_offset_enable(priv, if_num, 1);
	debug("N0.LMC%d: DLL %s Offset 8:0 : 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
	      if_num, (mode == 2) ? "Read " : "Write",
	      offset[8], offset[7], offset[6], offset[5], offset[4],
	      offset[3], offset[2], offset[1], offset[0]);
/*
 * Run the DRAM init sequence for every rank in @rank_mask on interface
 * @if_num, dispatching to the Octeon 2 or Octeon 3 sequencer.  The env
 * vars ddr%d_init_loops, ddr_sequence1 and ddr_sequence2 allow extra
 * loops / extra debug sequences to be injected.
 */
void ddr_init_seq(struct ddr_priv *priv, int rank_mask, int if_num)
	int ddr_init_loops = 1;
	s = lookup_env(priv, "ddr%d_init_loops", if_num);
		ddr_init_loops = simple_strtoul(s, NULL, 0);
	while (ddr_init_loops--) {
		for (rankx = 0; rankx < 8; rankx++) {
			/* Skip ranks not present in the mask */
			if (!(rank_mask & (1 << rankx)))
			if (OCTEON_IS_OCTEON3()) {
				/* Sequence 0 = power-up/init */
				oct3_ddr3_seq(priv, 1 << rankx, if_num, 0);
				oct2_ddr3_seq(priv, 1 << rankx, if_num, 0);
			udelay(1000);	/* Wait a while. */
			/* Optional extra debug sequences from environment */
			s = lookup_env(priv, "ddr_sequence1");
				sequence1 = simple_strtoul(s, NULL, 0);
				if (OCTEON_IS_OCTEON3()) {
					oct3_ddr3_seq(priv, 1 << rankx,
					oct2_ddr3_seq(priv, 1 << rankx,
			s = lookup_env(priv, "ddr_sequence2");
				sequence2 = simple_strtoul(s, NULL, 0);
				if (OCTEON_IS_OCTEON3())
					oct3_ddr3_seq(priv, 1 << rankx,
					oct2_ddr3_seq(priv, 1 << rankx,
/*
 * Top-level DRAM bring-up across all (up to four) LMC interfaces.
 *
 * Processes debug environment flags, clamps the requested DDR speed,
 * configures the L2C (ECC, index aliasing, early fill count), selects
 * the LMC topology (1/2/4-LMC modes), measures the actual DDR clock
 * per interface, then initializes each valid interface.  Returns the
 * total memory size in MB (negative on failure); the measured clock is
 * passed back through @measured_ddr_hertz.
 */
static int octeon_ddr_initialize(struct ddr_priv *priv, u32 cpu_hertz,
				 u32 ddr_hertz, u32 ddr_ref_hertz,
				 struct ddr_conf *ddr_conf,
				 u32 *measured_ddr_hertz)
	u32 ddr_conf_valid_mask = 0;
	int memsize_mbytes = 0;
	u32 ddr_max_speed = 667000000;
	u32 calc_ddr_hertz = -1;
	/* Debug/verbosity flags driven by environment variables */
	if (env_get("ddr_verbose") || env_get("ddr_prompt"))
		priv->flags |= FLAG_DDR_VERBOSE;
		priv->flags |= FLAG_DDR_VERBOSE;
	if (env_get("ddr_trace_init")) {
		printf("Parameter ddr_trace_init found in environment.\n");
		priv->flags |= FLAG_DDR_TRACE_INIT;
		priv->flags |= FLAG_DDR_VERBOSE;
		priv->flags |= FLAG_DDR_DEBUG;
	val = env_get_ulong("ddr_debug", 10, (u32)-1);
			priv->flags &= ~FLAG_DDR_DEBUG;
			printf("Parameter ddr_debug clear in environment\n");
		printf("Parameter ddr_debug set in environment\n");
		priv->flags |= FLAG_DDR_DEBUG;
		priv->flags |= FLAG_DDR_VERBOSE;
	if (env_get("ddr_prompt"))
		priv->flags |= FLAG_DDR_PROMPT;
	/* Force ddr_verbose for failsafe debugger */
	if (priv->flags & FLAG_FAILSAFE_MODE)
		priv->flags |= FLAG_DDR_VERBOSE;
	priv->flags |= FLAG_DDR_DEBUG;
	/* Keep verbose on while we are still debugging. */
	priv->flags |= FLAG_DDR_VERBOSE;
	/* Clamp the requested speed to what the chip family supports */
	if ((octeon_is_cpuid(OCTEON_CN61XX) ||
	     octeon_is_cpuid(OCTEON_CNF71XX)) && ddr_max_speed > 533333333) {
		ddr_max_speed = 533333333;
	} else if (octeon_is_cpuid(OCTEON_CN7XXX)) {
		/* Override speed restrictions to support internal testing. */
		ddr_max_speed = 1210000000;
	if (ddr_hertz > ddr_max_speed) {
		printf("DDR clock speed %u exceeds maximum supported DDR speed, reducing to %uHz\n",
		       ddr_hertz, ddr_max_speed);
		ddr_hertz = ddr_max_speed;
	if (OCTEON_IS_OCTEON3()) {	// restrict check
		if (ddr_hertz > cpu_hertz) {
			printf("\nFATAL ERROR: DDR speed %u exceeds CPU speed %u, exiting...\n\n",
			       ddr_hertz, cpu_hertz);
	/* Optionally disable L2 ECC before touching DRAM */
	eptr = env_get("disable_l2_ecc");
		printf("Disabling L2 ECC based on disable_l2_ecc environment variable\n");
		union cvmx_l2c_ctl l2c_val;
		l2c_val.u64 = l2c_rd(priv, CVMX_L2C_CTL);
		l2c_val.s.disecc = 1;
		l2c_wr(priv, CVMX_L2C_CTL, l2c_val.u64);
		union cvmx_l2c_ctl l2c_val;
		l2c_val.u64 = l2c_rd(priv, CVMX_L2C_CTL);
		l2c_val.s.disecc = 0;
		l2c_wr(priv, CVMX_L2C_CTL, l2c_val.u64);
	 * Init the L2C, must be done before DRAM access so that we
	eptr = env_get("disable_l2_index_aliasing");
		union cvmx_l2c_ctl l2c_val;
		puts("L2 index aliasing disabled.\n");
		l2c_val.u64 = l2c_rd(priv, CVMX_L2C_CTL);
		l2c_val.s.disidxalias = 1;
		l2c_wr(priv, CVMX_L2C_CTL, l2c_val.u64);
		union cvmx_l2c_ctl l2c_val;
		/* Enable L2C index aliasing */
		l2c_val.u64 = l2c_rd(priv, CVMX_L2C_CTL);
		l2c_val.s.disidxalias = 0;
		l2c_wr(priv, CVMX_L2C_CTL, l2c_val.u64);
	if (OCTEON_IS_OCTEON3()) {
		 * rdf_cnt: Defines the sample point of the LMC response data in
		 *          the DDR-clock/core-clock crossing. For optimal
		 *          performance set to 10 * (DDR-clock period/core-clock
		 *          period) - 1. To disable set to 0. All other values
		union cvmx_l2c_ctl l2c_ctl;
		l2c_ctl.u64 = l2c_rd(priv, CVMX_L2C_CTL);
		 * It is more convenient to compute the ratio using clock
		 * frequencies rather than clock periods.
		rdf_cnt = (((u64)10 * cpu_hertz) / ddr_hertz) - 1;
		/* Saturate at the 8-bit field maximum */
		rdf_cnt = rdf_cnt < 256 ? rdf_cnt : 255;
		l2c_ctl.cn78xx.rdf_cnt = rdf_cnt;
		s = lookup_env(priv, "early_fill_count");
			l2c_ctl.cn78xx.rdf_cnt = simple_strtoul(s, NULL, 0);
		debug("%-45s : %d, cpu_hertz:%d, ddr_hertz:%d\n",
		      "EARLY FILL COUNT  ", l2c_ctl.cn78xx.rdf_cnt, cpu_hertz,
		l2c_wr(priv, CVMX_L2C_CTL, l2c_ctl.u64);
	/* Check for lower DIMM socket populated */
	for (if_idx = 0; if_idx < 4; ++if_idx) {
		if ((if_mask & (1 << if_idx)) &&
			&ddr_conf[(int)if_idx].dimm_config_table[0],
			ddr_conf_valid_mask |= (1 << if_idx);
	/* Select LMC topology for chips with more than two controllers */
	if (octeon_is_cpuid(OCTEON_CN68XX) || octeon_is_cpuid(OCTEON_CN78XX)) {
		int four_lmc_mode = 1;
		if (priv->flags & FLAG_FAILSAFE_MODE)
		/* Pass 1.0 disable four LMC mode.
		 *  See errata (LMC-15811)
		if (octeon_is_cpuid(OCTEON_CN68XX_PASS1_0))
		s = env_get("ddr_four_lmc");
			four_lmc_mode = simple_strtoul(s, NULL, 0);
			printf("Parameter found in environment. ddr_four_lmc = %d\n",
		if (!four_lmc_mode) {
			puts("Forcing two-LMC Mode.\n");
			/* Invalidate LMC[2:3] */
			ddr_conf_valid_mask &= ~(3 << 2);
	} else if (octeon_is_cpuid(OCTEON_CN73XX)) {
		int one_lmc_mode = 0;
		s = env_get("ddr_one_lmc");
			one_lmc_mode = simple_strtoul(s, NULL, 0);
			printf("Parameter found in environment. ddr_one_lmc = %d\n",
			puts("Forcing one-LMC Mode.\n");
			/* Invalidate LMC[1:3] */
			ddr_conf_valid_mask &= ~(1 << 1);
	if (!ddr_conf_valid_mask) {
		("ERROR: No valid DIMMs detected on any DDR interface.\n");
		return -1;	/* no usable DIMM configuration found */
	/*
	 * We measure the DDR frequency by counting DDR clocks. We can
	 * confirm or adjust the expected frequency as necessary. We use
	 * the measured frequency to make accurate timing calculations
	 * used to configure the controller.
	 */
	for (if_idx = 0; if_idx < 4; ++if_idx) {
		if (!(ddr_conf_valid_mask & (1 << if_idx)))
		 * only check for alternate refclk wanted on chips that
		if ((octeon_is_cpuid(OCTEON_CN73XX)) ||
		    (octeon_is_cpuid(OCTEON_CNF75XX)) ||
		    (octeon_is_cpuid(OCTEON_CN78XX_PASS2_X))) {
			// only need do this if we are LMC0
			union cvmx_lmcx_ddr_pll_ctl ddr_pll_ctl;
				lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(0));
			 * If we are asking for 100 MHz refclk, we can
			 * only get it via alternate, so switch to it
			if (ddr_ref_hertz == 100000000) {
				ddr_pll_ctl.cn78xx.dclk_alt_refclk_sel =
				lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(0),
				udelay(1000);	// wait 1 msec
				 * If we are NOT asking for 100MHz,
				 * then reset to (assumed) 50MHz and go
				ddr_pll_ctl.cn78xx.dclk_alt_refclk_sel =
				lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(0),
				udelay(1000);	// wait 1 msec
			/* Chip cannot do 100 MHz alt refclk: fall back */
			if (ddr_ref_hertz == 100000000) {
				debug("N0: DRAM init: requested 100 MHz refclk NOT SUPPORTED\n");
				ddr_ref_hertz = CONFIG_REF_HERTZ;
		tmp_hertz = measure_octeon_ddr_clock(priv, &ddr_conf[if_idx],
						     cpu_hertz, ddr_hertz,
						     ddr_ref_hertz, if_idx,
						     ddr_conf_valid_mask);
		 * only check for alternate refclk acquired on chips that
		if ((octeon_is_cpuid(OCTEON_CN73XX)) ||
		    (octeon_is_cpuid(OCTEON_CNF75XX)) ||
		    (octeon_is_cpuid(OCTEON_CN78XX_PASS2_X))) {
			 * if we are LMC0 and we are asked for 100 MHz refclk,
			 * we must be sure it is available
			 * If not, we print an error message, set to 50MHz,
			if (if_idx == 0 && ddr_ref_hertz == 100000000) {
				 * Validate that the clock returned is close
				 * enough to the clock desired
				// FIXME: is 5% close enough?
					abs((int)tmp_hertz - (int)ddr_hertz);
				if (hertz_diff > ((int)ddr_hertz * 5 / 100)) {
					// nope, diff is greater than than 5%
					debug("N0: DRAM init: requested 100 MHz refclk NOT FOUND\n");
					ddr_ref_hertz = CONFIG_REF_HERTZ;
					// clear the flag before trying again!!
					set_ddr_clock_initialized(priv, 0, 0);
					debug("N0: DRAM Init: requested 100 MHz refclk FOUND and SELECTED\n");
		calc_ddr_hertz = tmp_hertz;
		debug("LMC%d: measured speed: %u hz\n", if_idx, tmp_hertz);
	if (measured_ddr_hertz)
		*measured_ddr_hertz = calc_ddr_hertz;
	/* Now bring up each valid interface and total the memory size */
	for (if_idx = 0; if_idx < 4; ++if_idx) {
		if (!(ddr_conf_valid_mask & (1 << if_idx)))
		ret = init_octeon_dram_interface(priv, &ddr_conf[if_idx],
						 cpu_hertz, ddr_ref_hertz,
						 if_idx, ddr_conf_valid_mask);
			memsize_mbytes += ret;
	if (memsize_mbytes == 0)
		/* All interfaces failed to initialize, so return error */
	/*
	 * switch over to DBI mode only for chips that support it, and
	 */
	if ((octeon_is_cpuid(OCTEON_CN73XX)) ||
	    (octeon_is_cpuid(OCTEON_CNF75XX)) ||
	    (octeon_is_cpuid(OCTEON_CN78XX_PASS2_X))) {
		eptr = env_get("ddr_dbi_switchover");
			printf("DBI Switchover starting...\n");
			cvmx_dbi_switchover(priv);
			printf("DBI Switchover finished.\n");
	/* call HW-assist tuning here on chips that support it */
	if ((octeon_is_cpuid(OCTEON_CN73XX)) ||
	    (octeon_is_cpuid(OCTEON_CNF75XX)) ||
	    (octeon_is_cpuid(OCTEON_CN78XX_PASS2_X)))
		cvmx_maybe_tune_node(priv, calc_ddr_hertz);
	/* Optional env-driven cap on reported DRAM size */
	eptr = env_get("limit_dram_mbytes");
		unsigned int mbytes = simple_strtoul(eptr, NULL, 10);
			memsize_mbytes = mbytes;
			printf("Limiting DRAM size to %d MBytes based on limit_dram_mbytes env. variable\n",
	debug("LMC Initialization complete. Total DRAM %d MB\n",
	return memsize_mbytes;
/*
 * Driver-model probe: map the LMC and L2C register bases from the DT,
 * pick a DDR configuration table, run octeon_ddr_initialize(), record
 * the resulting base/size in priv->info, and program the L2C "BIG"
 * address range so accesses beyond DRAM fault properly.
 */
static int octeon_ddr_probe(struct udevice *dev)
	struct ddr_priv *priv = dev_get_priv(dev);
	struct ofnode_phandle_args l2c_node;
	struct ddr_conf *ddr_conf_ptr;
	u32 ddr_conf_valid_mask = 0;
	u32 measured_ddr_hertz = 0;
	int conf_table_count;
	/* Don't try to re-init the DDR controller after relocation */
	if (gd->flags & GD_FLG_RELOC)
	/*
	 * Dummy read all local variables into cache, so that they are
	 * locked in cache when the DDR code runs with flushes etc enabled
	 */
	for (i = 0; i < (0x100000 / sizeof(u64)); i++)
	/*
	 * The base addresses of LMC and L2C are read from the DT. This
	 * makes it possible to use the DDR init code without the need
	 * of the "node" variable, describing on which node to access. The
	 * node number is already included implicitly in the base addresses
	 * read from the DT this way.
	 */
	/* Get LMC base address */
	priv->lmc_base = dev_remap_addr(dev);
	debug("%s: lmc_base=%p\n", __func__, priv->lmc_base);
	/* Get L2C base address */
	ret = dev_read_phandle_with_args(dev, "l2c-handle", NULL, 0, 0,
		printf("Can't access L2C node!\n");
	addr = ofnode_get_addr(l2c_node.node);
	if (addr == FDT_ADDR_T_NONE) {
		printf("Can't access L2C node!\n");
	priv->l2c_base = map_physmem(addr, 0, MAP_NOCACHE);
	debug("%s: l2c_base=%p\n", __func__, priv->l2c_base);
	ddr_conf_ptr = octeon_ddr_conf_table_get(&conf_table_count,
	if (!ddr_conf_ptr) {
		printf("ERROR: unable to determine DDR configuration\n");
	/* An entry is usable if it has SPD addresses or inline SPD data */
	for (i = 0; i < conf_table_count; i++) {
		if (ddr_conf_ptr[i].dimm_config_table[0].spd_addrs[0] ||
		    ddr_conf_ptr[i].dimm_config_table[0].spd_ptrs[0])
			ddr_conf_valid_mask |= 1 << i;
	 * Check for special case of mismarked 3005 samples,
	ddr_hertz = def_ddr_freq * 1000000;
	eptr = env_get("ddr_clock_hertz");
		ddr_hertz = simple_strtoul(eptr, NULL, 0);
		gd->mem_clk = divide_nint(ddr_hertz, 1000000);
		printf("Parameter found in environment. ddr_clock_hertz = %d\n",
	ddr_ref_hertz = octeon3_refclock(alt_refclk,
					 &ddr_conf_ptr[0].dimm_config_table[0]);
	debug("Initializing DDR, clock = %uhz, reference = %uhz\n",
	      ddr_hertz, ddr_ref_hertz);
	mem_mbytes = octeon_ddr_initialize(priv, gd->cpu_clk,
					   ddr_hertz, ddr_ref_hertz,
					   ddr_conf_valid_mask,
					   ddr_conf_ptr, &measured_ddr_hertz);
	debug("Mem size in MBYTES: %u\n", mem_mbytes);
	gd->mem_clk = divide_nint(measured_ddr_hertz, 1000000);
	debug("Measured DDR clock %d Hz\n", measured_ddr_hertz);
	if (measured_ddr_hertz != 0) {
		/*
		 * If ddr_clock not set, use measured clock
		 */
			gd->mem_clk = divide_nint(measured_ddr_hertz, 1000000);
		} else if ((measured_ddr_hertz > ddr_hertz + 3000000) ||
			   (measured_ddr_hertz < ddr_hertz - 3000000)) {
			/* Expected vs measured differ by more than 3 MHz */
			printf("\nWARNING:\n");
			printf("WARNING: Measured DDR clock mismatch! expected: %lld MHz, measured: %lldMHz, cpu clock: %lu MHz\n",
			       divide_nint(ddr_hertz, 1000000),
			       divide_nint(measured_ddr_hertz, 1000000),
			printf("WARNING:\n\n");
			gd->mem_clk = divide_nint(measured_ddr_hertz, 1000000);
	/* Publish the probed RAM region for the ram uclass */
	priv->info.base = CONFIG_SYS_SDRAM_BASE;
	priv->info.size = MB(mem_mbytes);
	/*
	 * For 6XXX generate a proper error when reading/writing
	 * non-existent memory locations.
	 */
	cvmx_l2c_set_big_size(priv, mem_mbytes, 0);
	debug("Ram size %uMiB\n", mem_mbytes);
/*
 * octeon_get_info() - ram uclass .get_info hook.
 *
 * NOTE(review): only the priv lookup is visible in this excerpt; the
 * missing body presumably copies priv->info (filled in during probe)
 * into *info and returns 0 - verify against the full file.
 */
2703 static int octeon_get_info(struct udevice *dev, struct ram_info *info)
2705 struct ddr_priv *priv = dev_get_priv(dev);
/* ram uclass operations - only .get_info is provided by this driver. */
2712 static struct ram_ops octeon_ops = {
2713 .get_info = octeon_get_info,
/*
 * Device-tree match table: Octeon 7xxx family DDR4 controller.
 * (Terminating { } sentinel not visible in this excerpt.)
 */
2716 static const struct udevice_id octeon_ids[] = {
2717 {.compatible = "cavium,octeon-7xxx-ddr4" },
/*
 * Driver-model declaration for the Octeon DDR controller.
 *
 * Fix: octeon_ddr_probe() and octeon_get_info() fetch their state with
 * dev_get_priv(), so the per-device allocation must be declared with
 * .priv_auto. The original .plat_auto sizes the platform-data area
 * returned by dev_get_plat() instead, leaving dev_get_priv() with no
 * backing allocation.
 * (Interior lines such as .id/.ops and the closing brace are not
 * visible in this excerpt and are left untouched.)
 */
2721 U_BOOT_DRIVER(octeon_ddr) = {
2722 .name = "octeon_ddr",
2724 .of_match = octeon_ids,
2726 .probe = octeon_ddr_probe,
2727 .priv_auto = sizeof(struct ddr_priv),