// SPDX-License-Identifier: GPL-2.0+
/*
 * EMIF programming
 *
 * (C) Copyright 2010
 * Texas Instruments, <www.ti.com>
 *
 * Aneesh V <aneesh@ti.com>
 */

#include <common.h>
#include <asm/emif.h>
#include <asm/arch/clock.h>
#include <asm/arch/sys_proto.h>
#include <asm/omap_common.h>
#include <asm/omap_sec_common.h>
#include <asm/utils.h>
#include <linux/compiler.h>
#include <asm/ti-common/ti-edma3.h>

static int emif1_enabled = -1, emif2_enabled = -1;

void set_lpmode_selfrefresh(u32 base)
{
	struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
	u32 reg;

	reg = readl(&emif->emif_pwr_mgmt_ctrl);
	reg &= ~EMIF_REG_LP_MODE_MASK;
	reg |= LP_MODE_SELF_REFRESH << EMIF_REG_LP_MODE_SHIFT;
	reg &= ~EMIF_REG_SR_TIM_MASK;
	writel(reg, &emif->emif_pwr_mgmt_ctrl);

	/* dummy read for the new SR_TIM to be loaded */
	readl(&emif->emif_pwr_mgmt_ctrl);
}

void force_emif_self_refresh(void)
{
	set_lpmode_selfrefresh(EMIF1_BASE);
	if (!is_dra72x())
		set_lpmode_selfrefresh(EMIF2_BASE);
}

inline u32 emif_num(u32 base)
{
	if (base == EMIF1_BASE)
		return 1;
	else if (base == EMIF2_BASE)
		return 2;
	else
		return 0;
}

static inline u32 get_mr(u32 base, u32 cs, u32 mr_addr)
{
	u32 mr;
	struct emif_reg_struct *emif = (struct emif_reg_struct *)base;

	mr_addr |= cs << EMIF_REG_CS_SHIFT;
	writel(mr_addr, &emif->emif_lpddr2_mode_reg_cfg);
	if (omap_revision() == OMAP4430_ES2_0)
		mr = readl(&emif->emif_lpddr2_mode_reg_data_es2);
	else
		mr = readl(&emif->emif_lpddr2_mode_reg_data);
	debug("get_mr: EMIF%d cs %d mr %08x val 0x%x\n", emif_num(base),
	      cs, mr_addr, mr);
	if (((mr & 0x0000ff00) >> 8) == (mr & 0xff) &&
	    ((mr & 0x00ff0000) >> 16) == (mr & 0xff) &&
	    ((mr & 0xff000000) >> 24) == (mr & 0xff))
		return mr & 0xff;
	else
		return mr;
}

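/*
 * Note on the byte-compare in get_mr() above: on a 32-bit interface
 * each byte lane returns its own copy of the 8-bit mode-register
 * value, so a valid read shows the same byte mirrored in all four
 * lanes. When the mirror check passes the value is collapsed to 8
 * bits; otherwise the raw value (> 0xFF) is returned, which callers
 * such as is_lpddr2_sdram_present() treat as "no device present".
 * Hypothetical example: a readback of 0x14141414 yields 0x14, while
 * 0x14141400 is returned as-is and later rejected by an mr > 0xFF test.
 */
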
static inline void set_mr(u32 base, u32 cs, u32 mr_addr, u32 mr_val)
{
	struct emif_reg_struct *emif = (struct emif_reg_struct *)base;

	mr_addr |= cs << EMIF_REG_CS_SHIFT;
	writel(mr_addr, &emif->emif_lpddr2_mode_reg_cfg);
	writel(mr_val, &emif->emif_lpddr2_mode_reg_data);
}

void emif_reset_phy(u32 base)
{
	struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
	u32 iodft;

	iodft = readl(&emif->emif_iodft_tlgc);
	iodft |= EMIF_REG_RESET_PHY_MASK;
	writel(iodft, &emif->emif_iodft_tlgc);
}

static void do_lpddr2_init(u32 base, u32 cs)
{
	u32 mr_addr;
	const struct lpddr2_mr_regs *mr_regs;

	get_lpddr2_mr_regs(&mr_regs);
	/* Wait till device auto initialization is complete */
	while (get_mr(base, cs, LPDDR2_MR0) & LPDDR2_MR0_DAI_MASK)
		;
	set_mr(base, cs, LPDDR2_MR10, mr_regs->mr10);
	/*
	 * Enough loops assuming a maximum of 2GHz
	 */

	sdelay(2000);

	set_mr(base, cs, LPDDR2_MR1, mr_regs->mr1);
	set_mr(base, cs, LPDDR2_MR16, mr_regs->mr16);

	/*
	 * Enable refresh along with writing MR2
	 * Encoding of RL in MR2 is (RL - 2)
	 */
	mr_addr = LPDDR2_MR2 | EMIF_REG_REFRESH_EN_MASK;
	set_mr(base, cs, mr_addr, mr_regs->mr2);

	if (mr_regs->mr3 > 0)
		set_mr(base, cs, LPDDR2_MR3, mr_regs->mr3);
}

static void lpddr2_init(u32 base, const struct emif_regs *regs)
{
	struct emif_reg_struct *emif = (struct emif_reg_struct *)base;

	/* Not NVM */
	clrbits_le32(&emif->emif_lpddr2_nvm_config, EMIF_REG_CS1NVMEN_MASK);

	/*
	 * Keep REG_INITREF_DIS = 1 to prevent re-initialization of SDRAM
	 * when EMIF_SDRAM_CONFIG register is written
	 */
	setbits_le32(&emif->emif_sdram_ref_ctrl, EMIF_REG_INITREF_DIS_MASK);

	/*
	 * Set the SDRAM_CONFIG and PHY_CTRL for the
	 * un-locked frequency & default RL
	 */
	writel(regs->sdram_config_init, &emif->emif_sdram_config);
	writel(regs->emif_ddr_phy_ctlr_1_init, &emif->emif_ddr_phy_ctrl_1);

	do_ext_phy_settings(base, regs);

	do_lpddr2_init(base, CS0);
	if (regs->sdram_config & EMIF_REG_EBANK_MASK)
		do_lpddr2_init(base, CS1);

	writel(regs->sdram_config, &emif->emif_sdram_config);
	writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1);

	/* Enable refresh now */
	clrbits_le32(&emif->emif_sdram_ref_ctrl, EMIF_REG_INITREF_DIS_MASK);
}

__weak void do_ext_phy_settings(u32 base, const struct emif_regs *regs)
{
}

void emif_update_timings(u32 base, const struct emif_regs *regs)
{
	struct emif_reg_struct *emif = (struct emif_reg_struct *)base;

	if (!is_dra7xx())
		writel(regs->ref_ctrl, &emif->emif_sdram_ref_ctrl_shdw);
	else
		writel(regs->ref_ctrl_final, &emif->emif_sdram_ref_ctrl_shdw);

	writel(regs->sdram_tim1, &emif->emif_sdram_tim_1_shdw);
	writel(regs->sdram_tim2, &emif->emif_sdram_tim_2_shdw);
	writel(regs->sdram_tim3, &emif->emif_sdram_tim_3_shdw);
	if (omap_revision() == OMAP4430_ES1_0) {
		/* ES1 bug EMIF should be in force idle during freq_update */
		writel(0, &emif->emif_pwr_mgmt_ctrl);
	} else {
		writel(EMIF_PWR_MGMT_CTRL, &emif->emif_pwr_mgmt_ctrl);
		writel(EMIF_PWR_MGMT_CTRL_SHDW, &emif->emif_pwr_mgmt_ctrl_shdw);
	}
	writel(regs->read_idle_ctrl, &emif->emif_read_idlectrl_shdw);
	writel(regs->zq_config, &emif->emif_zq_config);
	writel(regs->temp_alert_config, &emif->emif_temp_alert_config);
	writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1_shdw);

	if ((omap_revision() >= OMAP5430_ES1_0) || is_dra7xx()) {
		writel(EMIF_L3_CONFIG_VAL_SYS_10_MPU_5_LL_0,
		       &emif->emif_l3_config);
	} else if (omap_revision() >= OMAP4460_ES1_0) {
		writel(EMIF_L3_CONFIG_VAL_SYS_10_MPU_3_LL_0,
		       &emif->emif_l3_config);
	} else {
		writel(EMIF_L3_CONFIG_VAL_SYS_10_LL_0,
		       &emif->emif_l3_config);
	}
}

#ifndef CONFIG_OMAP44XX
static void omap5_ddr3_leveling(u32 base, const struct emif_regs *regs)
{
	struct emif_reg_struct *emif = (struct emif_reg_struct *)base;

	/* keep sdram in self-refresh */
	writel(((LP_MODE_SELF_REFRESH << EMIF_REG_LP_MODE_SHIFT)
		& EMIF_REG_LP_MODE_MASK), &emif->emif_pwr_mgmt_ctrl);
	__udelay(130);

	/*
	 * Set invert_clkout (if activated)--DDR_PHYCTRL_1
	 * Invert clock adds an additional half cycle delay on the
	 * command interface. The additional half cycle is usually
	 * meant to enable leveling in the situation that DQS is later
	 * than CK on the board. It also helps provide some additional
	 * margin for leveling.
	 */
	writel(regs->emif_ddr_phy_ctlr_1,
	       &emif->emif_ddr_phy_ctrl_1);

	writel(regs->emif_ddr_phy_ctlr_1,
	       &emif->emif_ddr_phy_ctrl_1_shdw);
	__udelay(130);

	writel(((LP_MODE_DISABLE << EMIF_REG_LP_MODE_SHIFT)
	       & EMIF_REG_LP_MODE_MASK), &emif->emif_pwr_mgmt_ctrl);

	/* Launch Full leveling */
	writel(DDR3_FULL_LVL, &emif->emif_rd_wr_lvl_ctl);

	/* Wait till full leveling is complete */
	readl(&emif->emif_rd_wr_lvl_ctl);
	__udelay(130);

	/* Read data eye leveling no of samples */
	config_data_eye_leveling_samples(base);

	/*
	 * Launch 8 incremental WR_LVL- to compensate for
	 * PHY limitation.
	 */
	writel(0x2 << EMIF_REG_WRLVLINC_INT_SHIFT,
	       &emif->emif_rd_wr_lvl_ctl);
	__udelay(130);

	/* Launch Incremental leveling */
	writel(DDR3_INC_LVL, &emif->emif_rd_wr_lvl_ctl);
	__udelay(130);
}

static void update_hwleveling_output(u32 base, const struct emif_regs *regs)
{
	struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
	u32 *emif_ext_phy_ctrl_reg, *emif_phy_status;
	u32 reg, i, phy;

	emif_phy_status = (u32 *)&emif->emif_ddr_phy_status[6];
	phy = readl(&emif->emif_ddr_phy_ctrl_1);

	/* Update PHY_REG_RDDQS_RATIO */
	emif_ext_phy_ctrl_reg = (u32 *)&emif->emif_ddr_ext_phy_ctrl_7;
	if (!(phy & EMIF_DDR_PHY_CTRL_1_RDLVL_MASK_MASK))
		for (i = 0; i < PHY_RDDQS_RATIO_REGS; i++) {
			reg = readl(emif_phy_status++);
			writel(reg, emif_ext_phy_ctrl_reg++);
			writel(reg, emif_ext_phy_ctrl_reg++);
		}

	/* Update PHY_REG_FIFO_WE_SLAVE_RATIO */
	emif_ext_phy_ctrl_reg = (u32 *)&emif->emif_ddr_ext_phy_ctrl_2;
	emif_phy_status = (u32 *)&emif->emif_ddr_phy_status[11];
	if (!(phy & EMIF_DDR_PHY_CTRL_1_RDLVLGATE_MASK_MASK))
		for (i = 0; i < PHY_FIFO_WE_SLAVE_RATIO_REGS; i++) {
			reg = readl(emif_phy_status++);
			writel(reg, emif_ext_phy_ctrl_reg++);
			writel(reg, emif_ext_phy_ctrl_reg++);
		}

	/* Update PHY_REG_WR_DQ/DQS_SLAVE_RATIO */
	emif_ext_phy_ctrl_reg = (u32 *)&emif->emif_ddr_ext_phy_ctrl_12;
	emif_phy_status = (u32 *)&emif->emif_ddr_phy_status[16];
	if (!(phy & EMIF_DDR_PHY_CTRL_1_WRLVL_MASK_MASK))
		for (i = 0; i < PHY_REG_WR_DQ_SLAVE_RATIO_REGS; i++) {
			reg = readl(emif_phy_status++);
			writel(reg, emif_ext_phy_ctrl_reg++);
			writel(reg, emif_ext_phy_ctrl_reg++);
		}

	/* Disable Leveling */
	writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1);
	writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1_shdw);
	writel(0x0, &emif->emif_rd_wr_lvl_rmp_ctl);
}

static void dra7_ddr3_leveling(u32 base, const struct emif_regs *regs)
{
	struct emif_reg_struct *emif = (struct emif_reg_struct *)base;

	/* Clear Error Status */
	clrsetbits_le32(&emif->emif_ddr_ext_phy_ctrl_36,
			EMIF_REG_PHY_FIFO_WE_IN_MISALINED_CLR,
			EMIF_REG_PHY_FIFO_WE_IN_MISALINED_CLR);

	clrsetbits_le32(&emif->emif_ddr_ext_phy_ctrl_36_shdw,
			EMIF_REG_PHY_FIFO_WE_IN_MISALINED_CLR,
			EMIF_REG_PHY_FIFO_WE_IN_MISALINED_CLR);

	/* Disable refreshes before leveling */
	clrsetbits_le32(&emif->emif_sdram_ref_ctrl, EMIF_REG_INITREF_DIS_MASK,
			EMIF_REG_INITREF_DIS_MASK);

	/* Start Full leveling */
	writel(DDR3_FULL_LVL, &emif->emif_rd_wr_lvl_ctl);

	__udelay(300);

	/* Check for leveling timeout */
	if (readl(&emif->emif_status) & EMIF_REG_LEVELING_TO_MASK) {
		printf("Leveling timeout on EMIF%d\n", emif_num(base));
		return;
	}

	/* Enable refreshes after leveling */
	clrbits_le32(&emif->emif_sdram_ref_ctrl, EMIF_REG_INITREF_DIS_MASK);

	debug("HW leveling success\n");
	/*
	 * Update slave ratios in EXT_PHY_CTRLx registers
	 * as per HW leveling output
	 */
	update_hwleveling_output(base, regs);
}

static void dra7_reset_ddr_data(u32 base, u32 size)
{
#if defined(CONFIG_TI_EDMA3) && !defined(CONFIG_DMA)
	enable_edma3_clocks();

	edma3_fill(EDMA3_BASE, 1, (void *)base, 0, size);

	disable_edma3_clocks();
#else
	memset((void *)base, 0, size);
#endif
}

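/*
 * Design note: with EDMA3 support compiled in, the region above is
 * zeroed by the DMA engine via edma3_fill() instead of a CPU memset.
 * For the large ECC regions cleared in dra7_enable_ecc() below this is
 * typically much faster; plain memset() is only the fallback when no
 * EDMA3 driver is available.
 */
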
static void dra7_enable_ecc(u32 base, const struct emif_regs *regs)
{
	struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
	u32 rgn, rgn_start, size, ctrl_reg;

	/* ECC available only on dra76x EMIF1 */
	if ((base != EMIF1_BASE) || !is_dra76x())
		return;

	if (regs->emif_ecc_ctrl_reg & EMIF_ECC_CTRL_REG_ECC_EN_MASK) {
		/* Disable high-order interleaving */
		clrbits_le32(MA_PRIORITY, MA_HIMEM_INTERLEAVE_UN_MASK);

		/* Clear the status flags and other history */
		writel(readl(&emif->emif_1b_ecc_err_cnt),
		       &emif->emif_1b_ecc_err_cnt);
		writel(0xffffffff, &emif->emif_1b_ecc_err_dist_1);
		writel(0x2, &emif->emif_1b_ecc_err_addr_log);
		writel(0x1, &emif->emif_2b_ecc_err_addr_log);
		writel(EMIF_INT_WR_ECC_ERR_SYS_MASK |
		       EMIF_INT_TWOBIT_ECC_ERR_SYS_MASK |
		       EMIF_INT_ONEBIT_ECC_ERR_SYS_MASK,
		       &emif->emif_irqstatus_sys);

		writel(regs->emif_ecc_address_range_1,
		       &emif->emif_ecc_address_range_1);
		writel(regs->emif_ecc_address_range_2,
		       &emif->emif_ecc_address_range_2);

		/* Disable RMW and ECC verification for read accesses */
		ctrl_reg = (regs->emif_ecc_ctrl_reg &
			    ~EMIF_ECC_REG_RMW_EN_MASK) |
			   EMIF_ECC_CTRL_REG_ECC_VERIFY_DIS_MASK;
		writel(ctrl_reg, &emif->emif_ecc_ctrl_reg);

		/* Set region1 memory with 0 */
		rgn_start = (regs->emif_ecc_address_range_1 &
			     EMIF_ECC_REG_ECC_START_ADDR_MASK) << 16;
		rgn = rgn_start + CONFIG_SYS_SDRAM_BASE;
		size = (regs->emif_ecc_address_range_1 &
			EMIF_ECC_REG_ECC_END_ADDR_MASK) + 0x10000 - rgn_start;

		if (regs->emif_ecc_ctrl_reg &
		    EMIF_ECC_REG_ECC_ADDR_RGN_1_EN_MASK)
			dra7_reset_ddr_data(rgn, size);

		/* Set region2 memory with 0 */
		rgn_start = (regs->emif_ecc_address_range_2 &
			     EMIF_ECC_REG_ECC_START_ADDR_MASK) << 16;
		rgn = rgn_start + CONFIG_SYS_SDRAM_BASE;
		size = (regs->emif_ecc_address_range_2 &
			EMIF_ECC_REG_ECC_END_ADDR_MASK) + 0x10000 - rgn_start;

		if (regs->emif_ecc_ctrl_reg &
		    EMIF_ECC_REG_ECC_ADDR_RGN_2_EN_MASK)
			dra7_reset_ddr_data(rgn, size);

		/* Default value enables RMW and ECC verification */
		writel(regs->emif_ecc_ctrl_reg, &emif->emif_ecc_ctrl_reg);
	}
}

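/*
 * Worked example for the region arithmetic above (hypothetical value,
 * and assuming the START field occupies the low half-word and the END
 * field the high half-word, both in 64 KiB granules, as the << 16 and
 * + 0x10000 terms imply): emif_ecc_address_range_1 = 0x7fff0000 gives
 * rgn_start = 0x0, rgn = CONFIG_SYS_SDRAM_BASE and
 * size = 0x7fff0000 + 0x10000 = 0x80000000, i.e. a 2 GiB ECC region.
 */
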
static void dra7_ddr3_init(u32 base, const struct emif_regs *regs)
{
	struct emif_reg_struct *emif = (struct emif_reg_struct *)base;

	if (warm_reset()) {
		emif_reset_phy(base);
		writel(0x0, &emif->emif_pwr_mgmt_ctrl);
	}
	do_ext_phy_settings(base, regs);

	writel(regs->ref_ctrl | EMIF_REG_INITREF_DIS_MASK,
	       &emif->emif_sdram_ref_ctrl);
	/* Update timing registers */
	writel(regs->sdram_tim1, &emif->emif_sdram_tim_1);
	writel(regs->sdram_tim2, &emif->emif_sdram_tim_2);
	writel(regs->sdram_tim3, &emif->emif_sdram_tim_3);

	writel(EMIF_L3_CONFIG_VAL_SYS_10_MPU_5_LL_0, &emif->emif_l3_config);

	writel(regs->read_idle_ctrl, &emif->emif_read_idlectrl);
	writel(regs->zq_config, &emif->emif_zq_config);
	writel(regs->temp_alert_config, &emif->emif_temp_alert_config);
	writel(regs->emif_rd_wr_lvl_rmp_ctl, &emif->emif_rd_wr_lvl_rmp_ctl);
	writel(regs->emif_rd_wr_lvl_ctl, &emif->emif_rd_wr_lvl_ctl);

	writel(regs->emif_ddr_phy_ctlr_1_init, &emif->emif_ddr_phy_ctrl_1);
	writel(regs->emif_rd_wr_exec_thresh, &emif->emif_rd_wr_exec_thresh);

	writel(regs->ref_ctrl, &emif->emif_sdram_ref_ctrl);

	writel(regs->sdram_config2, &emif->emif_lpddr2_nvm_config);
	writel(regs->sdram_config_init, &emif->emif_sdram_config);

	__udelay(1000);

	writel(regs->ref_ctrl_final, &emif->emif_sdram_ref_ctrl);

	if (regs->emif_rd_wr_lvl_rmp_ctl & EMIF_REG_RDWRLVL_EN_MASK) {
		/*
		 * Perform Dummy ECC setup just to allow hardware
		 * leveling of ECC memories
		 */
		if (is_dra76x() && (base == EMIF1_BASE) &&
		    (regs->emif_ecc_ctrl_reg & EMIF_ECC_CTRL_REG_ECC_EN_MASK)) {
			writel(0, &emif->emif_ecc_address_range_1);
			writel(0, &emif->emif_ecc_address_range_2);
			writel(EMIF_ECC_CTRL_REG_ECC_EN_MASK |
			       EMIF_ECC_CTRL_REG_ECC_ADDR_RGN_PROT_MASK,
			       &emif->emif_ecc_ctrl_reg);
		}

		dra7_ddr3_leveling(base, regs);

		/* Disable ECC */
		writel(0, &emif->emif_ecc_ctrl_reg);
	}

	/* Enable ECC as necessary */
	dra7_enable_ecc(base, regs);
}

static void omap5_ddr3_init(u32 base, const struct emif_regs *regs)
{
	struct emif_reg_struct *emif = (struct emif_reg_struct *)base;

	writel(regs->ref_ctrl, &emif->emif_sdram_ref_ctrl);
	writel(regs->sdram_config_init, &emif->emif_sdram_config);
	/*
	 * Set SDRAM_CONFIG and PHY control registers to locked frequency
	 * and RL = 7. As the default values of the Mode Registers are not
	 * defined, the contents of the Mode Registers must be fully
	 * initialized. H/W takes care of this initialization
	 */
	writel(regs->emif_ddr_phy_ctlr_1_init, &emif->emif_ddr_phy_ctrl_1);

	/* Update timing registers */
	writel(regs->sdram_tim1, &emif->emif_sdram_tim_1);
	writel(regs->sdram_tim2, &emif->emif_sdram_tim_2);
	writel(regs->sdram_tim3, &emif->emif_sdram_tim_3);

	writel(regs->read_idle_ctrl, &emif->emif_read_idlectrl);

	writel(regs->sdram_config2, &emif->emif_lpddr2_nvm_config);
	writel(regs->sdram_config_init, &emif->emif_sdram_config);
	do_ext_phy_settings(base, regs);

	writel(regs->emif_rd_wr_lvl_rmp_ctl, &emif->emif_rd_wr_lvl_rmp_ctl);
	omap5_ddr3_leveling(base, regs);
}

static void ddr3_init(u32 base, const struct emif_regs *regs)
{
	if (is_omap54xx())
		omap5_ddr3_init(base, regs);
	else
		dra7_ddr3_init(base, regs);
}
#endif

#ifndef CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS
#define print_timing_reg(reg) debug(#reg" - 0x%08x\n", (reg))

/*
 * Organization and refresh requirements for LPDDR2 devices of different
 * types and densities. Derived from JESD209-2 section 2.4
 */
const struct lpddr2_addressing addressing_table[] = {
	/* Banks tREFIx10     rowx32,rowx16      colx32,colx16	density */
	{BANKS4, T_REFI_15_6, {ROW_12, ROW_12}, {COL_7, COL_8} },/*64M */
	{BANKS4, T_REFI_15_6, {ROW_12, ROW_12}, {COL_8, COL_9} },/*128M */
	{BANKS4, T_REFI_7_8, {ROW_13, ROW_13}, {COL_8, COL_9} },/*256M */
	{BANKS4, T_REFI_7_8, {ROW_13, ROW_13}, {COL_9, COL_10} },/*512M */
	{BANKS8, T_REFI_7_8, {ROW_13, ROW_13}, {COL_9, COL_10} },/*1GS4 */
	{BANKS8, T_REFI_3_9, {ROW_14, ROW_14}, {COL_9, COL_10} },/*2GS4 */
	{BANKS8, T_REFI_3_9, {ROW_14, ROW_14}, {COL_10, COL_11} },/*4G */
	{BANKS8, T_REFI_3_9, {ROW_15, ROW_15}, {COL_10, COL_11} },/*8G */
	{BANKS4, T_REFI_7_8, {ROW_14, ROW_14}, {COL_9, COL_10} },/*1GS2 */
	{BANKS4, T_REFI_3_9, {ROW_15, ROW_15}, {COL_9, COL_10} },/*2GS2 */
};

static const u32 lpddr2_density_2_size_in_mbytes[] = {
	8,			/* 64Mb */
	16,			/* 128Mb */
	32,			/* 256Mb */
	64,			/* 512Mb */
	128,			/* 1Gb */
	256,			/* 2Gb */
	512,			/* 4Gb */
	1024,			/* 8Gb */
	2048,			/* 16Gb */
	4096			/* 32Gb */
};

/*
 * Calculate the period of DDR clock from frequency value and set the
 * denominator and numerator in global variables for easy access later
 */
static void set_ddr_clk_period(u32 freq)
{
	/*
	 * period_in_ns = 10^9/freq
	 */
	*T_num = 1000000000;
	*T_den = freq;
	cancel_out(T_num, T_den, 200);
}

/*
 * Convert time in nano seconds to number of cycles of DDR clock
 */
static inline u32 ns_2_cycles(u32 ns)
{
	return ((ns * (*T_den)) + (*T_num) - 1) / (*T_num);
}

/*
 * ns_2_cycles with the difference that the time passed is 2 times the actual
 * value (to avoid fractions). The number of cycles returned is for the
 * original value of the timing parameter
 */
static inline u32 ns_x2_2_cycles(u32 ns)
{
	return ((ns * (*T_den)) + (*T_num) * 2 - 1) / ((*T_num) * 2);
}

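/*
 * Worked example for the two converters above (hypothetical clock):
 * at freq = 400 MHz, set_ddr_clk_period() leaves T_num/T_den encoding
 * a 2.5 ns period. ns_2_cycles(15) then rounds 15 / 2.5 up to 6
 * cycles, and a parameter such as tWTR = 7.5 ns is stored pre-doubled
 * (tWTRx2 = 15) so that ns_x2_2_cycles(15) returns ceil(7.5 / 2.5) = 3
 * without ever handling fractional nanoseconds.
 */
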
/*
 * Find addressing table index based on the device's type (S2 or S4) and
 * density
 */
s8 addressing_table_index(u8 type, u8 density, u8 width)
{
	s8 index;

	if ((density > LPDDR2_DENSITY_8Gb) || (width == LPDDR2_IO_WIDTH_8))
		return -1;

	/*
	 * Look at the way ADDR_TABLE_INDEX* values have been defined
	 * in emif.h compared to LPDDR2_DENSITY_* values
	 * The table is laid out in the increasing order of density
	 * (ignoring type). The exceptions 1GS2 and 2GS2 have been placed
	 * at the end
	 */
	if ((type == LPDDR2_TYPE_S2) && (density == LPDDR2_DENSITY_1Gb))
		index = ADDR_TABLE_INDEX1GS2;
	else if ((type == LPDDR2_TYPE_S2) && (density == LPDDR2_DENSITY_2Gb))
		index = ADDR_TABLE_INDEX2GS2;
	else
		index = density;

	debug("emif: addressing table index %d\n", index);

	return index;
}

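/*
 * Example: a 2Gb S4 device indexes the 2GS4 row directly through its
 * LPDDR2_DENSITY_* value, whereas a 2Gb S2 device is redirected to the
 * ADDR_TABLE_INDEX2GS2 entry placed after the 8G row of the table.
 */
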
/*
 * Find the right timing table from the array of timing
 * tables of the device using DDR clock frequency
 */
static const struct lpddr2_ac_timings *get_timings_table(const struct
			lpddr2_ac_timings *const *device_timings,
			u32 freq)
{
	u32 i, temp, freq_nearest;
	const struct lpddr2_ac_timings *timings = NULL;

	emif_assert(freq <= MAX_LPDDR2_FREQ);
	emif_assert(device_timings);

	/*
	 * Start with the maximum allowed frequency - that is always safe
	 */
	freq_nearest = MAX_LPDDR2_FREQ;
	/*
	 * Find the timings table that has the max frequency value:
	 *   i.  Above or equal to the DDR frequency - safe
	 *   ii. The lowest that satisfies condition (i) - optimal
	 */
	for (i = 0; (i < MAX_NUM_SPEEDBINS) && device_timings[i]; i++) {
		temp = device_timings[i]->max_freq;
		if ((temp >= freq) && (temp <= freq_nearest)) {
			freq_nearest = temp;
			timings = device_timings[i];
		}
	}
	debug("emif: timings table: %d\n", freq_nearest);

	return timings;
}

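/*
 * Example: for a device with speed bins at 200, 400 and 533 MHz and a
 * requested freq of 333 MHz, the loop above settles on the 400 MHz
 * table - the lowest max_freq that is still >= the DDR frequency.
 */
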
/*
 * Finds the value of emif_sdram_config_reg
 * All parameters are programmed based on the device on CS0.
 * If there is a device on CS1, it will be the same as that on CS0 or
 * it will be NVM. We don't support NVM yet.
 * If the cs1_device pointer is NULL it is assumed that there is no device
 * on CS1
 */
static u32 get_sdram_config_reg(const struct lpddr2_device_details *cs0_device,
				const struct lpddr2_device_details *cs1_device,
				const struct lpddr2_addressing *addressing,
				u8 RL)
{
	u32 config_reg = 0;

	config_reg |= (cs0_device->type + 4) << EMIF_REG_SDRAM_TYPE_SHIFT;
	config_reg |= EMIF_INTERLEAVING_POLICY_MAX_INTERLEAVING <<
			EMIF_REG_IBANK_POS_SHIFT;

	config_reg |= cs0_device->io_width << EMIF_REG_NARROW_MODE_SHIFT;

	config_reg |= RL << EMIF_REG_CL_SHIFT;

	config_reg |= addressing->row_sz[cs0_device->io_width] <<
			EMIF_REG_ROWSIZE_SHIFT;

	config_reg |= addressing->num_banks << EMIF_REG_IBANK_SHIFT;

	config_reg |= (cs1_device ? EBANK_CS1_EN : EBANK_CS1_DIS) <<
			EMIF_REG_EBANK_SHIFT;

	config_reg |= addressing->col_sz[cs0_device->io_width] <<
			EMIF_REG_PAGESIZE_SHIFT;

	return config_reg;
}

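/*
 * Note on the (cs0_device->type + 4) term in get_sdram_config_reg():
 * MR8 encodes LPDDR2-S4 as 0 and LPDDR2-S2 as 1, while the EMIF
 * SDRAM_TYPE field uses 4 for LPDDR2-S4 and 5 for LPDDR2-S2, so the
 * device type read from MR8 is simply offset by 4.
 */
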
static u32 get_sdram_ref_ctrl(u32 freq,
			      const struct lpddr2_addressing *addressing)
{
	u32 ref_ctrl = 0, val = 0, freq_khz;

	freq_khz = freq / 1000;
	/*
	 * refresh rate to be set is 'tREFI * freq in MHz'
	 * division by 10000 to account for kHz and the x10 in t_REFI_us_x10
	 */
	val = addressing->t_REFI_us_x10 * freq_khz / 10000;
	ref_ctrl |= val << EMIF_REG_REFRESH_RATE_SHIFT;

	return ref_ctrl;
}

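/*
 * Worked example: for a 7.8 us refresh interval (t_REFI_us_x10 = 78)
 * at freq = 400 MHz (freq_khz = 400000), val = 78 * 400000 / 10000 =
 * 3120, i.e. one refresh command is issued every 3120 DDR cycles.
 */
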
static u32 get_sdram_tim_1_reg(const struct lpddr2_ac_timings *timings,
			       const struct lpddr2_min_tck *min_tck,
			       const struct lpddr2_addressing *addressing)
{
	u32 tim1 = 0, val = 0;

	val = max(min_tck->tWTR, ns_x2_2_cycles(timings->tWTRx2)) - 1;
	tim1 |= val << EMIF_REG_T_WTR_SHIFT;

	if (addressing->num_banks == BANKS8)
		val = (timings->tFAW * (*T_den) + 4 * (*T_num) - 1) /
							(4 * (*T_num)) - 1;
	else
		val = max(min_tck->tRRD, ns_2_cycles(timings->tRRD)) - 1;
	tim1 |= val << EMIF_REG_T_RRD_SHIFT;

	val = ns_2_cycles(timings->tRASmin + timings->tRPab) - 1;
	tim1 |= val << EMIF_REG_T_RC_SHIFT;

	val = max(min_tck->tRAS_MIN, ns_2_cycles(timings->tRASmin)) - 1;
	tim1 |= val << EMIF_REG_T_RAS_SHIFT;

	val = max(min_tck->tWR, ns_2_cycles(timings->tWR)) - 1;
	tim1 |= val << EMIF_REG_T_WR_SHIFT;

	val = max(min_tck->tRCD, ns_2_cycles(timings->tRCD)) - 1;
	tim1 |= val << EMIF_REG_T_RCD_SHIFT;

	val = max(min_tck->tRP_AB, ns_2_cycles(timings->tRPab)) - 1;
	tim1 |= val << EMIF_REG_T_RP_SHIFT;

	return tim1;
}

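/*
 * Note on the BANKS8 branch in get_sdram_tim_1_reg(): the EMIF has no
 * dedicated tFAW field, so for 8-bank devices tRRD is programmed as
 * ceil(tFAW / 4) cycles, which spreads four consecutive activates
 * across a full tFAW window. E.g. tFAW = 50 ns at a 2.5 ns cycle time
 * gives ceil(50 / 10) = 5 cycles, programmed as 4 (the field is N - 1).
 */
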
static u32 get_sdram_tim_2_reg(const struct lpddr2_ac_timings *timings,
			       const struct lpddr2_min_tck *min_tck)
{
	u32 tim2 = 0, val = 0;

	val = max(min_tck->tCKE, timings->tCKE) - 1;
	tim2 |= val << EMIF_REG_T_CKE_SHIFT;

	val = max(min_tck->tRTP, ns_x2_2_cycles(timings->tRTPx2)) - 1;
	tim2 |= val << EMIF_REG_T_RTP_SHIFT;

	/*
	 * tXSRD = tRFCab + 10 ns. XSRD and XSNR should have the
	 * same value
	 */
	val = ns_2_cycles(timings->tXSR) - 1;
	tim2 |= val << EMIF_REG_T_XSRD_SHIFT;
	tim2 |= val << EMIF_REG_T_XSNR_SHIFT;

	val = max(min_tck->tXP, ns_x2_2_cycles(timings->tXPx2)) - 1;
	tim2 |= val << EMIF_REG_T_XP_SHIFT;

	return tim2;
}

static u32 get_sdram_tim_3_reg(const struct lpddr2_ac_timings *timings,
			       const struct lpddr2_min_tck *min_tck,
			       const struct lpddr2_addressing *addressing)
{
	u32 tim3 = 0, val = 0;

	val = min(timings->tRASmax * 10 / addressing->t_REFI_us_x10 - 1, 0xF);
	tim3 |= val << EMIF_REG_T_RAS_MAX_SHIFT;

	val = ns_2_cycles(timings->tRFCab) - 1;
	tim3 |= val << EMIF_REG_T_RFC_SHIFT;

	val = ns_x2_2_cycles(timings->tDQSCKMAXx2) - 1;
	tim3 |= val << EMIF_REG_T_TDQSCKMAX_SHIFT;

	val = ns_2_cycles(timings->tZQCS) - 1;
	tim3 |= val << EMIF_REG_ZQ_ZQCS_SHIFT;

	val = max(min_tck->tCKESR, ns_2_cycles(timings->tCKESR)) - 1;
	tim3 |= val << EMIF_REG_T_CKESR_SHIFT;

	return tim3;
}

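/*
 * Worked example for the T_RAS_MAX field above: tRASmax = 70 us with
 * tREFI = 7.8 us (t_REFI_us_x10 = 78) gives 70 * 10 / 78 = 8 in
 * integer division, so 7 is programmed - the field counts in units
 * of tREFI.
 */
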
static u32 get_zq_config_reg(const struct lpddr2_device_details *cs1_device,
			     const struct lpddr2_addressing *addressing,
			     u8 volt_ramp)
{
	u32 zq = 0, val = 0;

	if (volt_ramp)
		val =
		    EMIF_ZQCS_INTERVAL_DVFS_IN_US * 10 /
		    addressing->t_REFI_us_x10;
	else
		val =
		    EMIF_ZQCS_INTERVAL_NORMAL_IN_US * 10 /
		    addressing->t_REFI_us_x10;
	zq |= val << EMIF_REG_ZQ_REFINTERVAL_SHIFT;

	zq |= (REG_ZQ_ZQCL_MULT - 1) << EMIF_REG_ZQ_ZQCL_MULT_SHIFT;

	zq |= (REG_ZQ_ZQINIT_MULT - 1) << EMIF_REG_ZQ_ZQINIT_MULT_SHIFT;

	zq |= REG_ZQ_SFEXITEN_ENABLE << EMIF_REG_ZQ_SFEXITEN_SHIFT;

	/*
	 * Assuming that two chipselects have a single calibration resistor
	 * If there are indeed two calibration resistors, then this flag should
	 * be enabled to take advantage of the dual calibration feature.
	 * This data should ideally come from board files. But considering
	 * that none of the boards today have calibration resistors per CS,
	 * it would be an unnecessary overhead.
	 */
	zq |= REG_ZQ_DUALCALEN_DISABLE << EMIF_REG_ZQ_DUALCALEN_SHIFT;

	zq |= REG_ZQ_CS0EN_ENABLE << EMIF_REG_ZQ_CS0EN_SHIFT;

	zq |= (cs1_device ? 1 : 0) << EMIF_REG_ZQ_CS1EN_SHIFT;

	return zq;
}

static u32 get_temp_alert_config(const struct lpddr2_device_details *cs1_device,
				 const struct lpddr2_addressing *addressing,
				 u8 is_derated)
{
	u32 alert = 0, interval;

	interval =
	    TEMP_ALERT_POLL_INTERVAL_MS * 10000 / addressing->t_REFI_us_x10;

	alert |= interval << EMIF_REG_TA_REFINTERVAL_SHIFT;

	alert |= TEMP_ALERT_CONFIG_DEVCT_1 << EMIF_REG_TA_DEVCNT_SHIFT;

	alert |= TEMP_ALERT_CONFIG_DEVWDT_32 << EMIF_REG_TA_DEVWDT_SHIFT;

	alert |= 1 << EMIF_REG_TA_SFEXITEN_SHIFT;

	alert |= 1 << EMIF_REG_TA_CS0EN_SHIFT;

	alert |= (cs1_device ? 1 : 0) << EMIF_REG_TA_CS1EN_SHIFT;

	return alert;
}

static u32 get_read_idle_ctrl_reg(u8 volt_ramp)
{
	u32 idle = 0, val = 0;

	if (volt_ramp)
		val = ns_2_cycles(READ_IDLE_INTERVAL_DVFS) / 64 - 1;
	else
		/* Maximum value in normal conditions - suggested by hw team */
		val = 0x1FF;
	idle |= val << EMIF_REG_READ_IDLE_INTERVAL_SHIFT;

	idle |= EMIF_REG_READ_IDLE_LEN_VAL << EMIF_REG_READ_IDLE_LEN_SHIFT;

	return idle;
}

static u32 get_ddr_phy_ctrl_1(u32 freq, u8 RL)
{
	u32 phy = 0, val = 0;

	phy |= (RL + 2) << EMIF_REG_READ_LATENCY_SHIFT;

	if (freq <= 100000000)
		val = EMIF_DLL_SLAVE_DLY_CTRL_100_MHZ_AND_LESS;
	else if (freq <= 200000000)
		val = EMIF_DLL_SLAVE_DLY_CTRL_200_MHZ;
	else
		val = EMIF_DLL_SLAVE_DLY_CTRL_400_MHZ;
	phy |= val << EMIF_REG_DLL_SLAVE_DLY_CTRL_SHIFT;

	/* Other fields are constant magic values. Hardcode them together */
	phy |= EMIF_DDR_PHY_CTRL_1_BASE_VAL <<
		EMIF_EMIF_DDR_PHY_CTRL_1_BASE_VAL_SHIFT;

	return phy;
}

static u32 get_emif_mem_size(u32 base)
{
	u32 size_mbytes = 0, temp;
	struct emif_device_details dev_details;
	struct lpddr2_device_details cs0_dev_details, cs1_dev_details;
	u32 emif_nr = emif_num(base);

	emif_reset_phy(base);
	dev_details.cs0_device_details = emif_get_device_details(emif_nr, CS0,
						&cs0_dev_details);
	dev_details.cs1_device_details = emif_get_device_details(emif_nr, CS1,
						&cs1_dev_details);
	emif_reset_phy(base);

	if (dev_details.cs0_device_details) {
		temp = dev_details.cs0_device_details->density;
		size_mbytes += lpddr2_density_2_size_in_mbytes[temp];
	}

	if (dev_details.cs1_device_details) {
		temp = dev_details.cs1_device_details->density;
		size_mbytes += lpddr2_density_2_size_in_mbytes[temp];
	}
	/* convert to bytes */
	return size_mbytes << 20;
}

/* Gets the encoding corresponding to a given DMM section size */
u32 get_dmm_section_size_map(u32 section_size)
{
	/*
	 * Section size mapping:
	 * 0x0: 16-MiB section
	 * 0x1: 32-MiB section
	 * 0x2: 64-MiB section
	 * 0x3: 128-MiB section
	 * 0x4: 256-MiB section
	 * 0x5: 512-MiB section
	 * 0x6: 1-GiB section
	 * 0x7: 2-GiB section
	 */
	section_size >>= 24; /* divide by 16 MB */
	return log_2_n_round_down(section_size);
}

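/*
 * Usage example: a 512 MiB section is 0x20000000 bytes;
 * 0x20000000 >> 24 = 0x20 and log2(0x20) = 5, so the function returns
 * the 0x5 encoding from the table above.
 */
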
static void emif_calculate_regs(
		const struct emif_device_details *emif_dev_details,
		u32 freq, struct emif_regs *regs)
{
	u32 temp, sys_freq;
	const struct lpddr2_addressing *addressing;
	const struct lpddr2_ac_timings *timings;
	const struct lpddr2_min_tck *min_tck;
	const struct lpddr2_device_details *cs0_dev_details =
					emif_dev_details->cs0_device_details;
	const struct lpddr2_device_details *cs1_dev_details =
					emif_dev_details->cs1_device_details;
	const struct lpddr2_device_timings *cs0_dev_timings =
					emif_dev_details->cs0_device_timings;

	emif_assert(emif_dev_details);

	/*
	 * You can not have a device on CS1 without one on CS0
	 * So configuring EMIF without a device on CS0 doesn't
	 * make sense
	 */
	emif_assert(cs0_dev_details);
	emif_assert(cs0_dev_details->type != LPDDR2_TYPE_NVM);
	/*
	 * If there is a device on CS1 it should be the same type as CS0
	 * (or NVM. But NVM is not supported in this driver yet)
	 */
	emif_assert((cs1_dev_details == NULL) ||
		    (cs1_dev_details->type == LPDDR2_TYPE_NVM) ||
		    (cs0_dev_details->type == cs1_dev_details->type));
	emif_assert(freq <= MAX_LPDDR2_FREQ);

	set_ddr_clk_period(freq);

	/*
	 * The device on CS0 is used for all timing calculations
	 * There is only one set of registers for timings per EMIF. So, if the
	 * second CS (CS1) has a device, it should have the same timings as the
	 * device on CS0
	 */
	timings = get_timings_table(cs0_dev_timings->ac_timings, freq);
	emif_assert(timings);
	min_tck = cs0_dev_timings->min_tck;

	temp = addressing_table_index(cs0_dev_details->type,
				      cs0_dev_details->density,
				      cs0_dev_details->io_width);

	emif_assert((temp >= 0));
	addressing = &(addressing_table[temp]);
	emif_assert(addressing);

	sys_freq = get_sys_clk_freq();

	regs->sdram_config_init = get_sdram_config_reg(cs0_dev_details,
							cs1_dev_details,
							addressing, RL_BOOT);

	regs->sdram_config = get_sdram_config_reg(cs0_dev_details,
						  cs1_dev_details,
						  addressing, RL_FINAL);

	regs->ref_ctrl = get_sdram_ref_ctrl(freq, addressing);

	regs->sdram_tim1 = get_sdram_tim_1_reg(timings, min_tck, addressing);

	regs->sdram_tim2 = get_sdram_tim_2_reg(timings, min_tck);

	regs->sdram_tim3 = get_sdram_tim_3_reg(timings, min_tck, addressing);

	regs->read_idle_ctrl = get_read_idle_ctrl_reg(LPDDR2_VOLTAGE_STABLE);

	regs->temp_alert_config =
	    get_temp_alert_config(cs1_dev_details, addressing, 0);

	regs->zq_config = get_zq_config_reg(cs1_dev_details, addressing,
					    LPDDR2_VOLTAGE_STABLE);

	regs->emif_ddr_phy_ctlr_1_init =
	    get_ddr_phy_ctrl_1(sys_freq / 2, RL_BOOT);

	regs->emif_ddr_phy_ctlr_1 =
	    get_ddr_phy_ctrl_1(freq, RL_FINAL);

	print_timing_reg(regs->sdram_config_init);
	print_timing_reg(regs->sdram_config);
	print_timing_reg(regs->ref_ctrl);
	print_timing_reg(regs->sdram_tim1);
	print_timing_reg(regs->sdram_tim2);
	print_timing_reg(regs->sdram_tim3);
	print_timing_reg(regs->read_idle_ctrl);
	print_timing_reg(regs->temp_alert_config);
	print_timing_reg(regs->zq_config);
	print_timing_reg(regs->emif_ddr_phy_ctlr_1);
	print_timing_reg(regs->emif_ddr_phy_ctlr_1_init);
}
#endif /* CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS */

#ifdef CONFIG_SYS_AUTOMATIC_SDRAM_DETECTION
const char *get_lpddr2_type(u8 type_id)
{
	switch (type_id) {
	case LPDDR2_TYPE_S4:
		return "LPDDR2-S4";
	case LPDDR2_TYPE_S2:
		return "LPDDR2-S2";
	default:
		return NULL;
	}
}

const char *get_lpddr2_io_width(u8 width_id)
{
	switch (width_id) {
	case LPDDR2_IO_WIDTH_8:
		return "x8";
	case LPDDR2_IO_WIDTH_16:
		return "x16";
	case LPDDR2_IO_WIDTH_32:
		return "x32";
	default:
		return NULL;
	}
}

const char *get_lpddr2_manufacturer(u32 manufacturer)
{
	switch (manufacturer) {
	case LPDDR2_MANUFACTURER_SAMSUNG:
		return "Samsung";
	case LPDDR2_MANUFACTURER_QIMONDA:
		return "Qimonda";
	case LPDDR2_MANUFACTURER_ELPIDA:
		return "Elpida";
	case LPDDR2_MANUFACTURER_ETRON:
		return "Etron";
	case LPDDR2_MANUFACTURER_NANYA:
		return "Nanya";
	case LPDDR2_MANUFACTURER_HYNIX:
		return "Hynix";
	case LPDDR2_MANUFACTURER_MOSEL:
		return "Mosel";
	case LPDDR2_MANUFACTURER_WINBOND:
		return "Winbond";
	case LPDDR2_MANUFACTURER_ESMT:
		return "ESMT";
	case LPDDR2_MANUFACTURER_SPANSION:
		return "Spansion";
	case LPDDR2_MANUFACTURER_SST:
		return "SST";
	case LPDDR2_MANUFACTURER_ZMOS:
		return "ZMOS";
	case LPDDR2_MANUFACTURER_INTEL:
		return "Intel";
	case LPDDR2_MANUFACTURER_NUMONYX:
		return "Numonyx";
	case LPDDR2_MANUFACTURER_MICRON:
		return "Micron";
	default:
		return NULL;
	}
}

static void display_sdram_details(u32 emif_nr, u32 cs,
				  struct lpddr2_device_details *device)
{
	const char *mfg_str;
	const char *type_str;
	char density_str[10];
	u32 density;

	debug("EMIF%d CS%d\t", emif_nr, cs);

	if (!device) {
		debug("none\n");
		return;
	}

	mfg_str = get_lpddr2_manufacturer(device->manufacturer);
	type_str = get_lpddr2_type(device->type);

	density = lpddr2_density_2_size_in_mbytes[device->density];
	if ((density / 1024 * 1024) == density) {
		density /= 1024;
		sprintf(density_str, "%d GB", density);
	} else
		sprintf(density_str, "%d MB", density);
	if (mfg_str && type_str)
		debug("%s\t\t%s\t%s\n", mfg_str, type_str, density_str);
}

static u8 is_lpddr2_sdram_present(u32 base, u32 cs,
				  struct lpddr2_device_details *lpddr2_device)
{
	u32 mr = 0, temp;

	mr = get_mr(base, cs, LPDDR2_MR0);
	if (mr > 0xFF) {
		/* Mode register value bigger than 8 bit */
		return 0;
	}

	temp = (mr & LPDDR2_MR0_DI_MASK) >> LPDDR2_MR0_DI_SHIFT;
	if (temp) {
		/* Not SDRAM */
		return 0;
	}
	temp = (mr & LPDDR2_MR0_DNVI_MASK) >> LPDDR2_MR0_DNVI_SHIFT;

	if (temp) {
		/* DNV supported - But DNV is only supported for NVM */
		return 0;
	}

	mr = get_mr(base, cs, LPDDR2_MR4);
	if (mr > 0xFF) {
		/* Mode register value bigger than 8 bit */
		return 0;
	}

	mr = get_mr(base, cs, LPDDR2_MR5);
	if (mr > 0xFF) {
		/* Mode register value bigger than 8 bit */
		return 0;
	}

	if (!get_lpddr2_manufacturer(mr)) {
		/* Manufacturer not identified */
		return 0;
	}
	lpddr2_device->manufacturer = mr;

	mr = get_mr(base, cs, LPDDR2_MR6);
	if (mr > 0xFF) {
		/* Mode register value bigger than 8 bit */
		return 0;
	}

	mr = get_mr(base, cs, LPDDR2_MR7);
	if (mr > 0xFF) {
		/* Mode register value bigger than 8 bit */
		return 0;
	}

	mr = get_mr(base, cs, LPDDR2_MR8);
	if (mr > 0xFF) {
		/* Mode register value bigger than 8 bit */
		return 0;
	}

	temp = (mr & MR8_TYPE_MASK) >> MR8_TYPE_SHIFT;
	if (!get_lpddr2_type(temp)) {
		/* Not SDRAM */
		return 0;
	}
	lpddr2_device->type = temp;

	temp = (mr & MR8_DENSITY_MASK) >> MR8_DENSITY_SHIFT;
	if (temp > LPDDR2_DENSITY_32Gb) {
		/* Density not supported */
		return 0;
	}
	lpddr2_device->density = temp;

	temp = (mr & MR8_IO_WIDTH_MASK) >> MR8_IO_WIDTH_SHIFT;
	if (!get_lpddr2_io_width(temp)) {
		/* IO width unsupported value */
		return 0;
	}
	lpddr2_device->io_width = temp;

	/*
	 * If all the above tests pass we should
	 * have a device on this chip-select
	 */
	return 1;
}

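/*
 * Example MR8 decode, per the JESD209-2 layout implied by the masks
 * above (type in bits [1:0], density in bits [5:2], I/O width in bits
 * [7:6]): a readback of 0x14 parses as type 0 (LPDDR2-S4), density
 * code 5 (2Gb) and width code 0 (x32).
 */
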
struct lpddr2_device_details *emif_get_device_details(u32 emif_nr, u8 cs,
			struct lpddr2_device_details *lpddr2_dev_details)
{
	u32 phy;
	u32 base = (emif_nr == 1) ? EMIF1_BASE : EMIF2_BASE;

	struct emif_reg_struct *emif = (struct emif_reg_struct *)base;

	if (!lpddr2_dev_details)
		return NULL;

	/* Do the minimum init for mode register accesses */
	if (!(running_from_sdram() || warm_reset())) {
		phy = get_ddr_phy_ctrl_1(get_sys_clk_freq() / 2, RL_BOOT);
		writel(phy, &emif->emif_ddr_phy_ctrl_1);
	}

	if (!(is_lpddr2_sdram_present(base, cs, lpddr2_dev_details)))
		return NULL;

	display_sdram_details(emif_num(base), cs, lpddr2_dev_details);

	return lpddr2_dev_details;
}
#endif /* CONFIG_SYS_AUTOMATIC_SDRAM_DETECTION */

static void do_sdram_init(u32 base)
{
	const struct emif_regs *regs;
	u32 in_sdram, emif_nr;

	debug(">>do_sdram_init() %x\n", base);

	in_sdram = running_from_sdram();
	emif_nr = (base == EMIF1_BASE) ? 1 : 2;

#ifdef CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS
	emif_get_reg_dump(emif_nr, &regs);
	if (!regs) {
		debug("EMIF: reg dump not provided\n");
		return;
	}
#else
	/*
	 * The user has not provided the register values. We need to
	 * calculate it based on the timings and the DDR frequency
	 */
	struct emif_device_details dev_details;
	struct emif_regs calculated_regs;

	/*
	 * Get device details:
	 * - Discovered if CONFIG_SYS_AUTOMATIC_SDRAM_DETECTION is set
	 * - Obtained from user otherwise
	 */
	struct lpddr2_device_details cs0_dev_details, cs1_dev_details;
	emif_reset_phy(base);
	dev_details.cs0_device_details = emif_get_device_details(emif_nr, CS0,
						&cs0_dev_details);
	dev_details.cs1_device_details = emif_get_device_details(emif_nr, CS1,
						&cs1_dev_details);
	emif_reset_phy(base);

	/* Return if no devices on this EMIF */
	if (!dev_details.cs0_device_details &&
	    !dev_details.cs1_device_details) {
		return;
	}

	/*
	 * Get device timings:
	 * - Default timings specified by JESD209-2 if
	 *   CONFIG_SYS_DEFAULT_LPDDR2_TIMINGS is set
	 * - Obtained from user otherwise
	 */
	emif_get_device_timings(emif_nr, &dev_details.cs0_device_timings,
				&dev_details.cs1_device_timings);

	/* Calculate the register values */
	emif_calculate_regs(&dev_details, omap_ddr_clk(), &calculated_regs);
	regs = &calculated_regs;
#endif /* CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS */

	/*
	 * Initializing the DDR device can not happen from SDRAM.
	 * Changing the timing registers in EMIF can happen (going from one
	 * OPP to another)
	 */
	if (!in_sdram && (!warm_reset() || is_dra7xx())) {
		if (emif_sdram_type(regs->sdram_config) ==
		    EMIF_SDRAM_TYPE_LPDDR2)
			lpddr2_init(base, regs);
#ifndef CONFIG_OMAP44XX
		else
			ddr3_init(base, regs);
#endif
	}
#ifdef CONFIG_OMAP54XX
	if (warm_reset() && (emif_sdram_type(regs->sdram_config) ==
	    EMIF_SDRAM_TYPE_DDR3) && !is_dra7xx()) {
		set_lpmode_selfrefresh(base);
		emif_reset_phy(base);
		omap5_ddr3_leveling(base, regs);
	}
#endif

	/* Write to the shadow registers */
	emif_update_timings(base, regs);

	debug("<<do_sdram_init() %x\n", base);
}

void emif_post_init_config(u32 base)
{
	struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
	u32 omap_rev = omap_revision();

	/* reset phy on ES2.0 */
	if (omap_rev == OMAP4430_ES2_0)
		emif_reset_phy(base);

	/* Put EMIF back in smart idle on ES1.0 */
	if (omap_rev == OMAP4430_ES1_0)
		writel(0x80000000, &emif->emif_pwr_mgmt_ctrl);
}

void dmm_init(u32 base)
{
	const struct dmm_lisa_map_regs *lisa_map_regs;
	u32 i, section, valid;

#ifdef CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS
	emif_get_dmm_regs(&lisa_map_regs);
#else
	u32 emif1_size, emif2_size, mapped_size, section_map = 0;
	u32 section_cnt, sys_addr;
	struct dmm_lisa_map_regs lis_map_regs_calculated = {0};

	mapped_size = 0;
	section_cnt = 3;
	sys_addr = CONFIG_SYS_SDRAM_BASE;
	emif1_size = get_emif_mem_size(EMIF1_BASE);
	emif2_size = get_emif_mem_size(EMIF2_BASE);
	debug("emif1_size 0x%x emif2_size 0x%x\n", emif1_size, emif2_size);

	if (!emif1_size && !emif2_size)
		return;

	/* symmetric interleaved section */
	if (emif1_size && emif2_size) {
		mapped_size = min(emif1_size, emif2_size);
		section_map = DMM_LISA_MAP_INTERLEAVED_BASE_VAL;
		section_map |= 0 << EMIF_SDRC_ADDR_SHIFT;
		/* only MSB */
		section_map |= (sys_addr >> 24) <<
				EMIF_SYS_ADDR_SHIFT;
		section_map |= get_dmm_section_size_map(mapped_size * 2)
				<< EMIF_SYS_SIZE_SHIFT;
		lis_map_regs_calculated.dmm_lisa_map_3 = section_map;
		emif1_size -= mapped_size;
		emif2_size -= mapped_size;
		sys_addr += (mapped_size * 2);
		section_cnt--;
	}

	/*
	 * Single EMIF section (we can have a maximum of 1 single EMIF
	 * section - either EMIF1 or EMIF2 or none, but not both)
	 */
	if (emif1_size) {
		section_map = DMM_LISA_MAP_EMIF1_ONLY_BASE_VAL;
		section_map |= get_dmm_section_size_map(emif1_size)
				<< EMIF_SYS_SIZE_SHIFT;
		/* only MSB */
		section_map |= (mapped_size >> 24) <<
				EMIF_SDRC_ADDR_SHIFT;
		/* only MSB */
		section_map |= (sys_addr >> 24) << EMIF_SYS_ADDR_SHIFT;
		section_cnt--;
	}
	if (emif2_size) {
		section_map = DMM_LISA_MAP_EMIF2_ONLY_BASE_VAL;
		section_map |= get_dmm_section_size_map(emif2_size) <<
				EMIF_SYS_SIZE_SHIFT;
		/* only MSB */
		section_map |= mapped_size >> 24 << EMIF_SDRC_ADDR_SHIFT;
		/* only MSB */
		section_map |= sys_addr >> 24 << EMIF_SYS_ADDR_SHIFT;
		section_cnt--;
	}

	if (section_cnt == 2) {
		/* Only 1 section - either symmetric or single EMIF */
		lis_map_regs_calculated.dmm_lisa_map_3 = section_map;
		lis_map_regs_calculated.dmm_lisa_map_2 = 0;
		lis_map_regs_calculated.dmm_lisa_map_1 = 0;
	} else {
		/* 2 sections - 1 symmetric, 1 single EMIF */
		lis_map_regs_calculated.dmm_lisa_map_2 = section_map;
		lis_map_regs_calculated.dmm_lisa_map_1 = 0;
	}

	/* TRAP for invalid TILER mappings in section 0 */
	lis_map_regs_calculated.dmm_lisa_map_0 = DMM_LISA_MAP_0_INVAL_ADDR_TRAP;

	if (omap_revision() >= OMAP4460_ES1_0)
		lis_map_regs_calculated.is_ma_present = 1;

	lisa_map_regs = &lis_map_regs_calculated;
#endif
	struct dmm_lisa_map_regs *hw_lisa_map_regs =
	    (struct dmm_lisa_map_regs *)base;

	writel(0, &hw_lisa_map_regs->dmm_lisa_map_3);
	writel(0, &hw_lisa_map_regs->dmm_lisa_map_2);
	writel(0, &hw_lisa_map_regs->dmm_lisa_map_1);
	writel(0, &hw_lisa_map_regs->dmm_lisa_map_0);

	writel(lisa_map_regs->dmm_lisa_map_3,
	       &hw_lisa_map_regs->dmm_lisa_map_3);
	writel(lisa_map_regs->dmm_lisa_map_2,
	       &hw_lisa_map_regs->dmm_lisa_map_2);
	writel(lisa_map_regs->dmm_lisa_map_1,
	       &hw_lisa_map_regs->dmm_lisa_map_1);
	writel(lisa_map_regs->dmm_lisa_map_0,
	       &hw_lisa_map_regs->dmm_lisa_map_0);

	if (lisa_map_regs->is_ma_present) {
		hw_lisa_map_regs =
		    (struct dmm_lisa_map_regs *)MA_BASE;

		writel(lisa_map_regs->dmm_lisa_map_3,
		       &hw_lisa_map_regs->dmm_lisa_map_3);
		writel(lisa_map_regs->dmm_lisa_map_2,
		       &hw_lisa_map_regs->dmm_lisa_map_2);
		writel(lisa_map_regs->dmm_lisa_map_1,
		       &hw_lisa_map_regs->dmm_lisa_map_1);
		writel(lisa_map_regs->dmm_lisa_map_0,
		       &hw_lisa_map_regs->dmm_lisa_map_0);

		setbits_le32(MA_PRIORITY, MA_HIMEM_INTERLEAVE_UN_MASK);
	}

	/*
	 * EMIF should be configured only when
	 * memory is mapped on it. Using emif1_enabled
	 * and emif2_enabled variables for this.
	 */
	emif1_enabled = 0;
	emif2_enabled = 0;
	for (i = 0; i < 4; i++) {
		section	= __raw_readl(DMM_BASE + i*4);
		valid = (section & EMIF_SDRC_MAP_MASK) >>
			(EMIF_SDRC_MAP_SHIFT);
		if (valid == 3) {
			emif1_enabled = 1;
			emif2_enabled = 1;
			break;
		}

		if (valid == 1)
			emif1_enabled = 1;

		if (valid == 2)
			emif2_enabled = 1;
	}
}

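/*
 * Worked example for the section carving above (hypothetical sizes):
 * with 1 GiB on each EMIF, the whole 2 GiB is covered by a single
 * interleaved section in dmm_lisa_map_3 - sys_addr 0x80000000 with
 * size code get_dmm_section_size_map(0x80000000) = 0x7 - and no
 * single-EMIF section remains. With 1 GiB + 512 MiB instead, a 1 GiB
 * interleaved section is followed by a 512 MiB EMIF1-only section at
 * sys_addr 0xC0000000.
 */
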
static void do_bug0039_workaround(u32 base)
{
	u32 val, i, clkctrl;
	struct emif_reg_struct *emif_base = (struct emif_reg_struct *)base;
	const struct read_write_regs *bug_00339_regs;
	u32 iterations;
	u32 *phy_status_base = &emif_base->emif_ddr_phy_status[0];
	u32 *phy_ctrl_base = &emif_base->emif_ddr_ext_phy_ctrl_1;

	if (is_dra7xx())
		phy_status_base++;

	bug_00339_regs = get_bug_regs(&iterations);

	/* Put EMIF in to idle */
	clkctrl = __raw_readl((*prcm)->cm_memif_clkstctrl);
	__raw_writel(0x0, (*prcm)->cm_memif_clkstctrl);

	/* Copy the phy status registers in to phy ctrl shadow registers */
	for (i = 0; i < iterations; i++) {
		val = __raw_readl(phy_status_base +
				  bug_00339_regs[i].read_reg - 1);

		__raw_writel(val, phy_ctrl_base +
			     ((bug_00339_regs[i].write_reg - 1) << 1));

		__raw_writel(val, phy_ctrl_base +
			     (bug_00339_regs[i].write_reg << 1) - 1);
	}

	/* Disable leveling */
	writel(0x0, &emif_base->emif_rd_wr_lvl_rmp_ctl);

	__raw_writel(clkctrl, (*prcm)->cm_memif_clkstctrl);
}

/*
 * SDRAM initialization:
 * SDRAM initialization has two parts:
 * 1. Configuring the SDRAM device
 * 2. Update the AC timings related parameters in the EMIF module
 * (1) should be done only once and should not be done while we are
 * running from SDRAM.
 * (2) can and should be done more than once if OPP changes.
 * Particularly, this may be needed when we boot without SPL and
 * are using the Configuration Header (CH). ROM code supports only 50% OPP
 * at boot (low power boot). So U-Boot has to switch to OPP100 and update
 * the frequency. So,
 * Doing (1) and (2) makes sense - first time initialization
 * Doing (2) and not (1) makes sense - OPP change (when using CH)
 * Doing (1) and not (2) doesn't make sense
 * See do_sdram_init() for the details
 */
void sdram_init(void)
{
	u32 in_sdram, size_prog, size_detect;
	struct emif_reg_struct *emif = (struct emif_reg_struct *)EMIF1_BASE;
	u32 sdram_type = emif_sdram_type(emif->emif_sdram_config);

	debug(">>sdram_init()\n");

	if (omap_hw_init_context() == OMAP_INIT_CONTEXT_UBOOT_AFTER_SPL)
		return;

	in_sdram = running_from_sdram();
	debug("in_sdram = %d\n", in_sdram);

	if (!in_sdram) {
		if ((sdram_type == EMIF_SDRAM_TYPE_LPDDR2) && !warm_reset())
			bypass_dpll((*prcm)->cm_clkmode_dpll_core);
		else if (sdram_type == EMIF_SDRAM_TYPE_DDR3)
			writel(CM_DLL_CTRL_NO_OVERRIDE, (*prcm)->cm_dll_ctrl);
	}

	if (!in_sdram)
		dmm_init(DMM_BASE);

	if (emif1_enabled)
		do_sdram_init(EMIF1_BASE);

	if (emif2_enabled)
		do_sdram_init(EMIF2_BASE);

	if (!(in_sdram || warm_reset())) {
		if (emif1_enabled)
			emif_post_init_config(EMIF1_BASE);
		if (emif2_enabled)
			emif_post_init_config(EMIF2_BASE);
	}

	/* for the shadow registers to take effect */
	if (sdram_type == EMIF_SDRAM_TYPE_LPDDR2)
		freq_update_core();

	/* Do some testing after the init */
	if (!in_sdram) {
		size_prog = omap_sdram_size();
		size_prog = log_2_n_round_down(size_prog);
		size_prog = (1 << size_prog);

		size_detect = get_ram_size((long *)CONFIG_SYS_SDRAM_BASE,
					   size_prog);
		/* Compare with the size programmed */
		if (size_detect != size_prog) {
			printf("SDRAM: identified size not same as expected"
				" size identified: %x expected: %x\n",
				size_detect,
				size_prog);
		} else
			debug("get_ram_size() successful");
	}

#if defined(CONFIG_TI_SECURE_DEVICE)
	/*
	 * On HS devices, do static EMIF firewall configuration
	 * but only do it if not already running in SDRAM
	 */
	if (!in_sdram)
		if (0 != secure_emif_reserve())
			hang();

	/* On HS devices, ensure static EMIF firewall APIs are locked */
	if (0 != secure_emif_firewall_lock())
		hang();
#endif

	if (sdram_type == EMIF_SDRAM_TYPE_DDR3 &&
	    (!in_sdram && !warm_reset()) && (!is_dra7xx())) {
		if (emif1_enabled)
			do_bug0039_workaround(EMIF1_BASE);
		if (emif2_enabled)
			do_bug0039_workaround(EMIF2_BASE);
	}

	debug("<<sdram_init()\n");
}