// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Marvell International Ltd. and its affiliates
 */

#include "mv_ddr_common.h"
#include "mv_ddr_training_db.h"
#include "mv_ddr_regs.h"
#include "mv_ddr_sys_env_lib.h"
#define DDR_INTERFACES_NUM		1
#define DDR_INTERFACE_OCTETS_NUM	5
/*
 * 1. L2 filter should be set at binary header to 0xD000000,
 *    to avoid conflict with internal register IO.
 * 2. U-Boot modifies internal registers base to 0xf100000,
 *    and then should update L2 filter accordingly to 0xf000000 (3.75 GB)
 */
#define L2_FILTER_FOR_MAX_MEMORY_SIZE	0xC0000000 /* temporary limit l2 filter to 3 GB (LSP issue) */
#define ADDRESS_FILTERING_END_REGISTER	0x8c04

#define DYNAMIC_CS_SIZE_CONFIG
#define DISABLE_L2_FILTERING_DURING_DDR_TRAINING
/* Thermal Sensor Registers */
#define TSEN_CONTROL_LSB_REG		0xE4070
#define TSEN_CONTROL_LSB_TC_TRIM_OFFSET	0
#define TSEN_CONTROL_LSB_TC_TRIM_MASK	(0x7 << TSEN_CONTROL_LSB_TC_TRIM_OFFSET)
#define TSEN_CONTROL_MSB_REG		0xE4074
#define TSEN_CONTROL_MSB_RST_OFFSET	8
#define TSEN_CONTROL_MSB_RST_MASK	(0x1 << TSEN_CONTROL_MSB_RST_OFFSET)
#define TSEN_STATUS_REG			0xe4078
#define TSEN_STATUS_READOUT_VALID_OFFSET	10
#define TSEN_STATUS_READOUT_VALID_MASK	(0x1 << \
					 TSEN_STATUS_READOUT_VALID_OFFSET)
#define TSEN_STATUS_TEMP_OUT_OFFSET	0
#define TSEN_STATUS_TEMP_OUT_MASK	(0x3ff << TSEN_STATUS_TEMP_OUT_OFFSET)
static struct dlb_config ddr3_dlb_config_table[] = {
	{DLB_CTRL_REG, 0x2000005c},
	{DLB_BUS_OPT_WT_REG, 0x00880000},
	{DLB_AGING_REG, 0x0f7f007f},
	{DLB_EVICTION_CTRL_REG, 0x0000129f},
	{DLB_EVICTION_TIMERS_REG, 0x00ff0000},
	{DLB_WTS_DIFF_CS_REG, 0x04030802},
	{DLB_WTS_DIFF_BG_REG, 0x00000a02},
	{DLB_WTS_SAME_BG_REG, 0x09000a01},
	{DLB_WTS_CMDS_REG, 0x00020005},
	{DLB_WTS_ATTR_PRIO_REG, 0x00060f10},
	{DLB_QUEUE_MAP_REG, 0x00000543},
	{DLB_SPLIT_REG, 0x00000000},
	{DLB_USER_CMD_REG, 0x00000000},
	{0x0, 0x0} /* delimiter - the table is scanned until reg_addr == 0 */
};
static struct dlb_config *sys_env_dlb_config_ptr_get(void)
{
	return &ddr3_dlb_config_table[0];
}
static u8 a38x_bw_per_freq[MV_DDR_FREQ_LAST] = {
	0x3,	/* MV_DDR_FREQ_100 */
	0x4,	/* MV_DDR_FREQ_400 */
	0x4,	/* MV_DDR_FREQ_533 */
	0x5,	/* MV_DDR_FREQ_667 */
	0x5,	/* MV_DDR_FREQ_800 */
	0x5,	/* MV_DDR_FREQ_933 */
	0x5,	/* MV_DDR_FREQ_1066 */
	0x3,	/* MV_DDR_FREQ_311 */
	0x3,	/* MV_DDR_FREQ_333 */
	0x4,	/* MV_DDR_FREQ_467 */
	0x5,	/* MV_DDR_FREQ_850 */
	0x5,	/* MV_DDR_FREQ_600 */
	0x3,	/* MV_DDR_FREQ_300 */
	0x5,	/* MV_DDR_FREQ_900 */
	0x3,	/* MV_DDR_FREQ_360 */
	0x5	/* MV_DDR_FREQ_1000 */
};
static u8 a38x_rate_per_freq[MV_DDR_FREQ_LAST] = {
	0x1,	/* MV_DDR_FREQ_100 */
	0x2,	/* MV_DDR_FREQ_400 */
	0x2,	/* MV_DDR_FREQ_533 */
	0x2,	/* MV_DDR_FREQ_667 */
	0x2,	/* MV_DDR_FREQ_800 */
	0x3,	/* MV_DDR_FREQ_933 */
	0x3,	/* MV_DDR_FREQ_1066 */
	0x1,	/* MV_DDR_FREQ_311 */
	0x1,	/* MV_DDR_FREQ_333 */
	0x2,	/* MV_DDR_FREQ_467 */
	0x2,	/* MV_DDR_FREQ_850 */
	0x2,	/* MV_DDR_FREQ_600 */
	0x1,	/* MV_DDR_FREQ_300 */
	0x2,	/* MV_DDR_FREQ_900 */
	0x1,	/* MV_DDR_FREQ_360 */
	0x2	/* MV_DDR_FREQ_1000 */
};
static u16 a38x_vco_freq_per_sar_ref_clk_25_mhz[] = {

static u16 a38x_vco_freq_per_sar_ref_clk_40_mhz[] = {

static u32 async_mode_at_tf;
static u32 dq_bit_map_2_phy_pin[] = {
	1, 0, 2, 6, 9, 8, 3, 7,	/* 0 */
	8, 9, 1, 7, 2, 6, 3, 0,	/* 1 */
	3, 9, 7, 8, 1, 0, 2, 6,	/* 2 */
	1, 0, 6, 2, 8, 3, 7, 9,	/* 3 */
	0, 1, 2, 9, 7, 8, 3, 6,	/* 4 */
void mv_ddr_mem_scrubbing(void)
{
	ddr3_new_tip_ecc_scrub();
}
static int ddr3_tip_a38x_set_divider(u8 dev_num, u32 if_id,
				     enum mv_ddr_freq freq);
/*
 * Read temperature TJ value
 */
static u32 ddr3_ctrl_get_junc_temp(u8 dev_num)
{
	int reg = 0;
	/* Initiate TSEN hardware reset once */
	if ((reg_read(TSEN_CONTROL_MSB_REG) & TSEN_CONTROL_MSB_RST_MASK) == 0) {
		reg_bit_set(TSEN_CONTROL_MSB_REG, TSEN_CONTROL_MSB_RST_MASK);
		/* set TSEN TC trim to correct default value (errata #132698) */
		reg = reg_read(TSEN_CONTROL_LSB_REG);
		reg &= ~TSEN_CONTROL_LSB_TC_TRIM_MASK;
		reg |= 0x3 << TSEN_CONTROL_LSB_TC_TRIM_OFFSET;
		reg_write(TSEN_CONTROL_LSB_REG, reg);
	}
	/* Check if the readout field is valid */
	if ((reg_read(TSEN_STATUS_REG) & TSEN_STATUS_READOUT_VALID_MASK) == 0) {
		printf("%s: TSEN not ready\n", __func__);
		return 0;
	}
	reg = reg_read(TSEN_STATUS_REG);
	reg = (reg & TSEN_STATUS_TEMP_OUT_MASK) >> TSEN_STATUS_TEMP_OUT_OFFSET;

	return ((((10000 * reg) / 21445) * 1000) - 272674) / 1000;
}
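
/*
 * Note on the conversion above (illustrative arithmetic): the 10-bit TSEN
 * readout is scaled by 10000/21445 (~0.466 C per step) and offset by
 * -272.674 C, i.e. T(C) ~= code * 0.466 - 272.674. For example, a raw code
 * of 700 gives (10000 * 700) / 21445 = 326, and
 * (326 * 1000 - 272674) / 1000 = 53 C. Integer division truncates, so the
 * result is accurate to about 1 C.
 */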

/*
 * Name:    ddr3_tip_a38x_get_freq_config.
 * Returns: MV_OK if success, other error code if fail.
 */
static int ddr3_tip_a38x_get_freq_config(u8 dev_num, enum mv_ddr_freq freq,
					 struct hws_tip_freq_config_info
					 *freq_config_info)
{
	if (a38x_bw_per_freq[freq] == 0xff)
		return MV_NOT_SUPPORTED;

	if (freq_config_info == NULL)
		return MV_BAD_PARAM;

	freq_config_info->bw_per_freq = a38x_bw_per_freq[freq];
	freq_config_info->rate_per_freq = a38x_rate_per_freq[freq];
	freq_config_info->is_supported = 1;

	return MV_OK;
}

static void dunit_read(u32 addr, u32 mask, u32 *data)
{
	*data = reg_read(addr) & mask;
}

static void dunit_write(u32 addr, u32 mask, u32 data)
{
	u32 reg_val = data;

	if (mask != MASK_ALL_BITS) {
		dunit_read(addr, MASK_ALL_BITS, &reg_val);
		reg_val &= ~mask;
		reg_val |= (data & mask);
	}

	reg_write(addr, reg_val);
}
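
/*
 * Illustrative use of the helper above: with a partial mask, dunit_write()
 * performs a read-modify-write, clearing only the masked bits and OR-ing in
 * (data & mask). E.g., to set bits [9:8] of a register to 0x2 while leaving
 * all other bits intact:
 *
 *	dunit_write(addr, 0x3 << 8, 0x2 << 8);
 *
 * With mask == MASK_ALL_BITS the register is written directly.
 */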

#define ODPG_ENABLE_REG		0x186d4
#define ODPG_EN_OFFS		0
#define ODPG_EN_MASK		0x1
#define ODPG_EN_ENA		1
#define ODPG_EN_DONE		0
#define ODPG_DIS_OFFS		8
#define ODPG_DIS_MASK		0x1
#define ODPG_DIS_DIS		1
void mv_ddr_odpg_enable(void)
{
	dunit_write(ODPG_ENABLE_REG,
		    ODPG_EN_MASK << ODPG_EN_OFFS,
		    ODPG_EN_ENA << ODPG_EN_OFFS);
}

void mv_ddr_odpg_disable(void)
{
	dunit_write(ODPG_ENABLE_REG,
		    ODPG_DIS_MASK << ODPG_DIS_OFFS,
		    ODPG_DIS_DIS << ODPG_DIS_OFFS);
}
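
/*
 * Note: enable and disable are driven by two separate bits of the same
 * register (ODPG_EN_OFFS at bit 0, ODPG_DIS_OFFS at bit 8), so disabling
 * the ODPG means setting the disable bit rather than clearing the enable
 * bit.
 */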

void mv_ddr_odpg_done_clr(void)
{
	return;
}

int mv_ddr_is_odpg_done(u32 count)
{
	u32 i, data;

	for (i = 0; i < count; i++) {
		dunit_read(ODPG_ENABLE_REG, MASK_ALL_BITS, &data);
		if (((data >> ODPG_EN_OFFS) & ODPG_EN_MASK) == ODPG_EN_DONE)
			break;
	}

	if (i == count) {
		printf("%s: timeout\n", __func__);
		return MV_FAIL;
	}

	return MV_OK;
}

void mv_ddr_training_enable(void)
{
	dunit_write(GLOB_CTRL_STATUS_REG,
		    TRAINING_TRIGGER_MASK << TRAINING_TRIGGER_OFFS,
		    TRAINING_TRIGGER_ENA << TRAINING_TRIGGER_OFFS);
}

#define DRAM_INIT_CTRL_STATUS_REG	0x18488
#define TRAINING_TRIGGER_OFFS		0
#define TRAINING_TRIGGER_MASK		0x1
#define TRAINING_TRIGGER_ENA		1
#define TRAINING_DONE_OFFS		1
#define TRAINING_DONE_MASK		0x1
#define TRAINING_DONE_DONE		1
#define TRAINING_DONE_NOT_DONE		0
#define TRAINING_RESULT_OFFS		2
#define TRAINING_RESULT_MASK		0x1
#define TRAINING_RESULT_PASS		0
#define TRAINING_RESULT_FAIL		1
int mv_ddr_is_training_done(u32 count, u32 *result)
{
	u32 data, i;

	if (result == NULL) {
		printf("%s: NULL result pointer found\n", __func__);
		return MV_FAIL;
	}

	for (i = 0; i < count; i++) {
		dunit_read(DRAM_INIT_CTRL_STATUS_REG, MASK_ALL_BITS, &data);
		if (((data >> TRAINING_DONE_OFFS) & TRAINING_DONE_MASK) ==
		    TRAINING_DONE_DONE)
			break;
	}

	if (i == count) {
		printf("%s: timeout\n", __func__);
		return MV_FAIL;
	}

	*result = (data >> TRAINING_RESULT_OFFS) & TRAINING_RESULT_MASK;

	return MV_OK;
}
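
/*
 * Typical caller pattern (illustrative only):
 *
 *	u32 result;
 *
 *	mv_ddr_training_enable();
 *	if (mv_ddr_is_training_done(MAX_POLLING_ITERATIONS, &result) == MV_OK &&
 *	    result == TRAINING_RESULT_PASS)
 *		...training completed and passed...
 *
 * TRAINING_RESULT_PASS is 0, so a zero result field means success.
 */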

u32 mv_ddr_dm_pad_get(void)

/*
 * Name:    ddr3_tip_a38x_select_ddr_controller.
 * Desc:    Enable/Disable access to Marvell's server.
 * Args:    dev_num - device number
 *          enable - whether to enable or disable the server
 * Returns: MV_OK if success, other error code if fail.
 */
static int ddr3_tip_a38x_select_ddr_controller(u8 dev_num, int enable)
{
	u32 reg;
	reg = reg_read(DUAL_DUNIT_CFG_REG);

	reg_write(DUAL_DUNIT_CFG_REG, reg);

	return MV_OK;
}

static u8 ddr3_tip_clock_mode(u32 frequency)
{
	if ((frequency == MV_DDR_FREQ_LOW_FREQ) || (mv_ddr_freq_get(frequency) <= 400))
		return 1;

	return 2;
}

static int mv_ddr_sar_freq_get(int dev_num, enum mv_ddr_freq *freq)
{
	u32 reg, ref_clk_satr;

	/* Read sample at reset setting */
	reg = (reg_read(REG_DEVICE_SAR1_ADDR) >>
	       RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
	      RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;

	ref_clk_satr = reg_read(DEVICE_SAMPLE_AT_RESET2_REG);
	if (((ref_clk_satr >> DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_OFFSET) & 0x1) ==
	    DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_25MHZ) {
		switch (reg) {
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 333MHz configured (%d)\n",
					       reg));
			*freq = MV_DDR_FREQ_333;
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 400MHz configured (%d)\n",
					       reg));
			*freq = MV_DDR_FREQ_400;
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 533MHz configured (%d)\n",
					       reg));
			*freq = MV_DDR_FREQ_533;
			*freq = MV_DDR_FREQ_600;
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 667MHz configured (%d)\n",
					       reg));
			*freq = MV_DDR_FREQ_667;
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 800MHz configured (%d)\n",
					       reg));
			*freq = MV_DDR_FREQ_800;
			*freq = MV_DDR_FREQ_933;
			*freq = MV_DDR_FREQ_900;
			*freq = MV_DDR_FREQ_933;
		default:
			return MV_NOT_SUPPORTED;
		}
	} else { /* REFCLK 40MHz case */
		switch (reg) {
			*freq = MV_DDR_FREQ_400;
			*freq = MV_DDR_FREQ_533;
			*freq = MV_DDR_FREQ_800;
			*freq = MV_DDR_FREQ_900;
		default:
			return MV_NOT_SUPPORTED;
		}
	}

	return MV_OK;
}

static int ddr3_tip_a38x_get_medium_freq(int dev_num, enum mv_ddr_freq *freq)
{
	u32 reg, ref_clk_satr;

	/* Read sample at reset setting */
	reg = (reg_read(REG_DEVICE_SAR1_ADDR) >>
	       RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
	      RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;

	ref_clk_satr = reg_read(DEVICE_SAMPLE_AT_RESET2_REG);
	if (((ref_clk_satr >> DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_OFFSET) & 0x1) ==
	    DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_25MHZ) {
		switch (reg) {
			/* Medium is the same as TF to run PBS at this freq */
			*freq = MV_DDR_FREQ_333;
			/* Medium is the same as TF to run PBS at this freq */
			*freq = MV_DDR_FREQ_400;
			/* Medium is the same as TF to run PBS at this freq */
			*freq = MV_DDR_FREQ_533;
			*freq = MV_DDR_FREQ_333;
			*freq = MV_DDR_FREQ_400;
			*freq = MV_DDR_FREQ_300;
			*freq = MV_DDR_FREQ_360;
			*freq = MV_DDR_FREQ_400;
		default:
			return MV_NOT_SUPPORTED;
		}
	} else { /* REFCLK 40MHz case */
		switch (reg) {
			/* Medium is the same as TF to run PBS at this freq */
			*freq = MV_DDR_FREQ_400;
			/* Medium is the same as TF to run PBS at this freq */
			*freq = MV_DDR_FREQ_533;
			*freq = MV_DDR_FREQ_400;
			*freq = MV_DDR_FREQ_360;
		default:
			return MV_NOT_SUPPORTED;
		}
	}

	return MV_OK;
}

static int ddr3_tip_a38x_get_device_info(u8 dev_num, struct ddr3_device_info *info_ptr)
{
	info_ptr->device_id = 0x6800;
	info_ptr->ck_delay = ck_delay;

	return MV_OK;
}

/* check that an indirect access to the phy register file has completed */
static int is_prfa_done(void)
{
	u32 reg_val;
	u32 iter = 0;

	do {
		if (iter++ > MAX_POLLING_ITERATIONS) {
			printf("error: %s: polling timeout\n", __func__);
			return MV_FAIL;
		}
		dunit_read(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, &reg_val);
		reg_val >>= PRFA_REQ_OFFS;
		reg_val &= PRFA_REQ_MASK;
	} while (reg_val == PRFA_REQ_ENA);	/* request pending */

	return MV_OK;
}

/* write to phy register via indirect access */
static int prfa_write(enum hws_access_type phy_access, u32 phy,
		      enum hws_ddr_phy phy_type, u32 addr,
		      u32 data, enum hws_operation op_type)
{
	u32 reg_val = ((data & PRFA_DATA_MASK) << PRFA_DATA_OFFS) |
		      ((addr & PRFA_REG_NUM_MASK) << PRFA_REG_NUM_OFFS) |
		      ((phy & PRFA_PUP_NUM_MASK) << PRFA_PUP_NUM_OFFS) |
		      ((phy_type & PRFA_PUP_CTRL_DATA_MASK) << PRFA_PUP_CTRL_DATA_OFFS) |
		      ((phy_access & PRFA_PUP_BCAST_WR_ENA_MASK) << PRFA_PUP_BCAST_WR_ENA_OFFS) |
		      (((addr >> 6) & PRFA_REG_NUM_HI_MASK) << PRFA_REG_NUM_HI_OFFS) |
		      ((op_type & PRFA_TYPE_MASK) << PRFA_TYPE_OFFS);

	dunit_write(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, reg_val);
	reg_val |= (PRFA_REQ_ENA << PRFA_REQ_OFFS);
	dunit_write(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, reg_val);

	/* poll for prfa request completion */
	if (is_prfa_done() != MV_OK)
		return MV_FAIL;

	return MV_OK;
}
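
/*
 * The indirect access protocol sketched above: the request word (PHY
 * number, register number, data, operation type) is first written to
 * PHY_REG_FILE_ACCESS_REG without the request bit, then rewritten with
 * PRFA_REQ_ENA set to trigger the access, and finally the request bit is
 * polled until the controller clears it. prfa_read() below reuses
 * prfa_write() with OPERATION_READ and then pulls the result out of the
 * same register's data field.
 */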

/* read from phy register via indirect access */
static int prfa_read(enum hws_access_type phy_access, u32 phy,
		     enum hws_ddr_phy phy_type, u32 addr, u32 *data)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	u32 max_phy = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
	u32 i, reg_val;

	if (phy_access == ACCESS_TYPE_MULTICAST) {
		for (i = 0; i < max_phy; i++) {
			VALIDATE_BUS_ACTIVE(tm->bus_act_mask, i);
			if (prfa_write(ACCESS_TYPE_UNICAST, i, phy_type, addr, 0, OPERATION_READ) != MV_OK)
				return MV_FAIL;
			dunit_read(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, &reg_val);
			data[i] = (reg_val >> PRFA_DATA_OFFS) & PRFA_DATA_MASK;
		}
	} else {
		if (prfa_write(phy_access, phy, phy_type, addr, 0, OPERATION_READ) != MV_OK)
			return MV_FAIL;
		dunit_read(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, &reg_val);
		*data = (reg_val >> PRFA_DATA_OFFS) & PRFA_DATA_MASK;
	}

	return MV_OK;
}

static int mv_ddr_sw_db_init(u32 dev_num, u32 board_id)
{
	struct hws_tip_config_func_db config_func;

	/* new read leveling version */
	config_func.mv_ddr_dunit_read = dunit_read;
	config_func.mv_ddr_dunit_write = dunit_write;
	config_func.tip_dunit_mux_select_func =
		ddr3_tip_a38x_select_ddr_controller;
	config_func.tip_get_freq_config_info_func =
		ddr3_tip_a38x_get_freq_config;
	config_func.tip_set_freq_divider_func = ddr3_tip_a38x_set_divider;
	config_func.tip_get_device_info_func = ddr3_tip_a38x_get_device_info;
	config_func.tip_get_temperature = ddr3_ctrl_get_junc_temp;
	config_func.tip_get_clock_ratio = ddr3_tip_clock_mode;
	config_func.tip_external_read = ddr3_tip_ext_read;
	config_func.tip_external_write = ddr3_tip_ext_write;
	config_func.mv_ddr_phy_read = prfa_read;
	config_func.mv_ddr_phy_write = prfa_write;

	ddr3_tip_init_config_func(dev_num, &config_func);

	ddr3_tip_register_dq_table(dev_num, dq_bit_map_2_phy_pin);

	/* set device attributes */
	ddr3_tip_dev_attr_init(dev_num);
	ddr3_tip_dev_attr_set(dev_num, MV_ATTR_TIP_REV, MV_TIP_REV_4);
	ddr3_tip_dev_attr_set(dev_num, MV_ATTR_PHY_EDGE, MV_DDR_PHY_EDGE_POSITIVE);
	ddr3_tip_dev_attr_set(dev_num, MV_ATTR_OCTET_PER_INTERFACE, DDR_INTERFACE_OCTETS_NUM);
	ddr3_tip_dev_attr_set(dev_num, MV_ATTR_INTERLEAVE_WA, 0);
	dfs_low_freq = DFS_LOW_FREQ_VALUE;
	calibration_update_control = 1;

	ddr3_tip_a38x_get_medium_freq(dev_num, &medium_freq);

	return MV_OK;
}

static int mv_ddr_training_mask_set(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	enum mv_ddr_freq ddr_freq = tm->interface_params[0].memory_freq;

	mask_tune_func = (SET_LOW_FREQ_MASK_BIT |
			  LOAD_PATTERN_MASK_BIT |
			  SET_MEDIUM_FREQ_MASK_BIT | WRITE_LEVELING_MASK_BIT |
			  WRITE_LEVELING_SUPP_MASK_BIT |
			  READ_LEVELING_MASK_BIT |
			  PBS_RX_MASK_BIT |
			  PBS_TX_MASK_BIT |
			  SET_TARGET_FREQ_MASK_BIT |
			  WRITE_LEVELING_TF_MASK_BIT |
			  WRITE_LEVELING_SUPP_TF_MASK_BIT |
			  READ_LEVELING_TF_MASK_BIT |
			  CENTRALIZATION_RX_MASK_BIT |
			  CENTRALIZATION_TX_MASK_BIT);

	if ((ddr_freq == MV_DDR_FREQ_333) || (ddr_freq == MV_DDR_FREQ_400)) {
		mask_tune_func = (WRITE_LEVELING_MASK_BIT |
				  LOAD_PATTERN_2_MASK_BIT |
				  WRITE_LEVELING_SUPP_MASK_BIT |
				  READ_LEVELING_MASK_BIT |
				  PBS_RX_MASK_BIT |
				  PBS_TX_MASK_BIT |
				  CENTRALIZATION_RX_MASK_BIT |
				  CENTRALIZATION_TX_MASK_BIT);
		rl_mid_freq_wa = 0; /* WA not needed if 333/400 is TF */
	}

	/* Supplementary not supported for ECC modes */
	if (mv_ddr_is_ecc_ena()) {
		mask_tune_func &= ~WRITE_LEVELING_SUPP_TF_MASK_BIT;
		mask_tune_func &= ~WRITE_LEVELING_SUPP_MASK_BIT;
		mask_tune_func &= ~PBS_TX_MASK_BIT;
		mask_tune_func &= ~PBS_RX_MASK_BIT;
	}

	return MV_OK;
}

/* function: mv_ddr_set_calib_controller
 * this function sets the controller that will control
 * the calibration cycle at the end of the training.
 * 1 - internal controller
 * 2 - external controller
 */
void mv_ddr_set_calib_controller(void)
{
	calibration_update_control = CAL_UPDATE_CTRL_INT;
}

static int ddr3_tip_a38x_set_divider(u8 dev_num, u32 if_id,
				     enum mv_ddr_freq frequency)
{
	u32 divider = 0;
	u32 sar_val, ref_clk_satr;
	u32 async_val;
	u32 freq = mv_ddr_freq_get(frequency);

	if (if_id != 0) {
		DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
				      ("A38x does not support interface 0x%x\n",
				       if_id));
		return MV_BAD_PARAM;
	}

	/* get VCO freq index */
	sar_val = (reg_read(REG_DEVICE_SAR1_ADDR) >>
		   RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
		  RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;

	ref_clk_satr = reg_read(DEVICE_SAMPLE_AT_RESET2_REG);
	if (((ref_clk_satr >> DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_OFFSET) & 0x1) ==
	    DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_25MHZ)
		divider = a38x_vco_freq_per_sar_ref_clk_25_mhz[sar_val] / freq;
	else
		divider = a38x_vco_freq_per_sar_ref_clk_40_mhz[sar_val] / freq;
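
	/*
	 * Illustrative arithmetic for the divider above (assumes the elided
	 * tables hold the PLL VCO frequency in MHz per sample-at-reset
	 * option): for a 1600 MHz VCO and a target DDR frequency of 800 MHz,
	 * divider = 1600 / 800 = 2.
	 */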

	if ((async_mode_at_tf == 1) && (freq > 400)) {
		/* Set async mode */
		dunit_write(0x20220, 0x1000, 0x1000);
		dunit_write(0xe42f4, 0x200, 0x200);

		/* Wait for async mode setup */

		switch (frequency) {
		case MV_DDR_FREQ_467:
			async_val = 0x806f012;
			break;
		case MV_DDR_FREQ_533:
			async_val = 0x807f012;
			break;
		case MV_DDR_FREQ_600:
			async_val = 0x805f00a;
			break;
		case MV_DDR_FREQ_667:
			async_val = 0x809f012;
			break;
		case MV_DDR_FREQ_800:
			async_val = 0x807f00a;
			break;
		case MV_DDR_FREQ_850:
			async_val = 0x80cb012;
			break;
		case MV_DDR_FREQ_900:
			async_val = 0x80d7012;
			break;
		case MV_DDR_FREQ_933:
			async_val = 0x80df012;
			break;
		case MV_DDR_FREQ_1000:
			async_val = 0x80ef012;
			break;
		case MV_DDR_FREQ_1066:
			async_val = 0x80ff012;
			break;
		default:
			/* set MV_DDR_FREQ_667 as default */
			async_val = 0x809f012;
		}
		dunit_write(0xe42f0, 0xffffffff, async_val);
	} else {
		/* Set sync mode */
		dunit_write(0x20220, 0x1000, 0x0);
		dunit_write(0xe42f4, 0x200, 0x0);

		/* cpupll_clkdiv_reset_mask */
		dunit_write(0xe4264, 0xff, 0x1f);

		/* cpupll_clkdiv_reload_smooth */
		dunit_write(0xe4260, (0xff << 8), (0x2 << 8));

		/* cpupll_clkdiv_relax_en */
		dunit_write(0xe4260, (0xff << 24), (0x2 << 24));

		/* write the divider */
		dunit_write(0xe4268, (0x3f << 8), (divider << 8));

		/* set cpupll_clkdiv_reload_ratio */
		dunit_write(0xe4264, (1 << 8), (1 << 8));

		/* unset cpupll_clkdiv_reload_ratio */
		dunit_write(0xe4264, (1 << 8), 0x0);

		/* clear cpupll_clkdiv_reload_force */
		dunit_write(0xe4260, (0xff << 8), 0x0);

		/* clear cpupll_clkdiv_relax_en */
		dunit_write(0xe4260, (0xff << 24), 0x0);

		/* clear cpupll_clkdiv_reset_mask */
		dunit_write(0xe4264, 0xff, 0x0);
	}

	/* Dunit training clock + 1:1/2:1 mode */
	dunit_write(0x18488, (1 << 16), ((ddr3_tip_clock_mode(frequency) & 0x1) << 16));
	dunit_write(0x1524, (1 << 15), ((ddr3_tip_clock_mode(frequency) - 1) << 15));

	return MV_OK;
}
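
/*
 * Note on the last two writes in ddr3_tip_a38x_set_divider():
 * ddr3_tip_clock_mode() returns 1 for 1:1 mode (low frequencies) and 2 for
 * 2:1 mode, so bit 16 of 0x18488 is set only in 1:1 mode (2 & 0x1 == 0),
 * while bit 15 of 0x1524 is set only in 2:1 mode (2 - 1 == 1).
 */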

/*
 * external read from memory
 */
int ddr3_tip_ext_read(u32 dev_num, u32 if_id, u32 reg_addr,
		      u32 num_of_bursts, u32 *data)
{
	u32 burst_num;

	for (burst_num = 0; burst_num < num_of_bursts * 8; burst_num++)
		data[burst_num] = readl(reg_addr + 4 * burst_num);

	return MV_OK;
}

/*
 * external write to memory
 */
int ddr3_tip_ext_write(u32 dev_num, u32 if_id, u32 reg_addr,
		       u32 num_of_bursts, u32 *data)
{
	u32 burst_num;

	for (burst_num = 0; burst_num < num_of_bursts * 8; burst_num++)
		writel(data[burst_num], reg_addr + 4 * burst_num);

	return MV_OK;
}
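
/*
 * Each "burst" in the two helpers above is eight 32-bit words (32 bytes),
 * so a call such as
 *
 *	ddr3_tip_ext_read(0, 0, addr, 2, buf);
 *
 * fills buf[0..15] with 64 bytes starting at addr.
 */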

int mv_ddr_early_init(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	/* FIXME: change this configuration per ddr type
	 * configure a380 and a390 to work with receiver odt timing
	 * the odt_config is defined:
	 * '1' in ddr4
	 * '0' in ddr3
	 * here the parameter is overridden in both ddr4 and ddr3 to '1'
	 * (in ddr4 the default is '1') to configure the odt to work with
	 * timing restrictions
	 */

	mv_ddr_sw_db_init(0, 0);

	if (tm->interface_params[0].memory_freq != MV_DDR_FREQ_SAR)
		async_mode_at_tf = 1;

	return MV_OK;
}

int mv_ddr_early_init2(void)
{
	mv_ddr_training_mask_set();

	return MV_OK;
}

int mv_ddr_pre_training_fixup(void)
{
	return 0;
}

int mv_ddr_post_training_fixup(void)
{
	return 0;
}

int ddr3_post_run_alg(void)
{
	return MV_OK;
}

int ddr3_silicon_post_init(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	/* Set half bus width */
	if (DDR3_IS_16BIT_DRAM_MODE(tm->bus_act_mask)) {
		CHECK_STATUS(ddr3_tip_if_write
			     (0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE,
			      SDRAM_CFG_REG, 0x0, 0x8000));
	}

	return MV_OK;
}

u32 mv_ddr_init_freq_get(void)
{
	enum mv_ddr_freq freq;

	mv_ddr_sar_freq_get(0, &freq);

	return freq;
}

static u32 ddr3_get_bus_width(void)
{
	u32 bus_width;

	bus_width = (reg_read(SDRAM_CFG_REG) & 0x8000) >> 15;

	return (bus_width == 0) ? 16 : 32;
}

static u32 ddr3_get_device_width(u32 cs)
{
	u32 device_width;

	device_width = (reg_read(SDRAM_ADDR_CTRL_REG) &
			(CS_STRUCT_MASK << CS_STRUCT_OFFS(cs))) >>
		       CS_STRUCT_OFFS(cs);

	return (device_width == 0) ? 8 : 16;
}

static u32 ddr3_get_device_size(u32 cs)
{
	u32 device_size_low, device_size_high, device_size;
	u32 data, cs_low_offset, cs_high_offset;

	cs_low_offset = CS_SIZE_OFFS(cs);
	cs_high_offset = CS_SIZE_HIGH_OFFS(cs);

	data = reg_read(SDRAM_ADDR_CTRL_REG);
	device_size_low = (data >> cs_low_offset) & 0x3;
	device_size_high = (data >> cs_high_offset) & 0x1;

	device_size = device_size_low | (device_size_high << 2);

	switch (device_size) {
	default:
		DEBUG_INIT_C("Error: Wrong device size of Cs: ", cs, 1);
		/* zeroes mem size in ddr3_calc_mem_cs_size */
		return 0;
	}
}

int ddr3_calc_mem_cs_size(u32 cs, uint64_t *cs_size)
{
	u32 cs_mem_size;

	/* Calculate in MiB */
	cs_mem_size = ((ddr3_get_bus_width() / ddr3_get_device_width(cs)) *
		       ddr3_get_device_size(cs)) / 8;
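
	/*
	 * Worked example (illustrative): a 32-bit bus populated with 16-bit
	 * devices of 4096 Mbit each gives (32 / 16) * 4096 / 8 = 1024 MiB
	 * per chip select, before the bus-width multiplier applied below.
	 */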

	/*
	 * Multiply by controller bus width: 2x for 64 bit.
	 * (The SoC controller may be 32 or 64 bit, so bit 15 in register
	 * 0x1400, which indicates whether the whole bus or only half of it
	 * is used, has a different meaning.)
	 */
	cs_mem_size *= DDR_CONTROLLER_BUS_WIDTH_MULTIPLIER;

	if ((cs_mem_size < 128) || (cs_mem_size > 4096)) {
		DEBUG_INIT_C("Error: Wrong Memory size of Cs: ", cs, 1);
		return MV_BAD_VALUE;
	}

	*cs_size = cs_mem_size;

	return MV_OK;
}

static int ddr3_fast_path_dynamic_cs_size_config(u32 cs_ena)
{
	u32 reg, cs;
	uint64_t mem_total_size = 0;
	uint64_t cs_mem_size_mb = 0;
	uint64_t cs_mem_size = 0;
	uint64_t mem_total_size_c, cs_mem_size_c;

#ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE
	u32 physical_mem_size;
	u32 max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE;
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
#endif

	/* Open fast path windows */
	for (cs = 0; cs < MAX_CS_NUM; cs++) {
		if (cs_ena & (1 << cs)) {
			if (ddr3_calc_mem_cs_size(cs, &cs_mem_size_mb) != MV_OK)
				return MV_FAIL;
			cs_mem_size = cs_mem_size_mb * _1M;

#ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE
			/*
			 * if the number of address pins doesn't allow using
			 * the max mem size that is defined in topology,
			 * mem size is defined by DEVICE_MAX_DRAM_ADDRESS_SIZE
			 */
			physical_mem_size = mem_size
				[tm->interface_params[0].memory_size];

			if (ddr3_get_device_width(cs) == 16) {
				/*
				 * 16-bit mem devices can be twice as large;
				 * the least significant pin is not needed
				 */
				max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE * 2;
			}

			if (physical_mem_size > max_mem_size) {
				cs_mem_size = max_mem_size *
					(ddr3_get_bus_width() /
					 ddr3_get_device_width(cs));
				printf("Updated Physical Mem size is from 0x%x to %x\n",
				       physical_mem_size,
				       DEVICE_MAX_DRAM_ADDRESS_SIZE);
			}
#endif

			/* set fast path window control for the cs */
			reg |= (cs_mem_size - 1) & 0xffff0000;
			/* Open fast path window */
			reg_write(REG_FASTPATH_WIN_CTRL_ADDR(cs), reg);

			/* Set fast path window base address for the cs */
			reg = ((cs_mem_size) * cs) & 0xffff0000;
			/* Set base address */
			reg_write(REG_FASTPATH_WIN_BASE_ADDR(cs), reg);

			/*
			 * Since memory size may be bigger than 4G the sum may
			 * exceed a 32-bit word, so to estimate the result
			 * divide mem_total_size and cs_mem_size by 0x10000
			 * (which equals >> 16)
			 */
			mem_total_size_c = (mem_total_size >> 16) & 0xffffffffffff;
			cs_mem_size_c = (cs_mem_size >> 16) & 0xffffffffffff;

			/* if the sum is less than 4 GB, calculate the value */
			if (mem_total_size_c + cs_mem_size_c < 0x10000)
				mem_total_size += cs_mem_size;
			else /* put max possible size */
				mem_total_size = L2_FILTER_FOR_MAX_MEMORY_SIZE;
		}
	}
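
	/*
	 * Illustrative check of the overflow guard in the loop above: with
	 * two 2 GiB chip selects, (0x80000000 >> 16) + (0x80000000 >> 16)
	 * equals 0x10000, which is not < 0x10000, so the total is clamped
	 * to L2_FILTER_FOR_MAX_MEMORY_SIZE instead of being summed.
	 */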

	/* Set L2 filtering to Max Memory size */
	reg_write(ADDRESS_FILTERING_END_REGISTER, mem_total_size);

	return MV_OK;
}

static int ddr3_restore_and_set_final_windows(u32 *win, const char *ddr_type)
{
	u32 win_ctrl_reg, num_of_win_regs;
	u32 cs_ena = mv_ddr_sys_env_get_cs_ena_from_reg();
	u32 reg, cs, ui;

	win_ctrl_reg = REG_XBAR_WIN_4_CTRL_ADDR;
	num_of_win_regs = 16;

	/* Return XBAR windows 4-7 or 16-19 init configuration */
	for (ui = 0; ui < num_of_win_regs; ui++)
		reg_write((win_ctrl_reg + 0x4 * ui), win[ui]);

	printf("%s Training Sequence - Switching XBAR Window to FastPath Window\n",
	       ddr_type);

#if defined DYNAMIC_CS_SIZE_CONFIG
	if (ddr3_fast_path_dynamic_cs_size_config(cs_ena) != MV_OK)
		printf("ddr3_fast_path_dynamic_cs_size_config FAILED\n");
#else
	for (cs = 0; cs < MAX_CS_NUM; cs++) {
		if (cs_ena & (1 << cs)) {
			/* Open fast path Window to - 0.5G */
			reg_write(REG_FASTPATH_WIN_CTRL_ADDR(0), reg);
		}
	}
#endif

	return MV_OK;
}

static int ddr3_save_and_set_training_windows(u32 *win)
{
	u32 cs_ena;
	u32 reg, tmp_count, cs, ui;
	u32 win_ctrl_reg, win_base_reg, win_remap_reg;
	u32 num_of_win_regs, win_jump_index;
	win_ctrl_reg = REG_XBAR_WIN_4_CTRL_ADDR;
	win_base_reg = REG_XBAR_WIN_4_BASE_ADDR;
	win_remap_reg = REG_XBAR_WIN_4_REMAP_ADDR;
	win_jump_index = 0x10;
	num_of_win_regs = 16;
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

#ifdef DISABLE_L2_FILTERING_DURING_DDR_TRAINING
	/*
	 * Disable L2 filtering during DDR training
	 * (when Cross Bar window is open)
	 */
	reg_write(ADDRESS_FILTERING_END_REGISTER, 0);
#endif

	cs_ena = tm->interface_params[0].as_bus_params[0].cs_bitmask;

	/* Close XBAR Window 19 - Not needed */
	/* {0x000200e8} - Open Mbus Window - 2G */
	reg_write(REG_XBAR_WIN_19_CTRL_ADDR, 0);

	/* Save XBAR Windows 4-19 init configurations */
	for (ui = 0; ui < num_of_win_regs; ui++)
		win[ui] = reg_read(win_ctrl_reg + 0x4 * ui);

	/* Open XBAR Windows 4-7 or 16-19 for other CS */
	reg = 0;
	tmp_count = 0;
	for (cs = 0; cs < MAX_CS_NUM; cs++) {
		if (cs_ena & (1 << cs)) {
			reg |= (SDRAM_CS_SIZE & 0xffff0000);
			reg_write(win_ctrl_reg + win_jump_index * tmp_count,
				  reg);
			reg = (((SDRAM_CS_SIZE + 1) * (tmp_count)) &
			       0xffff0000);
			reg_write(win_base_reg + win_jump_index * tmp_count,
				  reg);

			if (win_remap_reg <= REG_XBAR_WIN_7_REMAP_ADDR)
				reg_write(win_remap_reg +
					  win_jump_index * tmp_count, 0);

			tmp_count++;
		}
	}

	return MV_OK;
}

int mv_ddr_pre_training_soc_config(const char *ddr_type)
{
	u32 soc_num;
	u32 reg_val;

	/* Switching CPU to MRVL ID */
	soc_num = (reg_read(REG_SAMPLE_RESET_HIGH_ADDR) & SAR1_CPU_CORE_MASK) >>
		SAR1_CPU_CORE_OFFSET;
	switch (soc_num) {
	case 0x3:
		reg_bit_set(CPU_CONFIGURATION_REG(3), CPU_MRVL_ID_OFFSET);
		reg_bit_set(CPU_CONFIGURATION_REG(2), CPU_MRVL_ID_OFFSET);
		/* fallthrough */
	case 0x1:
		reg_bit_set(CPU_CONFIGURATION_REG(1), CPU_MRVL_ID_OFFSET);
		/* fallthrough */
	case 0x0:
		reg_bit_set(CPU_CONFIGURATION_REG(0), CPU_MRVL_ID_OFFSET);
		break;
	}

	/*
	 * Set DRAM Reset Mask in case a GPIO indication of wakeup from
	 * suspend is detected, i.e. the DRAM values will not be
	 * overwritten / reset when waking from suspend
	 */
	if (mv_ddr_sys_env_suspend_wakeup_check() ==
	    SUSPEND_WAKEUP_ENABLED_GPIO_DETECTED) {
		reg_bit_set(SDRAM_INIT_CTRL_REG,
			    DRAM_RESET_MASK_MASKED << DRAM_RESET_MASK_OFFS);
	}

	/* Check if DRAM is already initialized */
	if (reg_read(REG_BOOTROM_ROUTINE_ADDR) &
	    (1 << REG_BOOTROM_ROUTINE_DRAM_INIT_OFFS)) {
		printf("%s Training Sequence - 2nd boot - Skip\n", ddr_type);
		return MV_OK;
	}

	/* Fix read ready phases for all SOC in reg 0x15c8 */
	reg_val = reg_read(TRAINING_DBG_3_REG);

	reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(0));
	reg_val |= (0x4 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(0));	/* phase 0 */

	reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(1));
	reg_val |= (0x4 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(1));	/* phase 1 */

	reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(3));
	reg_val |= (0x6 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(3));	/* phase 3 */

	reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(4));
	reg_val |= (0x6 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(4));	/* phase 4 */

	reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(5));
	reg_val |= (0x6 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(5));	/* phase 5 */

	reg_write(TRAINING_DBG_3_REG, reg_val);

	/*
	 * Axi_bresp_mode[8] = Compliant,
	 * Axi_addr_decode_cntrl[11] = Internal,
	 * Axi_data_bus_width[0] = 128bit
	 */
	/* 0x14a8 - AXI Control Register */
	reg_write(AXI_CTRL_REG, 0);

	/*
	 * Stage 2 - Training Values Setup
	 */
	/* Set X-BAR windows for the training sequence */
	ddr3_save_and_set_training_windows(win);

	return MV_OK;
}

static int ddr3_new_tip_dlb_config(void)
{
	u32 reg, i = 0;
	struct dlb_config *config_table_ptr = sys_env_dlb_config_ptr_get();

	/* Write the configuration */
	while (config_table_ptr[i].reg_addr != 0) {
		reg_write(config_table_ptr[i].reg_addr,
			  config_table_ptr[i].reg_data);
		i++;
	}

	/* Enable DLB */
	reg = reg_read(DLB_CTRL_REG);
	reg &= ~(DLB_EN_MASK << DLB_EN_OFFS) &
	       ~(WR_COALESCE_EN_MASK << WR_COALESCE_EN_OFFS) &
	       ~(AXI_PREFETCH_EN_MASK << AXI_PREFETCH_EN_OFFS) &
	       ~(MBUS_PREFETCH_EN_MASK << MBUS_PREFETCH_EN_OFFS) &
	       ~(PREFETCH_NXT_LN_SZ_TRIG_MASK << PREFETCH_NXT_LN_SZ_TRIG_OFFS);

	reg |= (DLB_EN_ENA << DLB_EN_OFFS) |
	       (WR_COALESCE_EN_ENA << WR_COALESCE_EN_OFFS) |
	       (AXI_PREFETCH_EN_ENA << AXI_PREFETCH_EN_OFFS) |
	       (MBUS_PREFETCH_EN_ENA << MBUS_PREFETCH_EN_OFFS) |
	       (PREFETCH_NXT_LN_SZ_TRIG_ENA << PREFETCH_NXT_LN_SZ_TRIG_OFFS);

	reg_write(DLB_CTRL_REG, reg);

	return MV_OK;
}

int mv_ddr_post_training_soc_config(const char *ddr_type)
{
	u32 reg_val;

	/* Restore and set windows */
	ddr3_restore_and_set_final_windows(win, ddr_type);

	/* Update DRAM init indication in bootROM register */
	reg_val = reg_read(REG_BOOTROM_ROUTINE_ADDR);
	reg_write(REG_BOOTROM_ROUTINE_ADDR,
		  reg_val | (1 << REG_BOOTROM_ROUTINE_DRAM_INIT_OFFS));

	/* DLB config */
	ddr3_new_tip_dlb_config();

	return MV_OK;
}

void mv_ddr_mc_config(void)
{
	/* Memory controller initializations */
	struct init_cntr_param init_param;
	int status;

	init_param.do_mrs_phy = 1;
	init_param.is_ctrl64_bit = 0;
	init_param.init_phy = 1;
	init_param.msys_init = 1;
	status = hws_ddr3_tip_init_controller(0, &init_param);
	if (status != MV_OK)
		printf("DDR3 init controller - FAILED 0x%x\n", status);

	status = mv_ddr_mc_init();
	if (status != MV_OK)
		printf("DDR3 init_sequence - FAILED 0x%x\n", status);
}

/* function: mv_ddr_mc_init
 * this function enables the dunit after init controller configuration
 */
int mv_ddr_mc_init(void)
{
	CHECK_STATUS(ddr3_tip_enable_init_sequence(0));

	return MV_OK;
}

/* function: ddr3_tip_configure_phy
 * configures phy and electrical parameters
 */
int ddr3_tip_configure_phy(u32 dev_num)
{
	u32 if_id, phy_id;
	u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		      PAD_ZRI_CAL_PHY_REG,
		      ((0x7f & g_zpri_data) << 7 | (0x7f & g_znri_data))));
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
		      PAD_ZRI_CAL_PHY_REG,
		      ((0x7f & g_zpri_ctrl) << 7 | (0x7f & g_znri_ctrl))));
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		      PAD_ODT_CAL_PHY_REG,
		      ((0x3f & g_zpodt_data) << 6 | (0x3f & g_znodt_data))));
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
		      PAD_ODT_CAL_PHY_REG,
		      ((0x3f & g_zpodt_ctrl) << 6 | (0x3f & g_znodt_ctrl))));
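
	/*
	 * Note on the four writes above: each calibration register packs a
	 * P-side and an N-side strength into one value - 7-bit fields for
	 * the ZRI (driver impedance) calibration, (zpri << 7) | znri, and
	 * 6-bit fields for the ODT calibration, (zpodt << 6) | znodt.
	 */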

	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		      PAD_PRE_DISABLE_PHY_REG, 0));
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		      CMOS_CONFIG_PHY_REG, 0));
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
		      CMOS_CONFIG_PHY_REG, 0));

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		/* check if the interface is enabled */
		VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);

		for (phy_id = 0;
		     phy_id < octets_per_if_num;
		     phy_id++) {
			VALIDATE_BUS_ACTIVE(tm->bus_act_mask, phy_id);
			CHECK_STATUS(ddr3_tip_bus_read_modify_write
				     (dev_num, ACCESS_TYPE_UNICAST,
				      if_id, phy_id, DDR_PHY_DATA,
				      PAD_CFG_PHY_REG,
				      ((clamp_tbl[if_id] << 4) | vref_init_val),
				      ((0x7 << 4) | 0x7)));
			/* clamp not relevant for control */
			CHECK_STATUS(ddr3_tip_bus_read_modify_write
				     (dev_num, ACCESS_TYPE_UNICAST,
				      if_id, phy_id, DDR_PHY_CONTROL,
				      PAD_CFG_PHY_REG, 0x4, 0x7));
		}
	}

	if (ddr3_tip_dev_attr_get(dev_num, MV_ATTR_PHY_EDGE) ==
	    MV_DDR_PHY_EDGE_POSITIVE)
		CHECK_STATUS(ddr3_tip_bus_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      DDR_PHY_DATA, 0x90, 0x6002));

	return MV_OK;
}

int mv_ddr_manual_cal_do(void)