/*
 * Copyright 2014-2015 Freescale Semiconductor
 *
 * SPDX-License-Identifier: GPL-2.0+
 */
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/global_data.h>
#include <asm/arch-fsl-layerscape/config.h>
#ifdef CONFIG_LAYERSCAPE_NS_ACCESS
#ifdef CONFIG_SYS_FSL_DDR
#include <fsl_ddr_sdram.h>
#ifdef CONFIG_CHAIN_OF_TRUST
#include <fsl_validate.h>
#include <fsl_immap.h>

DECLARE_GLOBAL_DATA_PTR;
bool soc_has_dp_ddr(void)
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 svr = gur_in32(&gur->svr);

	/* LS2085A, LS2088A and LS2048A have DP_DDR */
	if ((SVR_SOC_VER(svr) == SVR_LS2085A) ||
	    (SVR_SOC_VER(svr) == SVR_LS2088A) ||
	    (SVR_SOC_VER(svr) == SVR_LS2048A))
bool soc_has_aiop(void)
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 svr = gur_in32(&gur->svr);

	/* LS2085A has AIOP */
	if (SVR_SOC_VER(svr) == SVR_LS2085A)
#if defined(CONFIG_FSL_LSCH3)
/*
 * This erratum requires writing a value to eddrtqcr1 to
 * optimize DDR performance.
 */
static void erratum_a008336(void)
#ifdef CONFIG_SYS_FSL_ERRATUM_A008336
#ifdef CONFIG_SYS_FSL_DCSR_DDR_ADDR
	eddrtqcr1 = (void *)CONFIG_SYS_FSL_DCSR_DDR_ADDR + 0x800;
	if (fsl_ddr_get_version(0) == 0x50200)
		out_le32(eddrtqcr1, 0x63b30002);
#ifdef CONFIG_SYS_FSL_DCSR_DDR2_ADDR
	eddrtqcr1 = (void *)CONFIG_SYS_FSL_DCSR_DDR2_ADDR + 0x800;
	if (fsl_ddr_get_version(0) == 0x50200)
		out_le32(eddrtqcr1, 0x63b30002);
/*
 * This erratum requires a register write before memory
 * controller 3 is enabled.
 */
static void erratum_a008514(void)
#ifdef CONFIG_SYS_FSL_ERRATUM_A008514
#ifdef CONFIG_SYS_FSL_DCSR_DDR3_ADDR
	eddrtqcr1 = (void *)CONFIG_SYS_FSL_DCSR_DDR3_ADDR + 0x800;
	out_le32(eddrtqcr1, 0x63b20002);
#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
#define PLATFORM_CYCLE_ENV_VAR "a009635_interval_val"

static unsigned long get_interval_val_mhz(void)
	char *interval = env_get(PLATFORM_CYCLE_ENV_VAR);
	/*
	 * interval is the number of platform cycles (MHz) between
	 * wake-up events generated by the EPU.
	 */
	ulong interval_mhz = get_bus_freq(0) / (1000 * 1000);
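	/*
	 * A decimal value set in the a009635_interval_val environment
	 * variable overrides this default interval.
	 */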
		interval_mhz = simple_strtoul(interval, NULL, 10);

void erratum_a009635(void)
	unsigned long interval_mhz = get_interval_val_mhz();
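	/*
	 * The writes below program EPU event processor/counter 5 so that a
	 * wake-up event fires every interval_mhz platform cycles, then set
	 * what appears to be the global enable bit (0x80000000) in EPGCR.
	 * The remaining magic constants are taken from the A009635
	 * workaround sequence.
	 */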
	val = in_le32(DCSR_CGACRE5);
	writel(val | 0x00000200, DCSR_CGACRE5);

	val = in_le32(EPU_EPCMPR5);
	writel(interval_mhz, EPU_EPCMPR5);
	val = in_le32(EPU_EPCCR5);
	writel(val | 0x82820000, EPU_EPCCR5);
	val = in_le32(EPU_EPSMCR5);
	writel(val | 0x002f0000, EPU_EPSMCR5);
	val = in_le32(EPU_EPECR5);
	writel(val | 0x20000000, EPU_EPECR5);
	val = in_le32(EPU_EPGCR);
	writel(val | 0x80000000, EPU_EPGCR);
#endif /* CONFIG_SYS_FSL_ERRATUM_A009635 */
static void erratum_rcw_src(void)
#if defined(CONFIG_SPL) && defined(CONFIG_NAND_BOOT)
	u32 __iomem *dcfg_ccsr = (u32 __iomem *)DCFG_BASE;
	u32 __iomem *dcfg_dcsr = (u32 __iomem *)DCFG_DCSR_BASE;
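	/*
	 * When SPL boots from NAND, read the power-on sampled RCW source
	 * from PORSR1, force the field to NOR, and write the result back
	 * through the PORCR1 register in DCSR space.
	 */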
	val = in_le32(dcfg_ccsr + DCFG_PORSR1 / 4);
	val &= ~DCFG_PORSR1_RCW_SRC;
	val |= DCFG_PORSR1_RCW_SRC_NOR;
	out_le32(dcfg_dcsr + DCFG_DCSR_PORCR1 / 4, val);
#define I2C_DEBUG_REG 0x6
#define I2C_GLITCH_EN 0x8
/*
 * This erratum requires setting the glitch_en bit to enable the
 * digital glitch filter and improve clock stability.
 */
#ifdef CONFIG_SYS_FSL_ERRATUM_A009203
static void erratum_a009203(void)
#ifdef CONFIG_SYS_I2C
#ifdef I2C1_BASE_ADDR
	ptr = (u8 __iomem *)(I2C1_BASE_ADDR + I2C_DEBUG_REG);
	writeb(I2C_GLITCH_EN, ptr);
#ifdef I2C2_BASE_ADDR
	ptr = (u8 __iomem *)(I2C2_BASE_ADDR + I2C_DEBUG_REG);
	writeb(I2C_GLITCH_EN, ptr);
#ifdef I2C3_BASE_ADDR
	ptr = (u8 __iomem *)(I2C3_BASE_ADDR + I2C_DEBUG_REG);
	writeb(I2C_GLITCH_EN, ptr);
#ifdef I2C4_BASE_ADDR
	ptr = (u8 __iomem *)(I2C4_BASE_ADDR + I2C_DEBUG_REG);
	writeb(I2C_GLITCH_EN, ptr);

void bypass_smmu(void)
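	/*
	 * Set the ClientPD bit and clear the USFCFG bit in both the secure
	 * and non-secure SCR0 so that transactions bypass SMMU translation
	 * instead of faulting.
	 */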
	val = (in_le32(SMMU_SCR0) | SCR0_CLIENTPD_MASK) & ~(SCR0_USFCFG_MASK);
	out_le32(SMMU_SCR0, val);
	val = (in_le32(SMMU_NSCR0) | SCR0_CLIENTPD_MASK) & ~(SCR0_USFCFG_MASK);
	out_le32(SMMU_NSCR0, val);
void fsl_lsch3_early_init_f(void)
	init_early_memctl_regs(); /* tighten IFC timing */
#ifdef CONFIG_SYS_FSL_ERRATUM_A009203
#ifdef CONFIG_CHAIN_OF_TRUST
	/*
	 * In case of Secure Boot, the IBR configures the SMMU
	 * to allow only secure transactions.
	 * The SMMU must be put back into bypass mode:
	 * set the ClientPD bit and clear the USFCFG bit.
	 */
	if (fsl_check_boot_mode_secure() == 1)
#ifdef CONFIG_SCSI_AHCI_PLAT
	struct ccsr_ahci __iomem *ccsr_ahci;
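	/*
	 * Program the vendor-specific port registers on both SATA
	 * controllers: PHY configuration (ppcfg), transaction configuration
	 * (ptc) and AXI cache control (axicc), then hand the first
	 * controller to the generic AHCI layer.
	 */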
	ccsr_ahci = (void *)CONFIG_SYS_SATA2;
	out_le32(&ccsr_ahci->ppcfg, AHCI_PORT_PHY_1_CFG);
	out_le32(&ccsr_ahci->ptc, AHCI_PORT_TRANS_CFG);
	out_le32(&ccsr_ahci->axicc, AHCI_PORT_AXICC_CFG);

	ccsr_ahci = (void *)CONFIG_SYS_SATA1;
	out_le32(&ccsr_ahci->ppcfg, AHCI_PORT_PHY_1_CFG);
	out_le32(&ccsr_ahci->ptc, AHCI_PORT_TRANS_CFG);
	out_le32(&ccsr_ahci->axicc, AHCI_PORT_AXICC_CFG);

	ahci_init((void __iomem *)CONFIG_SYS_SATA1);
#elif defined(CONFIG_FSL_LSCH2)
#ifdef CONFIG_SCSI_AHCI_PLAT
	struct ccsr_ahci __iomem *ccsr_ahci = (void *)CONFIG_SYS_SATA;

	/* Disable SATA ECC */
	out_le32((void *)CONFIG_SYS_DCSR_DCFG_ADDR + 0x520, 0x80000000);
	out_le32(&ccsr_ahci->ppcfg, AHCI_PORT_PHY_1_CFG);
	out_le32(&ccsr_ahci->ptc, AHCI_PORT_TRANS_CFG);
	out_le32(&ccsr_ahci->axicc, AHCI_PORT_AXICC_CFG);

	ahci_init((void __iomem *)CONFIG_SYS_SATA);
static void erratum_a009929(void)
#ifdef CONFIG_SYS_FSL_ERRATUM_A009929
	struct ccsr_gur *gur = (void *)CONFIG_SYS_FSL_GUTS_ADDR;
	u32 __iomem *dcsr_cop_ccp = (void *)CONFIG_SYS_DCSR_COP_CCP_ADDR;
	u32 rstrqmr1 = gur_in32(&gur->rstrqmr1);

	rstrqmr1 |= 0x00000400;
	gur_out32(&gur->rstrqmr1, rstrqmr1);
	writel(0x01000000, dcsr_cop_ccp);
/*
 * This erratum requires writing a value to eddrtqcr1 to optimize
 * DDR performance. The eddrtqcr1 register is in the SCFG space
 * of LS1043A, at address 0x157_020c.
 */
#if defined(CONFIG_SYS_FSL_ERRATUM_A009660) \
	&& defined(CONFIG_SYS_FSL_ERRATUM_A008514)
#error A009660 and A008514 cannot both be enabled.

static void erratum_a009660(void)
#ifdef CONFIG_SYS_FSL_ERRATUM_A009660
	u32 *eddrtqcr1 = (void *)CONFIG_SYS_FSL_SCFG_ADDR + 0x20c;
	out_be32(eddrtqcr1, 0x63b20042);
static void erratum_a008850_early(void)
#ifdef CONFIG_SYS_FSL_ERRATUM_A008850
	struct ccsr_cci400 __iomem *cci = (void *)(CONFIG_SYS_IMMR +
						   CONFIG_SYS_CCI400_OFFSET);
	struct ccsr_ddr __iomem *ddr = (void *)CONFIG_SYS_FSL_DDR_ADDR;

	/* Skip if running at lower exception level */
	if (current_el() < 3)
		return;

	/* disable propagation of barrier transactions to DDRC from CCI400 */
	out_le32(&cci->ctrl_ord, CCI400_CTRLORD_TERM_BARRIER);

	/* disable the re-ordering in DDRC */
	ddr_out32(&ddr->eor, DDR_EOR_RD_REOD_DIS | DDR_EOR_WD_REOD_DIS);
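/*
 * Second half of the A008850 workaround: once the DDR controller is up,
 * erratum_a008850_post() below re-enables barrier propagation from the
 * CCI-400 and re-ordering in the DDRC.
 */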
void erratum_a008850_post(void)
#ifdef CONFIG_SYS_FSL_ERRATUM_A008850
	struct ccsr_cci400 __iomem *cci = (void *)(CONFIG_SYS_IMMR +
						   CONFIG_SYS_CCI400_OFFSET);
	struct ccsr_ddr __iomem *ddr = (void *)CONFIG_SYS_FSL_DDR_ADDR;

	/* Skip if running at lower exception level */
	if (current_el() < 3)
		return;

	/* enable propagation of barrier transactions to DDRC from CCI400 */
	out_le32(&cci->ctrl_ord, CCI400_CTRLORD_EN_BARRIER);

	/* enable the re-ordering in DDRC */
	tmp = ddr_in32(&ddr->eor);
	tmp &= ~(DDR_EOR_RD_REOD_DIS | DDR_EOR_WD_REOD_DIS);
	ddr_out32(&ddr->eor, tmp);

#ifdef CONFIG_SYS_FSL_ERRATUM_A010315
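/*
 * A010315 workaround: for each PCIe controller left unconfigured by the
 * SerDes protocol, revoke all non-secure read/write permission rather than
 * leaving the disabled controller accessible.
 */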
void erratum_a010315(void)
	for (i = PCIE1; i <= PCIE4; i++)
		if (!is_serdes_configured(i)) {
			debug("PCIe%d: disabled all R/W permission!\n", i);
			set_pcie_ns_access(i, 0);
static void erratum_a010539(void)
#if defined(CONFIG_SYS_FSL_ERRATUM_A010539) && defined(CONFIG_QSPI_BOOT)
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	porsr1 = in_be32(&gur->porsr1);
	porsr1 &= ~FSL_CHASSIS2_CCSR_PORSR1_RCW_MASK;
	out_be32((void *)(CONFIG_SYS_DCSR_DCFG_ADDR + DCFG_DCSR_PORCR1),
		 porsr1);
/* Get VDD in mV from the voltage ID */
int get_core_volt_from_fuse(void)
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	fusesr = in_be32(&gur->dcfg_fusesr);
	debug("%s: fusesr = 0x%x\n", __func__, fusesr);
	vid = (fusesr >> FSL_CHASSIS2_DCFG_FUSESR_ALTVID_SHIFT) &
	       FSL_CHASSIS2_DCFG_FUSESR_ALTVID_MASK;
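	/*
	 * ALTVID overrides VID when it is fused; fall back to the plain VID
	 * field if ALTVID reads as 0 or as all ones.
	 */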
	if ((vid == 0) || (vid == FSL_CHASSIS2_DCFG_FUSESR_ALTVID_MASK)) {
		vid = (fusesr >> FSL_CHASSIS2_DCFG_FUSESR_VID_SHIFT) &
		       FSL_CHASSIS2_DCFG_FUSESR_VID_MASK;
	}
	debug("%s: VID = 0x%x\n", __func__, vid);

	switch (vid) {
	case 0x00: /* VID isn't supported */
		debug("%s: The VID feature is not supported\n", __func__);
	case 0x08: /* 0.9V silicon */
	case 0x10: /* 1.0V silicon */
	default: /* Other core voltage */
		printf("%s: The VID(%x) isn't supported\n", __func__, vid);
	}

	debug("%s: The required minimum voltage of CORE is %dmV\n", __func__, vdd);
__weak int board_setup_core_volt(u32 vdd)

static int setup_core_volt(u32 vdd)
	return board_setup_core_volt(vdd);

#ifdef CONFIG_SYS_FSL_DDR
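/*
 * Set or clear DDR_CDR1_V0PT9_EN in the DDR control driver register so the
 * DDR interface can run from a 0.9V supply; setup_chip_volt() below enables
 * it, presumably only on parts fused for 900 mV operation.
 */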
static void ddr_enable_0v9_volt(bool en)
	struct ccsr_ddr __iomem *ddr = (void *)CONFIG_SYS_FSL_DDR_ADDR;

	tmp = ddr_in32(&ddr->ddr_cdr1);

	if (en)
		tmp |= DDR_CDR1_V0PT9_EN;
	else
		tmp &= ~DDR_CDR1_V0PT9_EN;

	ddr_out32(&ddr->ddr_cdr1, tmp);
int setup_chip_volt(void)
	vdd = get_core_volt_from_fuse();
	/* Nothing to do for silicon that doesn't support VID */
	if (setup_core_volt(vdd))
		printf("%s: Switch core VDD to %dmV failed\n", __func__, vdd);
#ifdef CONFIG_SYS_HAS_SERDES
	if (setup_serdes_volt(vdd))
		printf("%s: Switch SVDD to %dmV failed\n", __func__, vdd);
#endif
#ifdef CONFIG_SYS_FSL_DDR
	ddr_enable_0v9_volt(true);
void fsl_lsch2_early_init_f(void)
	struct ccsr_cci400 *cci = (struct ccsr_cci400 *)(CONFIG_SYS_IMMR +
					CONFIG_SYS_CCI400_OFFSET);
	struct ccsr_scfg *scfg = (struct ccsr_scfg *)CONFIG_SYS_FSL_SCFG_ADDR;

#ifdef CONFIG_LAYERSCAPE_NS_ACCESS
	enable_layerscape_ns_access();
#endif

#ifdef CONFIG_FSL_IFC
	init_early_memctl_regs(); /* tighten IFC timing */
#endif

#if defined(CONFIG_FSL_QSPI) && !defined(CONFIG_QSPI_BOOT)
	out_be32(&scfg->qspi_cfg, SCFG_QSPI_CLKSEL);
#endif

	/* Make SEC reads and writes snoopable */
	setbits_be32(&scfg->snpcnfgcr, SCFG_SNPCNFGCR_SECRDSNP |
		     SCFG_SNPCNFGCR_SECWRSNP |
		     SCFG_SNPCNFGCR_SATARDSNP |
		     SCFG_SNPCNFGCR_SATAWRSNP);
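	/*
	 * With these snoop bits set, SEC and SATA DMA traffic stays coherent
	 * with the core caches, so their buffers need no manual cache
	 * maintenance.
	 */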
	/*
	 * Enable snoop requests and DVM message requests for
	 * slave interface S4 (A53 core cluster)
	 */
	if (current_el() == 3) {
		out_le32(&cci->slave[4].snoop_ctrl,
			 CCI400_DVM_MESSAGE_REQ_EN | CCI400_SNOOP_REQ_EN);
	}

	erratum_a008850_early(); /* part 1 of 2 */
#ifdef CONFIG_QSPI_AHB_INIT
/* Enable 4-byte address support and fast read */
int qspi_ahb_init(void)
	u32 *qspi_lut, lut_key, *qspi_key;

	qspi_key = (void *)SYS_FSL_QSPI_ADDR + 0x300;
	qspi_lut = (void *)SYS_FSL_QSPI_ADDR + 0x310;

	lut_key = in_be32(qspi_key);
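	/*
	 * Probe the register endianness: the LUT key register holds the
	 * fixed key 0x5af05af0, so a big-endian read returning that value
	 * means the block is big-endian; otherwise the writes below fall
	 * back to little-endian accessors.
	 */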
	if (lut_key == 0x5af05af0) {
		/* That means the register is BE */
		out_be32(qspi_key, 0x5af05af0);
		/* Unlock the lut table */
		out_be32(qspi_key + 1, 0x00000002);
		out_be32(qspi_lut, 0x0820040c);
		out_be32(qspi_lut + 1, 0x1c080c08);
		out_be32(qspi_lut + 2, 0x00002400);
		/* Lock the lut table */
		out_be32(qspi_key, 0x5af05af0);
		out_be32(qspi_key + 1, 0x00000001);
	} else {
		/* That means the register is LE */
		out_le32(qspi_key, 0x5af05af0);
		/* Unlock the lut table */
		out_le32(qspi_key + 1, 0x00000002);
		out_le32(qspi_lut, 0x0820040c);
		out_le32(qspi_lut + 1, 0x1c080c08);
		out_le32(qspi_lut + 2, 0x00002400);
		/* Lock the lut table */
		out_le32(qspi_key, 0x5af05af0);
		out_le32(qspi_key + 1, 0x00000001);
	}
#ifdef CONFIG_BOARD_LATE_INIT
int board_late_init(void)
#ifdef CONFIG_SCSI_AHCI_PLAT
#ifdef CONFIG_CHAIN_OF_TRUST
	fsl_setenv_chain_of_trust();
#endif
#ifdef CONFIG_QSPI_AHB_INIT