1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2017-2021 NXP
4 * Copyright 2014-2015 Freescale Semiconductor, Inc.
8 #include <clock_legacy.h>
16 #include <asm/cache.h>
17 #include <asm/global_data.h>
19 #include <asm/ptrace.h>
20 #include <linux/arm-smccc.h>
21 #include <linux/errno.h>
22 #include <asm/system.h>
24 #include <asm/armv8/mmu.h>
26 #include <asm/arch/fsl_serdes.h>
27 #include <asm/arch/soc.h>
28 #include <asm/arch/cpu.h>
29 #include <asm/arch/speed.h>
30 #include <fsl_immap.h>
31 #include <asm/arch/mp.h>
32 #include <efi_loader.h>
33 #include <fsl-mc/fsl_mc.h>
34 #ifdef CONFIG_FSL_ESDHC
35 #include <fsl_esdhc.h>
37 #include <asm/armv8/sec_firmware.h>
38 #ifdef CONFIG_SYS_FSL_DDR
39 #include <fsl_ddr_sdram.h>
42 #include <asm/arch/clock.h>
44 #include <fsl_qbman.h>
47 #include <env_internal.h>
48 #ifdef CONFIG_CHAIN_OF_TRUST
49 #include <fsl_validate.h>
52 #include <linux/mii.h>
55 DECLARE_GLOBAL_DATA_PTR;
57 static struct cpu_type cpu_type_list[] = {
58 CPU_TYPE_ENTRY(LS2080A, LS2080A, 8),
59 CPU_TYPE_ENTRY(LS2085A, LS2085A, 8),
60 CPU_TYPE_ENTRY(LS2045A, LS2045A, 4),
61 CPU_TYPE_ENTRY(LS2088A, LS2088A, 8),
62 CPU_TYPE_ENTRY(LS2084A, LS2084A, 8),
63 CPU_TYPE_ENTRY(LS2048A, LS2048A, 4),
64 CPU_TYPE_ENTRY(LS2044A, LS2044A, 4),
65 CPU_TYPE_ENTRY(LS2081A, LS2081A, 8),
66 CPU_TYPE_ENTRY(LS2041A, LS2041A, 4),
67 CPU_TYPE_ENTRY(LS1043A, LS1043A, 4),
68 CPU_TYPE_ENTRY(LS1043A, LS1043A_P23, 4),
69 CPU_TYPE_ENTRY(LS1023A, LS1023A, 2),
70 CPU_TYPE_ENTRY(LS1023A, LS1023A_P23, 2),
71 CPU_TYPE_ENTRY(LS1046A, LS1046A, 4),
72 CPU_TYPE_ENTRY(LS1026A, LS1026A, 2),
73 CPU_TYPE_ENTRY(LS2040A, LS2040A, 4),
74 CPU_TYPE_ENTRY(LS1012A, LS1012A, 1),
75 CPU_TYPE_ENTRY(LS1017A, LS1017A, 1),
76 CPU_TYPE_ENTRY(LS1018A, LS1018A, 1),
77 CPU_TYPE_ENTRY(LS1027A, LS1027A, 2),
78 CPU_TYPE_ENTRY(LS1028A, LS1028A, 2),
79 CPU_TYPE_ENTRY(LS1088A, LS1088A, 8),
80 CPU_TYPE_ENTRY(LS1084A, LS1084A, 8),
81 CPU_TYPE_ENTRY(LS1048A, LS1048A, 4),
82 CPU_TYPE_ENTRY(LS1044A, LS1044A, 4),
83 CPU_TYPE_ENTRY(LX2160A, LX2160A, 16),
84 CPU_TYPE_ENTRY(LX2120A, LX2120A, 12),
85 CPU_TYPE_ENTRY(LX2080A, LX2080A, 8),
86 CPU_TYPE_ENTRY(LX2162A, LX2162A, 16),
87 CPU_TYPE_ENTRY(LX2122A, LX2122A, 12),
88 CPU_TYPE_ENTRY(LX2082A, LX2082A, 8),
91 #define EARLY_PGTABLE_SIZE 0x5000
92 static struct mm_region early_map[] = {
93 #ifdef CONFIG_FSL_LSCH3
94 { CFG_SYS_FSL_CCSR_BASE, CFG_SYS_FSL_CCSR_BASE,
95 CFG_SYS_FSL_CCSR_SIZE,
96 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
97 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
99 { CFG_SYS_FSL_OCRAM_BASE, CFG_SYS_FSL_OCRAM_BASE,
100 SYS_FSL_OCRAM_SPACE_SIZE,
101 PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
103 { CFG_SYS_FSL_QSPI_BASE1, CFG_SYS_FSL_QSPI_BASE1,
104 CFG_SYS_FSL_QSPI_SIZE1,
105 PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE},
106 #ifdef CONFIG_FSL_IFC
107 /* For IFC Region #1, only the first 4MB is cache-enabled */
108 { CFG_SYS_FSL_IFC_BASE1, CFG_SYS_FSL_IFC_BASE1,
109 CFG_SYS_FSL_IFC_SIZE1_1,
110 PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
112 { CFG_SYS_FSL_IFC_BASE1 + CFG_SYS_FSL_IFC_SIZE1_1,
113 CFG_SYS_FSL_IFC_BASE1 + CFG_SYS_FSL_IFC_SIZE1_1,
114 CFG_SYS_FSL_IFC_SIZE1 - CFG_SYS_FSL_IFC_SIZE1_1,
115 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
117 { CFG_SYS_FLASH_BASE, CFG_SYS_FSL_IFC_BASE1,
118 CFG_SYS_FSL_IFC_SIZE1,
119 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
122 { CFG_SYS_FSL_DRAM_BASE1, CFG_SYS_FSL_DRAM_BASE1,
123 CFG_SYS_FSL_DRAM_SIZE1,
124 #if defined(CONFIG_TFABOOT) || \
125 (defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
126 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
127 #else /* Start with nGnRnE and PXN and UXN to prevent speculative access */
128 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
130 PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
132 #ifdef CONFIG_FSL_IFC
133 /* Map IFC region #2 up to CFG_SYS_FLASH_BASE for NAND boot */
134 { CFG_SYS_FSL_IFC_BASE2, CFG_SYS_FSL_IFC_BASE2,
135 CFG_SYS_FLASH_BASE - CFG_SYS_FSL_IFC_BASE2,
136 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
139 { CFG_SYS_FSL_DCSR_BASE, CFG_SYS_FSL_DCSR_BASE,
140 CFG_SYS_FSL_DCSR_SIZE,
141 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
142 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
144 { CFG_SYS_FSL_DRAM_BASE2, CFG_SYS_FSL_DRAM_BASE2,
145 CFG_SYS_FSL_DRAM_SIZE2,
146 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
147 PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
149 #ifdef CFG_SYS_FSL_DRAM_BASE3
150 { CFG_SYS_FSL_DRAM_BASE3, CFG_SYS_FSL_DRAM_BASE3,
151 CFG_SYS_FSL_DRAM_SIZE3,
152 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
153 PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
156 #elif defined(CONFIG_FSL_LSCH2)
157 { CFG_SYS_FSL_CCSR_BASE, CFG_SYS_FSL_CCSR_BASE,
158 CFG_SYS_FSL_CCSR_SIZE,
159 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
160 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
162 { CFG_SYS_FSL_OCRAM_BASE, CFG_SYS_FSL_OCRAM_BASE,
163 SYS_FSL_OCRAM_SPACE_SIZE,
164 PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
166 { CFG_SYS_FSL_DCSR_BASE, CFG_SYS_FSL_DCSR_BASE,
167 CFG_SYS_FSL_DCSR_SIZE,
168 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
169 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
171 { CFG_SYS_FSL_QSPI_BASE, CFG_SYS_FSL_QSPI_BASE,
172 CFG_SYS_FSL_QSPI_SIZE,
173 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
175 #ifdef CONFIG_FSL_IFC
176 { CFG_SYS_FSL_IFC_BASE, CFG_SYS_FSL_IFC_BASE,
177 CFG_SYS_FSL_IFC_SIZE,
178 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
181 { CFG_SYS_FSL_DRAM_BASE1, CFG_SYS_FSL_DRAM_BASE1,
182 CFG_SYS_FSL_DRAM_SIZE1,
183 #if defined(CONFIG_TFABOOT) || \
184 (defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
185 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
186 #else /* Start with nGnRnE and PXN and UXN to prevent speculative access */
187 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
189 PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
191 { CFG_SYS_FSL_DRAM_BASE2, CFG_SYS_FSL_DRAM_BASE2,
192 CFG_SYS_FSL_DRAM_SIZE2,
193 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
194 PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
197 {}, /* list terminator */
200 static struct mm_region final_map[] = {
201 #ifdef CONFIG_FSL_LSCH3
202 { CFG_SYS_FSL_CCSR_BASE, CFG_SYS_FSL_CCSR_BASE,
203 CFG_SYS_FSL_CCSR_SIZE,
204 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
205 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
207 { CFG_SYS_FSL_OCRAM_BASE, CFG_SYS_FSL_OCRAM_BASE,
208 SYS_FSL_OCRAM_SPACE_SIZE,
209 PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
211 { CFG_SYS_FSL_DRAM_BASE1, CFG_SYS_FSL_DRAM_BASE1,
212 CFG_SYS_FSL_DRAM_SIZE1,
213 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
214 PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
216 { CFG_SYS_FSL_QSPI_BASE1, CFG_SYS_FSL_QSPI_BASE1,
217 CFG_SYS_FSL_QSPI_SIZE1,
218 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
219 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
221 { CFG_SYS_FSL_QSPI_BASE2, CFG_SYS_FSL_QSPI_BASE2,
222 CFG_SYS_FSL_QSPI_SIZE2,
223 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
224 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
226 #ifdef CONFIG_FSL_IFC
227 { CFG_SYS_FSL_IFC_BASE2, CFG_SYS_FSL_IFC_BASE2,
228 CFG_SYS_FSL_IFC_SIZE2,
229 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
230 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
233 { CFG_SYS_FSL_DCSR_BASE, CFG_SYS_FSL_DCSR_BASE,
234 CFG_SYS_FSL_DCSR_SIZE,
235 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
236 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
238 { CFG_SYS_FSL_MC_BASE, CFG_SYS_FSL_MC_BASE,
240 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
241 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
243 { CFG_SYS_FSL_NI_BASE, CFG_SYS_FSL_NI_BASE,
245 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
246 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
248 /* For QBMAN portal, only the first 64MB is cache-enabled */
249 { CFG_SYS_FSL_QBMAN_BASE, CFG_SYS_FSL_QBMAN_BASE,
250 CFG_SYS_FSL_QBMAN_SIZE_1,
251 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
252 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN | PTE_BLOCK_NS
254 { CFG_SYS_FSL_QBMAN_BASE + CFG_SYS_FSL_QBMAN_SIZE_1,
255 CFG_SYS_FSL_QBMAN_BASE + CFG_SYS_FSL_QBMAN_SIZE_1,
256 CFG_SYS_FSL_QBMAN_SIZE - CFG_SYS_FSL_QBMAN_SIZE_1,
257 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
258 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
260 { CFG_SYS_PCIE1_PHYS_ADDR, CFG_SYS_PCIE1_PHYS_ADDR,
261 CFG_SYS_PCIE1_PHYS_SIZE,
262 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
263 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
265 { CFG_SYS_PCIE2_PHYS_ADDR, CFG_SYS_PCIE2_PHYS_ADDR,
266 CFG_SYS_PCIE2_PHYS_SIZE,
267 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
268 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
270 #ifdef CFG_SYS_PCIE3_PHYS_ADDR
271 { CFG_SYS_PCIE3_PHYS_ADDR, CFG_SYS_PCIE3_PHYS_ADDR,
272 CFG_SYS_PCIE3_PHYS_SIZE,
273 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
274 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
277 #ifdef CFG_SYS_PCIE4_PHYS_ADDR
278 { CFG_SYS_PCIE4_PHYS_ADDR, CFG_SYS_PCIE4_PHYS_ADDR,
279 CFG_SYS_PCIE4_PHYS_SIZE,
280 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
281 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
284 #ifdef SYS_PCIE5_PHYS_ADDR
285 { SYS_PCIE5_PHYS_ADDR, SYS_PCIE5_PHYS_ADDR,
287 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
288 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
291 #ifdef SYS_PCIE6_PHYS_ADDR
292 { SYS_PCIE6_PHYS_ADDR, SYS_PCIE6_PHYS_ADDR,
294 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
295 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
298 { CFG_SYS_FSL_WRIOP1_BASE, CFG_SYS_FSL_WRIOP1_BASE,
299 CFG_SYS_FSL_WRIOP1_SIZE,
300 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
301 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
303 { CFG_SYS_FSL_AIOP1_BASE, CFG_SYS_FSL_AIOP1_BASE,
304 CFG_SYS_FSL_AIOP1_SIZE,
305 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
306 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
308 { CFG_SYS_FSL_PEBUF_BASE, CFG_SYS_FSL_PEBUF_BASE,
309 CFG_SYS_FSL_PEBUF_SIZE,
310 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
311 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
313 { CFG_SYS_FSL_DRAM_BASE2, CFG_SYS_FSL_DRAM_BASE2,
314 CFG_SYS_FSL_DRAM_SIZE2,
315 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
316 PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
318 #ifdef CFG_SYS_FSL_DRAM_BASE3
319 { CFG_SYS_FSL_DRAM_BASE3, CFG_SYS_FSL_DRAM_BASE3,
320 CFG_SYS_FSL_DRAM_SIZE3,
321 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
322 PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
325 #elif defined(CONFIG_FSL_LSCH2)
326 { CONFIG_SYS_FSL_BOOTROM_BASE, CONFIG_SYS_FSL_BOOTROM_BASE,
327 CONFIG_SYS_FSL_BOOTROM_SIZE,
328 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
329 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
331 { CFG_SYS_FSL_CCSR_BASE, CFG_SYS_FSL_CCSR_BASE,
332 CFG_SYS_FSL_CCSR_SIZE,
333 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
334 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
336 { CFG_SYS_FSL_OCRAM_BASE, CFG_SYS_FSL_OCRAM_BASE,
337 SYS_FSL_OCRAM_SPACE_SIZE,
338 PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
340 { CFG_SYS_FSL_DCSR_BASE, CFG_SYS_FSL_DCSR_BASE,
341 CFG_SYS_FSL_DCSR_SIZE,
342 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
343 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
345 { CFG_SYS_FSL_QSPI_BASE, CFG_SYS_FSL_QSPI_BASE,
346 CFG_SYS_FSL_QSPI_SIZE,
347 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
348 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
350 #ifdef CONFIG_FSL_IFC
351 { CFG_SYS_FSL_IFC_BASE, CFG_SYS_FSL_IFC_BASE,
352 CFG_SYS_FSL_IFC_SIZE,
353 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
356 { CFG_SYS_FSL_DRAM_BASE1, CFG_SYS_FSL_DRAM_BASE1,
357 CFG_SYS_FSL_DRAM_SIZE1,
358 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
359 PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
361 { CFG_SYS_FSL_QBMAN_BASE, CFG_SYS_FSL_QBMAN_BASE,
362 CFG_SYS_FSL_QBMAN_SIZE,
363 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
364 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
366 { CFG_SYS_FSL_DRAM_BASE2, CFG_SYS_FSL_DRAM_BASE2,
367 CFG_SYS_FSL_DRAM_SIZE2,
368 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
369 PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
371 { CFG_SYS_PCIE1_PHYS_ADDR, CFG_SYS_PCIE1_PHYS_ADDR,
372 CFG_SYS_PCIE1_PHYS_SIZE,
373 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
374 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
376 { CFG_SYS_PCIE2_PHYS_ADDR, CFG_SYS_PCIE2_PHYS_ADDR,
377 CFG_SYS_PCIE2_PHYS_SIZE,
378 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
379 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
381 #ifdef CFG_SYS_PCIE3_PHYS_ADDR
382 { CFG_SYS_PCIE3_PHYS_ADDR, CFG_SYS_PCIE3_PHYS_ADDR,
383 CFG_SYS_PCIE3_PHYS_SIZE,
384 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
385 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
388 { CFG_SYS_FSL_DRAM_BASE3, CFG_SYS_FSL_DRAM_BASE3,
389 CFG_SYS_FSL_DRAM_SIZE3,
390 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
391 PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
394 #ifdef CFG_SYS_MEM_RESERVE_SECURE
395 {}, /* space holder for secure mem */
400 struct mm_region *mem_map = early_map;
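/*
 * Decode the SoC name from the SVR: look up the part in cpu_type_list[],
 * append the personality suffixes and the silicon revision (giving a string
 * along the lines of "LS1046AE Rev1.0"), or fall back to "unknown".
 */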
402 void cpu_name(char *name)
404 struct ccsr_gur __iomem *gur = (void *)(CFG_SYS_FSL_GUTS_ADDR);
405 unsigned int i, svr, ver;
407 svr = gur_in32(&gur->svr);
408 ver = SVR_SOC_VER(svr);
410 for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
411 if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
412 strcpy(name, cpu_type_list[i].name);
413 #if defined(CONFIG_ARCH_LX2160A) || defined(CONFIG_ARCH_LX2162A)
414 if (IS_C_PROCESSOR(svr))
418 if (IS_E_PROCESSOR(svr))
421 sprintf(name + strlen(name), " Rev%d.%d",
422 SVR_MAJ(svr), SVR_MIN(svr));
426 if (i == ARRAY_SIZE(cpu_type_list))
427 strcpy(name, "unknown");
430 #if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
432 * To start the MMU before DDR is available, we create the MMU table in SRAM.
433 * The base address of SRAM is CFG_SYS_FSL_OCRAM_BASE. We use three
434 * levels of translation tables here to cover the 40-bit address space.
435 * We use a 4KB granule size, with 40-bit physical addresses and T0SZ=24.
436 * Addresses above EARLY_PGTABLE_SIZE (0x5000) are free for other purposes.
437 * Note: the debug print in cache_v8.c is not usable for debugging
438 * these early MMU tables because the UART is not yet available.
440 static inline void early_mmu_setup(void)
442 unsigned int el = current_el();
444 /* global data is already set up, no allocation yet */
446 gd->arch.tlb_addr = CFG_SYS_FSL_OCRAM_BASE;
448 gd->arch.tlb_addr = CFG_SYS_DDR_SDRAM_BASE;
449 gd->arch.tlb_fillptr = gd->arch.tlb_addr;
450 gd->arch.tlb_size = EARLY_PGTABLE_SIZE;
452 /* Create early page tables */
455 /* point TTBR to the new table */
456 set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
457 get_tcr(NULL, NULL) &
458 ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
461 set_sctlr(get_sctlr() | CR_M);
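/*
 * On LS2088A-family personalities the PCIe windows sit at different
 * physical addresses than on the LS2080A, so patch the affected
 * final_map[] entries at run time based on the SVR.
 */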
464 static void fix_pcie_mmu_map(void)
466 #ifdef CONFIG_ARCH_LS2080A
469 struct ccsr_gur __iomem *gur = (void *)(CFG_SYS_FSL_GUTS_ADDR);
471 svr = gur_in32(&gur->svr);
472 ver = SVR_SOC_VER(svr);
474 /* Fix PCIE base and size for LS2088A */
475 if ((ver == SVR_LS2088A) || (ver == SVR_LS2084A) ||
476 (ver == SVR_LS2048A) || (ver == SVR_LS2044A) ||
477 (ver == SVR_LS2081A) || (ver == SVR_LS2041A)) {
478 for (i = 0; i < ARRAY_SIZE(final_map); i++) {
479 switch (final_map[i].phys) {
480 case CFG_SYS_PCIE1_PHYS_ADDR:
481 final_map[i].phys = 0x2000000000ULL;
482 final_map[i].virt = 0x2000000000ULL;
483 final_map[i].size = 0x800000000ULL;
485 case CFG_SYS_PCIE2_PHYS_ADDR:
486 final_map[i].phys = 0x2800000000ULL;
487 final_map[i].virt = 0x2800000000ULL;
488 final_map[i].size = 0x800000000ULL;
490 #ifdef CFG_SYS_PCIE3_PHYS_ADDR
491 case CFG_SYS_PCIE3_PHYS_ADDR:
492 final_map[i].phys = 0x3000000000ULL;
493 final_map[i].virt = 0x3000000000ULL;
494 final_map[i].size = 0x800000000ULL;
497 #ifdef CFG_SYS_PCIE4_PHYS_ADDR
498 case CFG_SYS_PCIE4_PHYS_ADDR:
499 final_map[i].phys = 0x3800000000ULL;
500 final_map[i].virt = 0x3800000000ULL;
501 final_map[i].size = 0x800000000ULL;
513 * The final tables look similar to the early tables, but differ in detail.
514 * These tables are in DRAM. Sub-tables are added to enable cache for
517 * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
518 * OCRAM will not be used for this purpose, so gd->arch.secure_ram can't be 0.
520 static inline void final_mmu_setup(void)
522 u64 tlb_addr_save = gd->arch.tlb_addr;
523 unsigned int el = current_el();
526 /* fix the final_map before filling in the block entries */
531 /* Update mapping for DDR to actual size */
532 for (index = 0; index < ARRAY_SIZE(final_map) - 2; index++) {
534 * Find the entry for the DDR mapping and update the address and
535 * size. Zero-sized mappings will be skipped when creating the MMU
538 switch (final_map[index].virt) {
539 case CFG_SYS_FSL_DRAM_BASE1:
540 final_map[index].virt = gd->bd->bi_dram[0].start;
541 final_map[index].phys = gd->bd->bi_dram[0].start;
542 final_map[index].size = gd->bd->bi_dram[0].size;
544 #ifdef CFG_SYS_FSL_DRAM_BASE2
545 case CFG_SYS_FSL_DRAM_BASE2:
546 #if (CONFIG_NR_DRAM_BANKS >= 2)
547 final_map[index].virt = gd->bd->bi_dram[1].start;
548 final_map[index].phys = gd->bd->bi_dram[1].start;
549 final_map[index].size = gd->bd->bi_dram[1].size;
551 final_map[index].size = 0;
555 #ifdef CFG_SYS_FSL_DRAM_BASE3
556 case CFG_SYS_FSL_DRAM_BASE3:
557 #if (CONFIG_NR_DRAM_BANKS >= 3)
558 final_map[index].virt = gd->bd->bi_dram[2].start;
559 final_map[index].phys = gd->bd->bi_dram[2].start;
560 final_map[index].size = gd->bd->bi_dram[2].size;
562 final_map[index].size = 0;
571 #ifdef CFG_SYS_MEM_RESERVE_SECURE
572 if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
575 * Only use gd->arch.secure_ram if the address is
576 * recalculated. Align to 4KB for MMU table.
578 /* put page tables in secure ram */
579 index = ARRAY_SIZE(final_map) - 2;
580 gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
581 final_map[index].virt = gd->arch.secure_ram & ~0x3;
582 final_map[index].phys = final_map[index].virt;
583 final_map[index].size = CFG_SYS_MEM_RESERVE_SECURE;
584 final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
585 gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
586 tlb_addr_save = gd->arch.tlb_addr;
588 /* Use allocated (board_f.c) memory for TLB */
589 tlb_addr_save = gd->arch.tlb_allocated;
590 gd->arch.tlb_addr = tlb_addr_save;
595 /* Reset the fill ptr */
596 gd->arch.tlb_fillptr = tlb_addr_save;
598 /* Create normal system page tables */
601 /* Create emergency page tables */
602 gd->arch.tlb_addr = gd->arch.tlb_fillptr;
603 gd->arch.tlb_emerg = gd->arch.tlb_addr;
605 gd->arch.tlb_addr = tlb_addr_save;
607 /* Disable cache and MMU */
608 dcache_disable(); /* TLBs are invalidated */
609 invalidate_icache_all();
611 /* point TTBR to the new table */
612 set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(NULL, NULL),
615 set_sctlr(get_sctlr() | CR_M);
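/*
 * Report how much memory the relocation code should reserve for the
 * final MMU tables.
 */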
618 u64 get_page_table_size(void)
623 int arch_cpu_init(void)
626 * This function is called before U-Boot relocates itself, to speed up
627 * the running system. It is not necessary to run if performance is not
628 * critical. Skip if the MMU is already enabled by SPL or other means.
630 if (get_sctlr() & CR_M)
634 __asm_invalidate_dcache_all();
635 __asm_invalidate_tlb_all();
637 set_sctlr(get_sctlr() | CR_C);
647 * This function is called from common/board_r.c.
648 * It recreates the MMU table in main memory.
650 void enable_caches(void)
653 __asm_invalidate_tlb_all();
657 #endif /* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */
659 #ifdef CONFIG_TFABOOT
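/*
 * Decode the RCW boot source field of PORSR1 into a boot_src value; the
 * field layout differs between the LSCH3, LSCH3.2 and LSCH2 chassis.
 */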
660 enum boot_src __get_boot_src(u32 porsr1)
662 enum boot_src src = BOOT_SOURCE_RESERVED;
663 u32 rcw_src = (porsr1 & RCW_SRC_MASK) >> RCW_SRC_BIT;
664 #if !defined(CONFIG_NXP_LSCH3_2)
667 debug("%s: rcw_src 0x%x\n", __func__, rcw_src);
669 #if defined(CONFIG_FSL_LSCH3)
670 #if defined(CONFIG_NXP_LSCH3_2)
672 case RCW_SRC_SDHC1_VAL:
673 src = BOOT_SOURCE_SD_MMC;
675 case RCW_SRC_SDHC2_VAL:
676 src = BOOT_SOURCE_SD_MMC2;
678 case RCW_SRC_I2C1_VAL:
679 src = BOOT_SOURCE_I2C1_EXTENDED;
681 case RCW_SRC_FLEXSPI_NAND2K_VAL:
682 src = BOOT_SOURCE_XSPI_NAND;
684 case RCW_SRC_FLEXSPI_NAND4K_VAL:
685 src = BOOT_SOURCE_XSPI_NAND;
687 case RCW_SRC_RESERVED_1_VAL:
688 src = BOOT_SOURCE_RESERVED;
690 case RCW_SRC_FLEXSPI_NOR_24B:
691 src = BOOT_SOURCE_XSPI_NOR;
694 src = BOOT_SOURCE_RESERVED;
697 val = rcw_src & RCW_SRC_TYPE_MASK;
698 if (val == RCW_SRC_NOR_VAL) {
699 val = rcw_src & NOR_TYPE_MASK;
704 src = BOOT_SOURCE_IFC_NOR;
707 src = BOOT_SOURCE_RESERVED;
710 /* RCW SRC Serial Flash */
711 val = rcw_src & RCW_SRC_SERIAL_MASK;
713 case RCW_SRC_QSPI_VAL:
714 /* RCW SRC Serial NOR (QSPI) */
715 src = BOOT_SOURCE_QSPI_NOR;
717 case RCW_SRC_SD_CARD_VAL:
718 /* RCW SRC SD Card */
719 src = BOOT_SOURCE_SD_MMC;
721 case RCW_SRC_EMMC_VAL:
723 src = BOOT_SOURCE_SD_MMC;
725 case RCW_SRC_I2C1_VAL:
726 /* RCW SRC I2C1 Extended */
727 src = BOOT_SOURCE_I2C1_EXTENDED;
730 src = BOOT_SOURCE_RESERVED;
734 #elif defined(CONFIG_FSL_LSCH2)
736 val = rcw_src & RCW_SRC_NAND_MASK;
737 if (val == RCW_SRC_NAND_VAL) {
738 val = rcw_src & NAND_RESERVED_MASK;
739 if (val != NAND_RESERVED_1 && val != NAND_RESERVED_2)
740 src = BOOT_SOURCE_IFC_NAND;
744 val = rcw_src & RCW_SRC_NOR_MASK;
745 if (val == NOR_8B_VAL || val == NOR_16B_VAL) {
746 src = BOOT_SOURCE_IFC_NOR;
751 src = BOOT_SOURCE_QSPI_NOR;
754 src = BOOT_SOURCE_SD_MMC;
757 src = BOOT_SOURCE_RESERVED;
763 if (IS_ENABLED(CONFIG_SYS_FSL_ERRATUM_A010539) && !rcw_src)
764 src = BOOT_SOURCE_QSPI_NOR;
766 debug("%s: src 0x%x\n", __func__, src);
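/*
 * Fetch PORSR1 (via the RCW SiP service when running at EL2 under TF-A,
 * or directly from the DCFG/GUTS block at EL3) and decode the boot source.
 */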
770 enum boot_src get_boot_src(void)
772 struct arm_smccc_res res;
775 #if defined(CONFIG_FSL_LSCH3)
776 u32 __iomem *dcfg_ccsr = (u32 __iomem *)DCFG_BASE;
777 #elif defined(CONFIG_FSL_LSCH2)
778 struct ccsr_gur __iomem *gur = (void *)(CFG_SYS_FSL_GUTS_ADDR);
781 if (current_el() == 2) {
782 arm_smccc_smc(SIP_SVC_RCW, 0, 0, 0, 0, 0, 0, 0, &res);
787 if (current_el() == 3 || !porsr1) {
788 #ifdef CONFIG_FSL_LSCH3
789 porsr1 = in_le32(dcfg_ccsr + DCFG_PORSR1 / 4);
790 #elif defined(CONFIG_FSL_LSCH2)
791 porsr1 = in_be32(&gur->porsr1);
795 debug("%s: porsr1 0x%x\n", __func__, porsr1);
797 return __get_boot_src(porsr1);
800 #ifdef CONFIG_ENV_IS_IN_MMC
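/* Select the MMC device holding the environment based on the boot source. */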
801 int mmc_get_env_dev(void)
803 enum boot_src src = get_boot_src();
804 int dev = CONFIG_SYS_MMC_ENV_DEV;
807 case BOOT_SOURCE_SD_MMC:
810 case BOOT_SOURCE_SD_MMC2:
821 enum env_location arch_env_get_location(enum env_operation op, int prio)
823 enum boot_src src = get_boot_src();
824 enum env_location env_loc = ENVL_NOWHERE;
829 #ifdef CONFIG_ENV_IS_NOWHERE
834 case BOOT_SOURCE_IFC_NOR:
835 env_loc = ENVL_FLASH;
837 case BOOT_SOURCE_QSPI_NOR:
839 case BOOT_SOURCE_XSPI_NOR:
840 env_loc = ENVL_SPI_FLASH;
842 case BOOT_SOURCE_IFC_NAND:
844 case BOOT_SOURCE_QSPI_NAND:
846 case BOOT_SOURCE_XSPI_NAND:
849 case BOOT_SOURCE_SD_MMC:
851 case BOOT_SOURCE_SD_MMC2:
854 case BOOT_SOURCE_I2C1_EXTENDED:
862 #endif /* CONFIG_TFABOOT */
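/*
 * Return the initiator type word from the RCW topology table for the given
 * initiator of a cluster; initiators whose available (AV) bit is clear are
 * treated as absent.
 */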
864 u32 initiator_type(u32 cluster, int init_id)
866 struct ccsr_gur *gur = (void *)(CFG_SYS_FSL_GUTS_ADDR);
867 u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
870 type = gur_in32(&gur->tp_ityp[idx]);
871 if (type & TP_ITYP_AV)
877 u32 cpu_pos_mask(void)
879 struct ccsr_gur __iomem *gur = (void *)(CFG_SYS_FSL_GUTS_ADDR);
881 u32 cluster, type, mask = 0;
886 cluster = gur_in32(&gur->tp_cluster[i].lower);
887 for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
888 type = initiator_type(cluster, j);
889 if (type && (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM))
890 mask |= 1 << (i * TP_INIT_PER_CLUSTER + j);
893 } while ((cluster & TP_CLUSTER_EOC) == 0x0);
900 struct ccsr_gur __iomem *gur = (void *)(CFG_SYS_FSL_GUTS_ADDR);
901 int i = 0, count = 0;
902 u32 cluster, type, mask = 0;
907 cluster = gur_in32(&gur->tp_cluster[i].lower);
908 for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
909 type = initiator_type(cluster, j);
911 if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
917 } while ((cluster & TP_CLUSTER_EOC) == 0x0);
923 * Return the number of cores on this SOC.
925 int cpu_numcores(void)
927 return hweight32(cpu_mask());
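/*
 * Walk the topology cluster registers to find which cluster the given
 * logical core belongs to; returns -1 if the core cannot be found.
 */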
930 int fsl_qoriq_core_to_cluster(unsigned int core)
932 struct ccsr_gur __iomem *gur =
933 (void __iomem *)(CFG_SYS_FSL_GUTS_ADDR);
934 int i = 0, count = 0;
940 cluster = gur_in32(&gur->tp_cluster[i].lower);
941 for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
942 if (initiator_type(cluster, j)) {
949 } while ((cluster & TP_CLUSTER_EOC) == 0x0);
951 return -1; /* cannot identify the cluster */
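/*
 * Return the initiator type word for the given core (used to tell A53,
 * A57, A72 etc. apart), or -1 if the core is not in the topology table.
 */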
954 u32 fsl_qoriq_core_to_type(unsigned int core)
956 struct ccsr_gur __iomem *gur =
957 (void __iomem *)(CFG_SYS_FSL_GUTS_ADDR);
958 int i = 0, count = 0;
964 cluster = gur_in32(&gur->tp_cluster[i].lower);
965 for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
966 type = initiator_type(cluster, j);
974 } while ((cluster & TP_CLUSTER_EOC) == 0x0);
976 return -1; /* cannot identify the cluster */
979 #ifndef CONFIG_FSL_LSCH3
982 struct ccsr_gur __iomem *gur = (void *)(CFG_SYS_FSL_GUTS_ADDR);
984 return gur_in32(&gur->svr);
988 #ifdef CONFIG_DISPLAY_CPUINFO
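/*
 * Print the SoC name and revision, the per-core and bus/DDR clock
 * frequencies, and the Reset Configuration Word used for this boot.
 */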
989 int print_cpuinfo(void)
991 struct ccsr_gur __iomem *gur = (void *)(CFG_SYS_FSL_GUTS_ADDR);
992 struct sys_info sysinfo;
994 unsigned int i, core;
995 u32 type, rcw, svr = gur_in32(&gur->svr);
1000 printf(" %s (0x%x)\n", buf, svr);
1001 memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
1002 get_sys_info(&sysinfo);
1003 puts("Clock Configuration:");
1004 for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
1007 type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
1008 printf("CPU%d(%s):%-4s MHz ", core,
1009 type == TY_ITYP_VER_A7 ? "A7 " :
1010 (type == TY_ITYP_VER_A53 ? "A53" :
1011 (type == TY_ITYP_VER_A57 ? "A57" :
1012 (type == TY_ITYP_VER_A72 ? "A72" : " "))),
1013 strmhz(buf, sysinfo.freq_processor[core]));
1015 /* Display platform clock as Bus frequency. */
1016 printf("\n Bus: %-4s MHz ",
1017 strmhz(buf, sysinfo.freq_systembus / CONFIG_SYS_FSL_PCLK_DIV));
1018 printf("DDR: %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
1019 #ifdef CONFIG_SYS_DPAA_FMAN
1020 printf(" FMAN: %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
1022 #ifdef CONFIG_SYS_FSL_HAS_DP_DDR
1023 if (soc_has_dp_ddr()) {
1024 printf(" DP-DDR: %-4s MT/s",
1025 strmhz(buf, sysinfo.freq_ddrbus2));
1031 * Display the RCW, so that no one gets confused as to what RCW
1032 * we're actually using for this boot.
1034 puts("Reset Configuration Word (RCW):");
1035 for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
1036 rcw = gur_in32(&gur->rcwsr[i]);
1038 printf("\n %08x:", i * 4);
1039 printf(" %08x", rcw);
1047 #ifdef CONFIG_FSL_ESDHC
1048 int cpu_mmc_init(struct bd_info *bis)
1050 return fsl_esdhc_mmc_init(bis);
1054 int cpu_eth_init(struct bd_info *bis)
1058 #if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
1059 error = fsl_mc_ldpaa_init(bis);
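/* Report whether the secure firmware advertises a usable PSCI version. */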
1064 int check_psci(void)
1066 unsigned int psci_ver;
1068 psci_ver = sec_firmware_support_psci_version();
1069 if (psci_ver == PSCI_INVALID_VER)
1075 static void config_core_prefetch(void)
1078 char buffer[HWCONFIG_BUFFER_SIZE];
1079 const char *prefetch_arg = NULL;
1080 struct arm_smccc_res res;
1084 if (env_get_f("hwconfig", buffer, sizeof(buffer)) > 0)
1089 prefetch_arg = hwconfig_subarg_f("core_prefetch", "disable",
1093 mask = simple_strtoul(prefetch_arg, NULL, 0) & 0xff;
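/*
 * Each mask bit selects a core; a setting along the lines of
 * hwconfig=core_prefetch:disable=0x6 would ask the firmware to disable
 * prefetch on cores 1 and 2. Core 0 (bit 0) is rejected below.
 */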
1095 printf("Core0 prefetch can't be disabled\n");
1099 #define SIP_PREFETCH_DISABLE_64 0xC200FF13
1100 arm_smccc_smc(SIP_PREFETCH_DISABLE_64, mask, 0, 0, 0, 0, 0, 0,
1104 printf("Prefetch disable config failed for mask ");
1106 printf("Prefetch disable config passed for mask ");
1107 printf("0x%x\n", mask);
1111 #ifdef CONFIG_PCIE_ECAM_GENERIC
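/* Weak default, overridden by boards that need to program ICIDs for the generic ECAM PCIe host. */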
1112 __weak void set_ecam_icids(void)
1117 int arch_early_init_r(void)
1119 #ifdef CONFIG_SYS_FSL_ERRATUM_A009635
1122 * erratum A009635 is valid only for the LS2080A SoC and
1123 * its personalities.
1125 svr_dev_id = get_svr();
1126 if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
1129 #if defined(CONFIG_SYS_FSL_ERRATUM_A009942) && defined(CONFIG_SYS_FSL_DDR)
1130 erratum_a009942_check_cpo();
1133 debug("PSCI: PSCI does not exist.\n");
1135 /* if PSCI does not exist, boot secondary cores here */
1136 if (fsl_layerscape_wake_seconday_cores())
1137 printf("Did not wake secondary cores\n");
1140 config_core_prefetch();
1142 #ifdef CONFIG_SYS_HAS_SERDES
1145 #ifdef CONFIG_SYS_FSL_HAS_RGMII
1146 /* Some DPMACs in Armv8-based Freescale Layerscape SoCs can be
1147 * configured via both SerDes (SGMII, 10GBase-R, XLAUI etc.) bits and via
1148 * EC*_PMUX (RGMII) bits in the RCW.
1149 * E.g. DPMACs 17 and 18 in the LX2160A can be configured as SGMII from
1150 * the SerDes bits and as RGMII via the EC1_PMUX/EC2_PMUX bits.
1151 * If a DPMAC is enabled as RGMII through ECx_PMUX, that takes
1152 * precedence over the SerDes protocol, i.e. on the LX2160A, if we select a
1153 * SerDes protocol that configures dpmac17 as SGMII and set EC1_PMUX to
1154 * RGMII, then the DPMAC is RGMII and not SGMII.
1156 * Therefore, even though fsl_rgmii_init runs after the SoC's fsl_serdes_init
1157 * function, the DPMAC will be enabled as RGMII even if it was
1158 * also enabled before as SGMII. If ECx_PMUX is not configured for
1159 * RGMII, the DPMAC will remain configured as SGMII from fsl_serdes_init().
1163 #ifdef CONFIG_FMAN_ENET
1164 #ifndef CONFIG_DM_ETH
1168 #ifdef CONFIG_SYS_DPAA_QBMAN
1169 setup_qbman_portals();
1171 #ifdef CONFIG_PCIE_ECAM_GENERIC
1177 int timer_init(void)
1179 u32 __iomem *cntcr = (u32 *)CFG_SYS_FSL_TIMER_ADDR;
1180 #ifdef CONFIG_FSL_LSCH3
1181 u32 __iomem *cltbenr = (u32 *)CFG_SYS_FSL_PMU_CLTBENR;
1183 #if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A) || \
1184 defined(CONFIG_ARCH_LS1028A)
1185 u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
1188 #ifdef COUNTER_FREQUENCY_REAL
1189 unsigned long cntfrq = COUNTER_FREQUENCY_REAL;
1191 /* Update with accurate clock frequency */
1192 if (current_el() == 3)
1193 asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
1196 #ifdef CONFIG_FSL_LSCH3
1197 /* Enable timebase for all clusters.
1198 * It is safe to do so even if some clusters are not enabled.
1200 out_le32(cltbenr, 0xf);
1203 #if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A) || \
1204 defined(CONFIG_ARCH_LS1028A)
1206 * In certain Layerscape SoCs, the clock for each core's timebase
1207 * has an enable bit in the PMU Physical Core Time Base Enable
1208 * Register (PCTBENR), which allows the watchdog to operate.
1210 setbits_le32(pctbenr, 0xff);
1212 * For the LS2080A SoC and its personalities, the timer controller
1213 * offset is different.
1215 svr_dev_id = get_svr();
1216 if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
1217 cntcr = (u32 *)SYS_FSL_LS2080A_LS2085A_TIMER_ADDR;
1221 /* Enable the clock for the timer.
1222 * This is a global setting.
1224 out_le32(cntcr, 0x1);
1229 #if !CONFIG_IS_ENABLED(SYSRESET)
1230 __efi_runtime_data u32 __iomem *rstcr = (u32 *)CFG_SYS_FSL_RST_ADDR;
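/*
 * Request a SoC reset through the reset control register; LX2160A/LX2162A
 * use a software reset request, other SoCs raise RESET_REQ_B via SCFG.
 */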
1232 void __efi_runtime reset_cpu(void)
1234 #if defined(CONFIG_ARCH_LX2160A) || defined(CONFIG_ARCH_LX2162A)
1235 /* clear the RST_REQ_MSK and SW_RST_REQ */
1236 out_le32(rstcr, 0x0);
1238 /* initiate the sw reset request */
1239 out_le32(rstcr, 0x1);
1243 /* Raise RESET_REQ_B */
1244 val = scfg_in32(rstcr);
1246 scfg_out32(rstcr, val);
1251 #if defined(CONFIG_EFI_LOADER) && !defined(CONFIG_PSCI_RESET)
1253 void __efi_runtime EFIAPI efi_reset_system(
1254 enum efi_reset_type reset_type,
1255 efi_status_t reset_status,
1256 unsigned long data_size, void *reset_data)
1258 switch (reset_type) {
1259 case EFI_RESET_COLD:
1260 case EFI_RESET_WARM:
1261 case EFI_RESET_PLATFORM_SPECIFIC:
1264 case EFI_RESET_SHUTDOWN:
1265 /* Nothing we can do */
1272 efi_status_t efi_reset_system_init(void)
1274 return efi_add_runtime_mmio(&rstcr, sizeof(*rstcr));
1280 * Calculate the memory to reserve at the top of the given memory bank.
1281 * Return the aligned reserved size on success,
1282 * or (ram_size + needed size) on failure.
1284 phys_size_t board_reserve_ram_top(phys_size_t ram_size)
1286 phys_size_t ram_top = ram_size;
1288 #if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
1289 ram_top = mc_get_dram_block_size();
1290 if (ram_top > ram_size)
1291 return ram_size + ram_top;
1293 ram_top = ram_size - ram_top;
1294 /* The start address of MC reserved memory needs to be aligned. */
1295 ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
1298 return ram_size - ram_top;
1301 phys_size_t get_effective_memsize(void)
1303 phys_size_t ea_size, rem = 0;
1306 * For ARMv8 SoCs, DDR memory is split into two or three regions. The
1307 * first region is the 2GB space at 0x8000_0000. Secure memory needs to
1308 * be allocated from the first region. If the memory extends into the second
1309 * region (or the third region if applicable), Management Complex (MC)
1310 * memory should be put into the highest region, i.e. the end of DDR
1311 * memory. CFG_MAX_MEM_MAPPED is set to the size of the first region so
1312 * U-Boot doesn't relocate itself into a higher address. Should DDR be
1313 * configured to skip the first region, this function needs to be
1316 if (gd->ram_size > CFG_MAX_MEM_MAPPED) {
1317 ea_size = CFG_MAX_MEM_MAPPED;
1318 rem = gd->ram_size - ea_size;
1320 ea_size = gd->ram_size;
1323 #ifdef CFG_SYS_MEM_RESERVE_SECURE
1324 /* Check if we have enough space for secure memory */
1325 if (ea_size > CFG_SYS_MEM_RESERVE_SECURE)
1326 ea_size -= CFG_SYS_MEM_RESERVE_SECURE;
1328 printf("Error: Not enough space for secure memory.\n");
1330 /* Check if we have enough memory for MC */
1331 if (rem < board_reserve_ram_top(rem)) {
1332 /* Not enough memory in high region to reserve */
1333 if (ea_size > board_reserve_ram_top(ea_size))
1334 ea_size -= board_reserve_ram_top(ea_size);
1336 printf("Error: Not enough space for reserved memory.\n");
1342 #ifdef CONFIG_TFABOOT
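/*
 * Query the total DRAM size from TF-A through the SMC_DRAM_BANK_INFO SiP
 * call (a bank index of -1 requests the overall size).
 */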
1343 phys_size_t tfa_get_dram_size(void)
1345 struct arm_smccc_res res;
1347 arm_smccc_smc(SMC_DRAM_BANK_INFO, -1, 0, 0, 0, 0, 0, 0, &res);
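/*
 * Fill gd->bd->bi_dram[] with the per-bank start/size pairs reported by
 * TF-A, one SMC_DRAM_BANK_INFO call per bank.
 */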
1354 static int tfa_dram_init_banksize(void)
1357 phys_size_t dram_size = tfa_get_dram_size();
1358 struct arm_smccc_res res;
1360 debug("dram_size %llx\n", dram_size);
1366 arm_smccc_smc(SMC_DRAM_BANK_INFO, i, 0, 0, 0, 0, 0, 0, &res);
1372 debug("bank[%d]: start %lx, size %lx\n", i, res.a1, res.a2);
1373 gd->bd->bi_dram[i].start = res.a1;
1374 gd->bd->bi_dram[i].size = res.a2;
1376 dram_size -= gd->bd->bi_dram[i].size;
1379 } while (dram_size);
1384 #if defined(CONFIG_RESV_RAM) && !defined(CONFIG_SPL_BUILD)
1385 /* Assign memory for MC */
1386 #ifdef CONFIG_SYS_DDR_BLOCK3_BASE
1387 if (gd->bd->bi_dram[2].size >=
1388 board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
1389 gd->arch.resv_ram = gd->bd->bi_dram[2].start +
1390 gd->bd->bi_dram[2].size -
1391 board_reserve_ram_top(gd->bd->bi_dram[2].size);
1395 if (gd->bd->bi_dram[1].size >=
1396 board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
1397 gd->arch.resv_ram = gd->bd->bi_dram[1].start +
1398 gd->bd->bi_dram[1].size -
1399 board_reserve_ram_top(gd->bd->bi_dram[1].size);
1400 } else if (gd->bd->bi_dram[0].size >
1401 board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
1402 gd->arch.resv_ram = gd->bd->bi_dram[0].start +
1403 gd->bd->bi_dram[0].size -
1404 board_reserve_ram_top(gd->bd->bi_dram[0].size);
1407 #endif /* CONFIG_RESV_RAM */
1413 int dram_init_banksize(void)
1415 #ifdef CONFIG_SYS_DP_DDR_BASE_PHY
1416 phys_size_t dp_ddr_size;
1419 #ifdef CONFIG_TFABOOT
1420 if (!tfa_dram_init_banksize())
1424 * gd->ram_size has the total size of DDR memory, less reserved secure
1425 * memory. The DDR extends from the low region to the high region(s), presuming
1426 * no hole is created by the DDR configuration. gd->arch.secure_ram tracks
1427 * the location of secure memory. gd->arch.resv_ram tracks the location
1428 * of reserved memory for the Management Complex (MC). Because gd->ram_size
1429 * is reduced by this function if secure memory is reserved, checking
1430 * gd->arch.secure_ram should be done to avoid running it repeatedly.
1433 #ifdef CFG_SYS_MEM_RESERVE_SECURE
1434 if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
1435 debug("No need to run again, skip %s\n", __func__);
1441 gd->bd->bi_dram[0].start = CFG_SYS_SDRAM_BASE;
1442 if (gd->ram_size > CFG_SYS_DDR_BLOCK1_SIZE) {
1443 gd->bd->bi_dram[0].size = CFG_SYS_DDR_BLOCK1_SIZE;
1444 gd->bd->bi_dram[1].start = CFG_SYS_DDR_BLOCK2_BASE;
1445 gd->bd->bi_dram[1].size = gd->ram_size -
1446 CFG_SYS_DDR_BLOCK1_SIZE;
1447 #ifdef CONFIG_SYS_DDR_BLOCK3_BASE
1448 if (gd->bd->bi_dram[1].size > CONFIG_SYS_DDR_BLOCK2_SIZE) {
1449 gd->bd->bi_dram[2].start = CONFIG_SYS_DDR_BLOCK3_BASE;
1450 gd->bd->bi_dram[2].size = gd->bd->bi_dram[1].size -
1451 CONFIG_SYS_DDR_BLOCK2_SIZE;
1452 gd->bd->bi_dram[1].size = CONFIG_SYS_DDR_BLOCK2_SIZE;
1456 gd->bd->bi_dram[0].size = gd->ram_size;
1458 #ifdef CFG_SYS_MEM_RESERVE_SECURE
1459 if (gd->bd->bi_dram[0].size >
1460 CFG_SYS_MEM_RESERVE_SECURE) {
1461 gd->bd->bi_dram[0].size -=
1462 CFG_SYS_MEM_RESERVE_SECURE;
1463 gd->arch.secure_ram = gd->bd->bi_dram[0].start +
1464 gd->bd->bi_dram[0].size;
1465 gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
1466 gd->ram_size -= CFG_SYS_MEM_RESERVE_SECURE;
1468 #endif /* CFG_SYS_MEM_RESERVE_SECURE */
1470 #if defined(CONFIG_RESV_RAM) && !defined(CONFIG_SPL_BUILD)
1471 /* Assign memory for MC */
1472 #ifdef CONFIG_SYS_DDR_BLOCK3_BASE
1473 if (gd->bd->bi_dram[2].size >=
1474 board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
1475 gd->arch.resv_ram = gd->bd->bi_dram[2].start +
1476 gd->bd->bi_dram[2].size -
1477 board_reserve_ram_top(gd->bd->bi_dram[2].size);
1481 if (gd->bd->bi_dram[1].size >=
1482 board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
1483 gd->arch.resv_ram = gd->bd->bi_dram[1].start +
1484 gd->bd->bi_dram[1].size -
1485 board_reserve_ram_top(gd->bd->bi_dram[1].size);
1486 } else if (gd->bd->bi_dram[0].size >
1487 board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
1488 gd->arch.resv_ram = gd->bd->bi_dram[0].start +
1489 gd->bd->bi_dram[0].size -
1490 board_reserve_ram_top(gd->bd->bi_dram[0].size);
1493 #endif /* CONFIG_RESV_RAM */
1495 #ifdef CONFIG_SYS_DP_DDR_BASE_PHY
1496 #ifdef CONFIG_SYS_DDR_BLOCK3_BASE
1497 #error "This SoC shouldn't have DP DDR"
1499 if (soc_has_dp_ddr()) {
1500 /* initialize DP-DDR here */
1503 * The DDR controller uses 0 as the base address for binding.
1504 * It is mapped to CONFIG_SYS_DP_DDR_BASE for the core to access.
1506 dp_ddr_size = fsl_other_ddr_sdram(CONFIG_SYS_DP_DDR_BASE_PHY,
1508 CONFIG_DP_DDR_NUM_CTRLS,
1509 CONFIG_DP_DDR_DIMM_SLOTS_PER_CTLR,
1512 gd->bd->bi_dram[2].start = CONFIG_SYS_DP_DDR_BASE;
1513 gd->bd->bi_dram[2].size = dp_ddr_size;
1515 puts("Not detected");
1520 #ifdef CFG_SYS_MEM_RESERVE_SECURE
1521 debug("%s is called. gd->ram_size is reduced to %lu\n",
1522 __func__, (ulong)gd->ram_size);
1528 #if CONFIG_IS_ENABLED(EFI_LOADER)
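/*
 * Add each DRAM bank to the EFI memory map, trimming the region reserved
 * for the Management Complex and skipping the DP-DDR bank.
 */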
1529 void efi_add_known_memory(void)
1532 phys_addr_t ram_start;
1533 phys_size_t ram_size;
1536 for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
1537 #ifdef CONFIG_SYS_DP_DDR_BASE_PHY
1538 #ifdef CONFIG_SYS_DDR_BLOCK3_BASE
1539 #error "This SoC shouldn't have DP DDR"
1542 continue; /* skip DP-DDR */
1544 ram_start = gd->bd->bi_dram[i].start;
1545 ram_size = gd->bd->bi_dram[i].size;
1546 #ifdef CONFIG_RESV_RAM
1547 if (gd->arch.resv_ram >= ram_start &&
1548 gd->arch.resv_ram < ram_start + ram_size)
1549 ram_size = gd->arch.resv_ram - ram_start;
1551 efi_add_memory_map(ram_start, ram_size,
1552 EFI_CONVENTIONAL_MEMORY);
1558 * Before the DDR size is known, the early MMU table has DDR mapped as device
1559 * memory to avoid speculative access. To relocate U-Boot to DDR, "normal
1560 * memory" needs to be set for these mappings.
1561 * If a special case configures DDR with holes in the mapping, the holes need
1562 * to be marked as invalid. This is not implemented in this function.
1564 void update_early_mmu_table(void)
1566 if (!gd->arch.tlb_addr)
1569 if (gd->ram_size <= CFG_SYS_FSL_DRAM_SIZE1) {
1570 mmu_change_region_attr(
1573 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
1574 PTE_BLOCK_OUTER_SHARE |
1578 mmu_change_region_attr(
1580 CFG_SYS_DDR_BLOCK1_SIZE,
1581 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
1582 PTE_BLOCK_OUTER_SHARE |
1585 #ifdef CONFIG_SYS_DDR_BLOCK3_BASE
1586 #ifndef CONFIG_SYS_DDR_BLOCK2_SIZE
1587 #error "Missing CONFIG_SYS_DDR_BLOCK2_SIZE"
1589 if (gd->ram_size - CFG_SYS_DDR_BLOCK1_SIZE >
1590 CONFIG_SYS_DDR_BLOCK2_SIZE) {
1591 mmu_change_region_attr(
1592 CFG_SYS_DDR_BLOCK2_BASE,
1593 CONFIG_SYS_DDR_BLOCK2_SIZE,
1594 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
1595 PTE_BLOCK_OUTER_SHARE |
1598 mmu_change_region_attr(
1599 CONFIG_SYS_DDR_BLOCK3_BASE,
1601 CFG_SYS_DDR_BLOCK1_SIZE -
1602 CONFIG_SYS_DDR_BLOCK2_SIZE,
1603 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
1604 PTE_BLOCK_OUTER_SHARE |
1610 mmu_change_region_attr(
1611 CFG_SYS_DDR_BLOCK2_BASE,
1613 CFG_SYS_DDR_BLOCK1_SIZE,
1614 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
1615 PTE_BLOCK_OUTER_SHARE |
1622 __weak int dram_init(void)
1624 #ifdef CONFIG_SYS_FSL_DDR
1626 #if (!defined(CONFIG_SPL) && !defined(CONFIG_TFABOOT)) || \
1627 defined(CONFIG_SPL_BUILD)
1628 /* This will do a break-before-make update of the MMU mapping for DDR */
1629 update_early_mmu_table();
1636 #ifdef CONFIG_ARCH_MISC_INIT
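/* Weak default; SoC or board code may override this to apply SerDes fixups. */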
1637 __weak int serdes_misc_init(void)
1642 int arch_misc_init(void)
1644 if (IS_ENABLED(CONFIG_FSL_CAAM)) {
1645 struct udevice *dev;
1648 ret = uclass_get_device_by_driver(UCLASS_MISC, DM_DRIVER_GET(caam_jr), &dev);
1650 printf("Failed to initialize caam_jr: %d\n", ret);