// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2017-2020 NXP
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 */

#include <common.h>
#include <env.h>
#include <fsl_ddr_sdram.h>
#include <hwconfig.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/arch/cpu.h>
#include <asm/arch/speed.h>
#include <fsl_immap.h>
#include <asm/arch/mp.h>
#include <efi_loader.h>
#include <fsl-mc/fsl_mc.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif
#include <asm/armv8/sec_firmware.h>
#ifdef CONFIG_SYS_FSL_DDR
#include <fsl_ddr.h>
#endif
#include <asm/arch/clock.h>
#include <fsl_qbman.h>
#include <env_internal.h>
#ifdef CONFIG_CHAIN_OF_TRUST
#include <fsl_validate.h>
#endif
#include <linux/mii.h>

DECLARE_GLOBAL_DATA_PTR;
static struct cpu_type cpu_type_list[] = {
	CPU_TYPE_ENTRY(LS2080A, LS2080A, 8),
	CPU_TYPE_ENTRY(LS2085A, LS2085A, 8),
	CPU_TYPE_ENTRY(LS2045A, LS2045A, 4),
	CPU_TYPE_ENTRY(LS2088A, LS2088A, 8),
	CPU_TYPE_ENTRY(LS2084A, LS2084A, 8),
	CPU_TYPE_ENTRY(LS2048A, LS2048A, 4),
	CPU_TYPE_ENTRY(LS2044A, LS2044A, 4),
	CPU_TYPE_ENTRY(LS2081A, LS2081A, 8),
	CPU_TYPE_ENTRY(LS2041A, LS2041A, 4),
	CPU_TYPE_ENTRY(LS1043A, LS1043A, 4),
	CPU_TYPE_ENTRY(LS1043A, LS1043A_P23, 4),
	CPU_TYPE_ENTRY(LS1023A, LS1023A, 2),
	CPU_TYPE_ENTRY(LS1023A, LS1023A_P23, 2),
	CPU_TYPE_ENTRY(LS1046A, LS1046A, 4),
	CPU_TYPE_ENTRY(LS1026A, LS1026A, 2),
	CPU_TYPE_ENTRY(LS2040A, LS2040A, 4),
	CPU_TYPE_ENTRY(LS1012A, LS1012A, 1),
	CPU_TYPE_ENTRY(LS1017A, LS1017A, 1),
	CPU_TYPE_ENTRY(LS1018A, LS1018A, 1),
	CPU_TYPE_ENTRY(LS1027A, LS1027A, 2),
	CPU_TYPE_ENTRY(LS1028A, LS1028A, 2),
	CPU_TYPE_ENTRY(LS1088A, LS1088A, 8),
	CPU_TYPE_ENTRY(LS1084A, LS1084A, 8),
	CPU_TYPE_ENTRY(LS1048A, LS1048A, 4),
	CPU_TYPE_ENTRY(LS1044A, LS1044A, 4),
	CPU_TYPE_ENTRY(LX2160A, LX2160A, 16),
	CPU_TYPE_ENTRY(LX2120A, LX2120A, 12),
	CPU_TYPE_ENTRY(LX2080A, LX2080A, 8),
	CPU_TYPE_ENTRY(LX2162A, LX2162A, 16),
	CPU_TYPE_ENTRY(LX2122A, LX2122A, 12),
	CPU_TYPE_ENTRY(LX2082A, LX2082A, 8),
};
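/*
 * Size of the OCRAM region reserved for the early page tables built by
 * early_mmu_setup() below; addresses above this offset remain free.
 */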
#define EARLY_PGTABLE_SIZE 0x5000
static struct mm_region early_map[] = {
#ifdef CONFIG_FSL_LSCH3
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
	  CONFIG_SYS_FSL_QSPI_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE},
#ifdef CONFIG_FSL_IFC
	/* For IFC Region #1, only the first 4MB is cache-enabled */
	{ CONFIG_SYS_FSL_IFC_BASE1, CONFIG_SYS_FSL_IFC_BASE1,
	  CONFIG_SYS_FSL_IFC_SIZE1_1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
	  CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
	  CONFIG_SYS_FSL_IFC_SIZE1 - CONFIG_SYS_FSL_IFC_SIZE1_1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FLASH_BASE, CONFIG_SYS_FSL_IFC_BASE1,
	  CONFIG_SYS_FSL_IFC_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
#if defined(CONFIG_TFABOOT) || \
	(defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
#else	/* Start with nGnRnE and PXN and UXN to prevent speculative access */
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
#endif
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_FSL_IFC
	/* Map IFC region #2 up to CONFIG_SYS_FLASH_BASE for NAND boot */
	{ CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
	  CONFIG_SYS_FLASH_BASE - CONFIG_SYS_FSL_IFC_BASE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#elif defined(CONFIG_FSL_LSCH2)
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
	  CONFIG_SYS_FSL_QSPI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
	  CONFIG_SYS_FSL_IFC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
#if defined(CONFIG_TFABOOT) || \
	(defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
#else	/* Start with nGnRnE and PXN and UXN to prevent speculative access */
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
#endif
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
	{},	/* list terminator */
};
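/*
 * Final MMU map used once DRAM is up. The DRAM entries below are patched
 * with the detected bank addresses/sizes by final_mmu_setup().
 */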
static struct mm_region final_map[] = {
#ifdef CONFIG_FSL_LSCH3
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
	  CONFIG_SYS_FSL_QSPI_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE2, CONFIG_SYS_FSL_QSPI_BASE2,
	  CONFIG_SYS_FSL_QSPI_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
	  CONFIG_SYS_FSL_IFC_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_MC_BASE, CONFIG_SYS_FSL_MC_BASE,
	  CONFIG_SYS_FSL_MC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_NI_BASE, CONFIG_SYS_FSL_NI_BASE,
	  CONFIG_SYS_FSL_NI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	/* For QBMAN portal, only the first 64MB is cache-enabled */
	{ CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
	  CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  CONFIG_SYS_FSL_QBMAN_SIZE - CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
	  CONFIG_SYS_PCIE1_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
	  CONFIG_SYS_PCIE2_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
	{ CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
	  CONFIG_SYS_PCIE3_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
#ifdef CONFIG_SYS_PCIE4_PHYS_ADDR
	{ CONFIG_SYS_PCIE4_PHYS_ADDR, CONFIG_SYS_PCIE4_PHYS_ADDR,
	  CONFIG_SYS_PCIE4_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
#ifdef SYS_PCIE5_PHYS_ADDR
	{ SYS_PCIE5_PHYS_ADDR, SYS_PCIE5_PHYS_ADDR,
	  SYS_PCIE5_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
#ifdef SYS_PCIE6_PHYS_ADDR
	{ SYS_PCIE6_PHYS_ADDR, SYS_PCIE6_PHYS_ADDR,
	  SYS_PCIE6_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_WRIOP1_BASE, CONFIG_SYS_FSL_WRIOP1_BASE,
	  CONFIG_SYS_FSL_WRIOP1_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_AIOP1_BASE, CONFIG_SYS_FSL_AIOP1_BASE,
	  CONFIG_SYS_FSL_AIOP1_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_PEBUF_BASE, CONFIG_SYS_FSL_PEBUF_BASE,
	  CONFIG_SYS_FSL_PEBUF_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#elif defined(CONFIG_FSL_LSCH2)
	{ CONFIG_SYS_FSL_BOOTROM_BASE, CONFIG_SYS_FSL_BOOTROM_BASE,
	  CONFIG_SYS_FSL_BOOTROM_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
	  CONFIG_SYS_FSL_QSPI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
	  CONFIG_SYS_FSL_IFC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
	  CONFIG_SYS_FSL_QBMAN_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
	  CONFIG_SYS_PCIE1_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
	  CONFIG_SYS_PCIE2_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
	{ CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
	  CONFIG_SYS_PCIE3_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	{},	/* space holder for secure mem */
#endif
	{},	/* list terminator */
};

struct mm_region *mem_map = early_map;
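/* Decode the SVR into a printable SoC name, personality suffix and revision */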
void cpu_name(char *name)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	unsigned int i, svr, ver;

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
		if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
			strcpy(name, cpu_type_list[i].name);
#if defined(CONFIG_ARCH_LX2160A) || defined(CONFIG_ARCH_LX2162A)
			if (IS_C_PROCESSOR(svr))
				strcat(name, "C");
#endif

			if (IS_E_PROCESSOR(svr))
				strcat(name, "E");

			sprintf(name + strlen(name), " Rev%d.%d",
				SVR_MAJ(svr), SVR_MIN(svr));
			break;
		}

	if (i == ARRAY_SIZE(cpu_type_list))
		strcpy(name, "unknown");
}
#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
/*
 * To start the MMU before DDR is available, we create the MMU table in SRAM.
 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
 * levels of translation tables here to cover the 40-bit address space.
 * We use a 4KB granule size, with 40 bits of physical address and T0SZ=24.
 * Addresses above EARLY_PGTABLE_SIZE (0x5000) are free for other purposes.
 * Note, the debug print in cache_v8.c is not usable for debugging
 * these early MMU tables because the UART is not yet available.
 */
static inline void early_mmu_setup(void)
{
	unsigned int el = current_el();

	/* global data is already setup, no allocation yet */
	if (el == 3)
		gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
	else
		gd->arch.tlb_addr = CONFIG_SYS_DDR_SDRAM_BASE;
	gd->arch.tlb_fillptr = gd->arch.tlb_addr;
	gd->arch.tlb_size = EARLY_PGTABLE_SIZE;

	/* Create early page tables */
	setup_pgtables();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
			  get_tcr(el, NULL, NULL) &
			  ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}
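/*
 * On LS2088A-family personalities the PCIe windows live at different
 * addresses, so rewrite the matching final_map entries before the final
 * tables are generated.
 */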
static void fix_pcie_mmu_map(void)
{
#ifdef CONFIG_ARCH_LS2080A
	unsigned int i;
	u32 svr, ver;
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	/* Fix PCIE base and size for LS2088A */
	if ((ver == SVR_LS2088A) || (ver == SVR_LS2084A) ||
	    (ver == SVR_LS2048A) || (ver == SVR_LS2044A) ||
	    (ver == SVR_LS2081A) || (ver == SVR_LS2041A)) {
		for (i = 0; i < ARRAY_SIZE(final_map); i++) {
			switch (final_map[i].phys) {
			case CONFIG_SYS_PCIE1_PHYS_ADDR:
				final_map[i].phys = 0x2000000000ULL;
				final_map[i].virt = 0x2000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE2_PHYS_ADDR:
				final_map[i].phys = 0x2800000000ULL;
				final_map[i].virt = 0x2800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
#ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
			case CONFIG_SYS_PCIE3_PHYS_ADDR:
				final_map[i].phys = 0x3000000000ULL;
				final_map[i].virt = 0x3000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
#endif
#ifdef CONFIG_SYS_PCIE4_PHYS_ADDR
			case CONFIG_SYS_PCIE4_PHYS_ADDR:
				final_map[i].phys = 0x3800000000ULL;
				final_map[i].virt = 0x3800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
#endif
			default:
				break;
			}
		}
	}
#endif
}
/*
 * The final tables look similar to early tables, but different in detail.
 * These tables are in DRAM. Sub tables are added to enable cache for
 * QBMan and OCRAM.
 *
 * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
 * OCRAM will not be used for this purpose so gd->arch.secure_ram can't be 0.
 */
static inline void final_mmu_setup(void)
{
	u64 tlb_addr_save = gd->arch.tlb_addr;
	unsigned int el = current_el();
	int index;

	/* fix the final_map before filling in the block entries */
	fix_pcie_mmu_map();

	mem_map = final_map;

	/* Update mapping for DDR to actual size */
	for (index = 0; index < ARRAY_SIZE(final_map) - 2; index++) {
		/*
		 * Find the entry for DDR mapping and update the address and
		 * size. Zero-sized mapping will be skipped when creating MMU
		 * table.
		 */
		switch (final_map[index].virt) {
		case CONFIG_SYS_FSL_DRAM_BASE1:
			final_map[index].virt = gd->bd->bi_dram[0].start;
			final_map[index].phys = gd->bd->bi_dram[0].start;
			final_map[index].size = gd->bd->bi_dram[0].size;
			break;
#ifdef CONFIG_SYS_FSL_DRAM_BASE2
		case CONFIG_SYS_FSL_DRAM_BASE2:
#if (CONFIG_NR_DRAM_BANKS >= 2)
			final_map[index].virt = gd->bd->bi_dram[1].start;
			final_map[index].phys = gd->bd->bi_dram[1].start;
			final_map[index].size = gd->bd->bi_dram[1].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
		case CONFIG_SYS_FSL_DRAM_BASE3:
#if (CONFIG_NR_DRAM_BANKS >= 3)
			final_map[index].virt = gd->bd->bi_dram[2].start;
			final_map[index].phys = gd->bd->bi_dram[2].start;
			final_map[index].size = gd->bd->bi_dram[2].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
		default:
			break;
		}
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		if (el == 3) {
			/*
			 * Only use gd->arch.secure_ram if the address is
			 * recalculated. Align to 4KB for MMU table.
			 */
			/* put page tables in secure ram */
			index = ARRAY_SIZE(final_map) - 2;
			gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
			final_map[index].virt = gd->arch.secure_ram & ~0x3;
			final_map[index].phys = final_map[index].virt;
			final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
			final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
			gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
			tlb_addr_save = gd->arch.tlb_addr;
		} else {
			/* Use allocated (board_f.c) memory for TLB */
			tlb_addr_save = gd->arch.tlb_allocated;
			gd->arch.tlb_addr = tlb_addr_save;
		}
	}
#endif

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr_save;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	setup_pgtables();
	gd->arch.tlb_addr = tlb_addr_save;

	/* Disable cache and MMU */
	dcache_disable();	/* TLBs are invalidated */
	invalidate_icache_all();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}
u64 get_page_table_size(void)
{
	return 0x10000;
}

int arch_cpu_init(void)
{
	/*
	 * This function is called before U-Boot relocates itself, to speed up
	 * the rest of the boot. It is not necessary to run if performance is
	 * not critical. Skip if the MMU is already enabled by SPL or other
	 * means.
	 */
	if (get_sctlr() & CR_M)
		return 0;

	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);
	return 0;
}

void mmu_setup(void)
{
	final_mmu_setup();
}

/*
 * This function is called from common/board_r.c.
 * It recreates the MMU table in main memory.
 */
void enable_caches(void)
{
	mmu_setup();
	__asm_invalidate_tlb_all();
	icache_enable();
	dcache_enable();
}
#endif	/* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */
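/*
 * When booting via TF-A, the boot source is decoded from the RCW_SRC field
 * of PORSR1, which is read either through the SIP_SVC_RCW SMC or directly
 * from the DCFG/GUR block.
 */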
#ifdef CONFIG_TFABOOT
enum boot_src __get_boot_src(u32 porsr1)
{
	enum boot_src src = BOOT_SOURCE_RESERVED;
	u32 rcw_src = (porsr1 & RCW_SRC_MASK) >> RCW_SRC_BIT;
#if !defined(CONFIG_NXP_LSCH3_2)
	u32 val;
#endif
	debug("%s: rcw_src 0x%x\n", __func__, rcw_src);

#if defined(CONFIG_FSL_LSCH3)
#if defined(CONFIG_NXP_LSCH3_2)
	switch (rcw_src) {
	case RCW_SRC_SDHC1_VAL:
		src = BOOT_SOURCE_SD_MMC;
		break;
	case RCW_SRC_SDHC2_VAL:
		src = BOOT_SOURCE_SD_MMC2;
		break;
	case RCW_SRC_I2C1_VAL:
		src = BOOT_SOURCE_I2C1_EXTENDED;
		break;
	case RCW_SRC_FLEXSPI_NAND2K_VAL:
		src = BOOT_SOURCE_XSPI_NAND;
		break;
	case RCW_SRC_FLEXSPI_NAND4K_VAL:
		src = BOOT_SOURCE_XSPI_NAND;
		break;
	case RCW_SRC_RESERVED_1_VAL:
		src = BOOT_SOURCE_RESERVED;
		break;
	case RCW_SRC_FLEXSPI_NOR_24B:
		src = BOOT_SOURCE_XSPI_NOR;
		break;
	default:
		src = BOOT_SOURCE_RESERVED;
	}
#else
	val = rcw_src & RCW_SRC_TYPE_MASK;
	if (val == RCW_SRC_NOR_VAL) {
		val = rcw_src & NOR_TYPE_MASK;

		switch (val) {
		case NOR_16B_VAL:
		case NOR_32B_VAL:
			src = BOOT_SOURCE_IFC_NOR;
			break;
		default:
			src = BOOT_SOURCE_RESERVED;
		}
	} else {
		/* RCW SRC Serial Flash */
		val = rcw_src & RCW_SRC_SERIAL_MASK;
		switch (val) {
		case RCW_SRC_QSPI_VAL:
			/* RCW SRC Serial NOR (QSPI) */
			src = BOOT_SOURCE_QSPI_NOR;
			break;
		case RCW_SRC_SD_CARD_VAL:
			/* RCW SRC SD Card */
			src = BOOT_SOURCE_SD_MMC;
			break;
		case RCW_SRC_EMMC_VAL:
			/* RCW SRC EMMC */
			src = BOOT_SOURCE_SD_MMC;
			break;
		case RCW_SRC_I2C1_VAL:
			/* RCW SRC I2C1 Extended */
			src = BOOT_SOURCE_I2C1_EXTENDED;
			break;
		default:
			src = BOOT_SOURCE_RESERVED;
		}
	}
#endif
#elif defined(CONFIG_FSL_LSCH2)
	/* RCW SRC NAND */
	val = rcw_src & RCW_SRC_NAND_MASK;
	if (val == RCW_SRC_NAND_VAL) {
		val = rcw_src & NAND_RESERVED_MASK;
		if (val != NAND_RESERVED_1 && val != NAND_RESERVED_2)
			src = BOOT_SOURCE_IFC_NAND;
	} else {
		/* RCW SRC NOR */
		val = rcw_src & RCW_SRC_NOR_MASK;
		if (val == NOR_8B_VAL || val == NOR_16B_VAL) {
			src = BOOT_SOURCE_IFC_NOR;
		} else {
			switch (rcw_src) {
			case QSPI_VAL1:
			case QSPI_VAL2:
				src = BOOT_SOURCE_QSPI_NOR;
				break;
			case SD_VAL:
				src = BOOT_SOURCE_SD_MMC;
				break;
			default:
				src = BOOT_SOURCE_RESERVED;
			}
		}
	}
#endif

	if (CONFIG_IS_ENABLED(SYS_FSL_ERRATUM_A010539) && !rcw_src)
		src = BOOT_SOURCE_QSPI_NOR;

	debug("%s: src 0x%x\n", __func__, src);
	return src;
}

enum boot_src get_boot_src(void)
{
	struct pt_regs regs;
	u32 porsr1 = 0;

#if defined(CONFIG_FSL_LSCH3)
	u32 __iomem *dcfg_ccsr = (u32 __iomem *)DCFG_BASE;
#elif defined(CONFIG_FSL_LSCH2)
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
#endif

	if (current_el() == 2) {
		regs.regs[0] = SIP_SVC_RCW;

		smc_call(&regs);
		if (!regs.regs[0])
			porsr1 = regs.regs[1];
	}

	if (current_el() == 3 || !porsr1) {
#ifdef CONFIG_FSL_LSCH3
		porsr1 = in_le32(dcfg_ccsr + DCFG_PORSR1 / 4);
#elif defined(CONFIG_FSL_LSCH2)
		porsr1 = in_be32(&gur->porsr1);
#endif
	}
	debug("%s: porsr1 0x%x\n", __func__, porsr1);

	return __get_boot_src(porsr1);
}
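/* Select the environment MMC device to match the SD/eMMC controller we booted from */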
#ifdef CONFIG_ENV_IS_IN_MMC
int mmc_get_env_dev(void)
{
	enum boot_src src = get_boot_src();
	int dev = CONFIG_SYS_MMC_ENV_DEV;

	switch (src) {
	case BOOT_SOURCE_SD_MMC:
		dev = 0;
		break;
	case BOOT_SOURCE_SD_MMC2:
		dev = 1;
		break;
	default:
		break;
	}

	return dev;
}
#endif

enum env_location env_get_location(enum env_operation op, int prio)
{
	enum boot_src src = get_boot_src();
	enum env_location env_loc = ENVL_NOWHERE;

	if (prio)
		return ENVL_UNKNOWN;

#ifdef CONFIG_ENV_IS_NOWHERE
	return env_loc;
#endif

	switch (src) {
	case BOOT_SOURCE_IFC_NOR:
		env_loc = ENVL_FLASH;
		break;
	case BOOT_SOURCE_QSPI_NOR:
	case BOOT_SOURCE_XSPI_NOR:
		env_loc = ENVL_SPI_FLASH;
		break;
	case BOOT_SOURCE_IFC_NAND:
	case BOOT_SOURCE_QSPI_NAND:
	case BOOT_SOURCE_XSPI_NAND:
		env_loc = ENVL_NAND;
		break;
	case BOOT_SOURCE_SD_MMC:
	case BOOT_SOURCE_SD_MMC2:
		env_loc = ENVL_MMC;
		break;
	case BOOT_SOURCE_I2C1_EXTENDED:
	default:
		break;
	}

	return env_loc;
}
#endif	/* CONFIG_TFABOOT */
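/*
 * Read the topology initiator type for initiator @init_id in the given
 * cluster word; returns the TP_ITYP value if the initiator is available,
 * otherwise 0.
 */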
u32 initiator_type(u32 cluster, int init_id)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type = 0;

	type = gur_in32(&gur->tp_ityp[idx]);
	if (type & TP_ITYP_AV)
		return type;

	return 0;
}

u32 cpu_pos_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type && (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM))
				mask |= 1 << (i * TP_INIT_PER_CLUSTER + j);
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

u32 cpu_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

/*
 * Return the number of cores on this SoC.
 */
int cpu_numcores(void)
{
	return hweight32(cpu_mask());
}
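/* Walk the topology registers to find the cluster a logical core belongs to */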
int fsl_qoriq_core_to_cluster(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (initiator_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}

u32 fsl_qoriq_core_to_type(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (count == core)
					return type;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}

#ifndef CONFIG_FSL_LSCH3
uint get_svr(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	return gur_in32(&gur->svr);
}
#endif
#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type, rcw, svr = gur_in32(&gur->svr);

	puts("SoC: ");

	cpu_name(buf);
	printf(" %s (0x%x)\n", buf, svr);
	memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		if (!(i % 3))
			puts("\n       ");
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
		       (type == TY_ITYP_VER_A57 ? "A57" :
		       (type == TY_ITYP_VER_A72 ? "A72" : " "))),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}
	/* Display platform clock as Bus frequency. */
	printf("\n Bus: %-4s MHz ",
	       strmhz(buf, sysinfo.freq_systembus / CONFIG_SYS_FSL_PCLK_DIV));
	printf("DDR: %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
#ifdef CONFIG_SYS_DPAA_FMAN
	printf(" FMAN: %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
#endif
#ifdef CONFIG_SYS_FSL_HAS_DP_DDR
	if (soc_has_dp_ddr()) {
		printf(" DP-DDR: %-4s MT/s",
		       strmhz(buf, sysinfo.freq_ddrbus2));
	}
#endif
	puts("\n");

	/*
	 * Display the RCW, so that no one gets confused as to what RCW
	 * we're actually using for this boot.
	 */
	puts("Reset Configuration Word (RCW):");
	for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
		rcw = gur_in32(&gur->rcwsr[i]);

		if ((i % 8) == 0)
			printf("\n %08x:", i * 4);
		printf(" %08x", rcw);
	}
	puts("\n");

	return 0;
}
#endif
#ifdef CONFIG_FSL_ESDHC
int cpu_mmc_init(struct bd_info *bis)
{
	return fsl_esdhc_mmc_init(bis);
}
#endif

int cpu_eth_init(struct bd_info *bis)
{
	int error = 0;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	error = fsl_mc_ldpaa_init(bis);
#endif
#ifdef CONFIG_FMAN_ENET
	fm_standard_init(bis);
#endif
	return error;
}

static inline int check_psci(void)
{
	unsigned int psci_ver;

	psci_ver = sec_firmware_support_psci_version();
	if (psci_ver == PSCI_INVALID_VER)
		return 1;

	return 0;
}
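/*
 * Handle the "core_prefetch:disable=<mask>" hwconfig option: request the
 * secure firmware, via a SiP SMC, to disable prefetch on the cores named in
 * the mask. Core 0 cannot be disabled.
 */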
static void config_core_prefetch(void)
{
	char *buf = NULL;
	char buffer[HWCONFIG_BUFFER_SIZE];
	const char *prefetch_arg = NULL;
	size_t arglen;
	unsigned int mask;
	struct pt_regs regs;

	if (env_get_f("hwconfig", buffer, sizeof(buffer)) > 0)
		buf = buffer;
	else
		return;

	prefetch_arg = hwconfig_subarg_f("core_prefetch", "disable",
					 &arglen, buf);

	if (prefetch_arg) {
		mask = simple_strtoul(prefetch_arg, NULL, 0) & 0xff;
		if (mask & 0x1) {
			printf("Core0 prefetch can't be disabled\n");
			return;
		}

#define SIP_PREFETCH_DISABLE_64 0xC200FF13
		regs.regs[0] = SIP_PREFETCH_DISABLE_64;
		regs.regs[1] = mask;
		smc_call(&regs);

		if (regs.regs[0])
			printf("Prefetch disable config failed for mask ");
		else
			printf("Prefetch disable config passed for mask ");
		printf("0x%x\n", mask);
	}
}

#ifdef CONFIG_PCIE_ECAM_GENERIC
__weak void set_ecam_icids(void)
{
}
#endif
int arch_early_init_r(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
	u32 svr_dev_id;
	/*
	 * erratum A009635 is valid only for LS2080A SoC and
	 * its personalities
	 */
	svr_dev_id = get_svr();
	if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
		erratum_a009635();
#endif
#if defined(CONFIG_SYS_FSL_ERRATUM_A009942) && defined(CONFIG_SYS_FSL_DDR)
	erratum_a009942_check_cpo();
#endif
	if (check_psci()) {
		debug("PSCI: PSCI does not exist.\n");

		/* if PSCI does not exist, boot secondary cores here */
		if (fsl_layerscape_wake_seconday_cores())
			printf("Did not wake secondary cores\n");
	}

	config_core_prefetch();

#ifdef CONFIG_SYS_HAS_SERDES
	fsl_serdes_init();
#endif
#ifdef CONFIG_SYS_FSL_HAS_RGMII
	/* Some DPMACs in ARMv8-A based Freescale Layerscape SoCs can be
	 * configured via both SerDes (SGMII, XFI, XLAUI etc.) bits and via
	 * EC*_PMUX (RGMII) bits in the RCW.
	 * e.g. DPMAC 17 and 18 in LX2160A can be configured as SGMII from
	 * the SerDes bits and as RGMII via the EC1_PMUX/EC2_PMUX bits.
	 * If a DPMAC is enabled as RGMII through ECx_PMUX, that takes
	 * precedence over the SerDes protocol, i.e. in LX2160A if we select a
	 * SerDes protocol that configures dpmac17 as SGMII and set EC1_PMUX
	 * to RGMII, then the DPMAC is RGMII and not SGMII.
	 *
	 * Therefore, even though fsl_rgmii_init() runs after the SoC's
	 * fsl_serdes_init() function, the DPMAC will be enabled as RGMII even
	 * if it was also enabled before as SGMII. If ECx_PMUX is not
	 * configured for RGMII, the DPMAC remains configured as SGMII from
	 * fsl_serdes_init().
	 */
	fsl_rgmii_init();
#endif
#ifdef CONFIG_FMAN_ENET
#ifndef CONFIG_DM_ETH
	fman_enet_init();
#endif
#endif
#ifdef CONFIG_SYS_DPAA_QBMAN
	setup_qbman_portals();
#endif
#ifdef CONFIG_PCIE_ECAM_GENERIC
	set_ecam_icids();
#endif
	return 0;
}
int timer_init(void)
{
	u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
#ifdef CONFIG_FSL_LSCH3
	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#endif
#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A) || \
	defined(CONFIG_ARCH_LS1028A)
	u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
	u32 svr_dev_id;
#endif
#ifdef COUNTER_FREQUENCY_REAL
	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

	/* Update with accurate clock frequency */
	if (current_el() == 3)
		asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

#ifdef CONFIG_FSL_LSCH3
	/* Enable timebase for all clusters.
	 * It is safe to do so even if some clusters are not enabled.
	 */
	out_le32(cltbenr, 0xf);
#endif

#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A) || \
	defined(CONFIG_ARCH_LS1028A)
	/*
	 * In certain Layerscape SoCs, the clock for each core's timebase
	 * has an enable bit in the PMU Physical Core Time Base Enable
	 * Register (PCTBENR), which allows the watchdog to operate.
	 */
	setbits_le32(pctbenr, 0xff);
	/*
	 * For the LS2080A SoC and its personalities, the timer controller
	 * offset is different.
	 */
	svr_dev_id = get_svr();
	if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
		cntcr = (u32 *)SYS_FSL_LS2080A_LS2085A_TIMER_ADDR;

#endif

	/* Enable clock for timer.
	 * This is a global setting.
	 */
	out_le32(cntcr, 0x1);

	return 0;
}
__efi_runtime_data u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;

void __efi_runtime reset_cpu(ulong addr)
{
#if defined(CONFIG_ARCH_LX2160A) || defined(CONFIG_ARCH_LX2162A)
	/* clear the RST_REQ_MSK and SW_RST_REQ */
	out_le32(rstcr, 0x0);

	/* initiate the sw reset request */
	out_le32(rstcr, 0x1);
#else
	u32 val;

	/* Raise RESET_REQ_B */
	val = scfg_in32(rstcr);
	val |= 0x02;
	scfg_out32(rstcr, val);
#endif
}

#if defined(CONFIG_EFI_LOADER) && !defined(CONFIG_PSCI_RESET)

void __efi_runtime EFIAPI efi_reset_system(
		       enum efi_reset_type reset_type,
		       efi_status_t reset_status,
		       unsigned long data_size, void *reset_data)
{
	switch (reset_type) {
	case EFI_RESET_COLD:
	case EFI_RESET_WARM:
	case EFI_RESET_PLATFORM_SPECIFIC:
		reset_cpu(0);
		break;
	case EFI_RESET_SHUTDOWN:
		/* Nothing we can do */
		break;
	}

	while (1) { }
}

efi_status_t efi_reset_system_init(void)
{
	return efi_add_runtime_mmio(&rstcr, sizeof(*rstcr));
}

#endif
/*
 * Calculate reserved memory for the given memory bank.
 * Return the aligned reserved size on success.
 * Return (ram_size + needed size) on failure.
 */
phys_size_t board_reserve_ram_top(phys_size_t ram_size)
{
	phys_size_t ram_top = ram_size;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	ram_top = mc_get_dram_block_size();
	if (ram_top > ram_size)
		return ram_size + ram_top;

	ram_top = ram_size - ram_top;
	/* The start address of MC reserved memory needs to be aligned. */
	ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
#endif

	return ram_size - ram_top;
}

phys_size_t get_effective_memsize(void)
{
	phys_size_t ea_size, rem = 0;

	/*
	 * For ARMv8 SoCs, DDR memory is split into two or three regions. The
	 * first region is the 2GB space at 0x8000_0000. Secure memory needs to
	 * be allocated from the first region. If the memory extends to the
	 * second region (or the third region if applicable), Management
	 * Complex (MC) memory should be put into the highest region, i.e. the
	 * end of DDR memory. CONFIG_MAX_MEM_MAPPED is set to the size of the
	 * first region so U-Boot doesn't relocate itself to a higher address.
	 * Should DDR be configured to skip the first region, this function
	 * needs to be adjusted.
	 */
	if (gd->ram_size > CONFIG_MAX_MEM_MAPPED) {
		ea_size = CONFIG_MAX_MEM_MAPPED;
		rem = gd->ram_size - ea_size;
	} else {
		ea_size = gd->ram_size;
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	/* Check if we have enough space for secure memory */
	if (ea_size > CONFIG_SYS_MEM_RESERVE_SECURE)
		ea_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	else
		printf("Error: Not enough space for secure memory.\n");
#endif
	/* Check if we have enough memory for MC */
	if (rem < board_reserve_ram_top(rem)) {
		/* Not enough memory in high region to reserve */
		if (ea_size > board_reserve_ram_top(ea_size))
			ea_size -= board_reserve_ram_top(ea_size);
		else
			printf("Error: Not enough space for reserved memory.\n");
	}

	return ea_size;
}
#ifdef CONFIG_TFABOOT
phys_size_t tfa_get_dram_size(void)
{
	struct pt_regs regs;
	phys_size_t dram_size = 0;

	regs.regs[0] = SMC_DRAM_BANK_INFO;
	regs.regs[1] = -1;

	smc_call(&regs);
	if (regs.regs[0])
		return 0;

	dram_size = regs.regs[1];
	return dram_size;
}

static int tfa_dram_init_banksize(void)
{
	int i = 0, ret = 0;
	struct pt_regs regs;
	phys_size_t dram_size = tfa_get_dram_size();

	debug("dram_size %llx\n", dram_size);

	if (!dram_size)
		return -EINVAL;

	do {
		regs.regs[0] = SMC_DRAM_BANK_INFO;
		regs.regs[1] = i;

		smc_call(&regs);
		if (regs.regs[0]) {
			ret = -EINVAL;
			break;
		}

		debug("bank[%d]: start %lx, size %lx\n", i, regs.regs[1],
		      regs.regs[2]);
		gd->bd->bi_dram[i].start = regs.regs[1];
		gd->bd->bi_dram[i].size = regs.regs[2];

		dram_size -= gd->bd->bi_dram[i].size;

		i++;
	} while (dram_size);

	if (i > 0)
		ret = 0;

#if defined(CONFIG_RESV_RAM) && !defined(CONFIG_SPL_BUILD)
	/* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >=
	    board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
		gd->arch.resv_ram = gd->bd->bi_dram[2].start +
				    gd->bd->bi_dram[2].size -
				    board_reserve_ram_top(gd->bd->bi_dram[2].size);
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >=
		    board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[1].start +
				gd->bd->bi_dram[1].size -
				board_reserve_ram_top(gd->bd->bi_dram[1].size);
		} else if (gd->bd->bi_dram[0].size >
			   board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[0].start +
				gd->bd->bi_dram[0].size -
				board_reserve_ram_top(gd->bd->bi_dram[0].size);
		}
	}
#endif	/* CONFIG_RESV_RAM */

	return ret;
}
#endif
int dram_init_banksize(void)
{
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
	phys_size_t dp_ddr_size;
#endif

#ifdef CONFIG_TFABOOT
	if (!tfa_dram_init_banksize())
		return 0;
#endif
	/*
	 * gd->ram_size has the total size of DDR memory, less reserved secure
	 * memory. The DDR extends from the low region to the high region(s)
	 * presuming no hole is created by the DDR configuration.
	 * gd->arch.secure_ram tracks the location of secure memory.
	 * gd->arch.resv_ram tracks the location of reserved memory for the
	 * Management Complex (MC). Because gd->ram_size is reduced by this
	 * function if secure memory is reserved, checking gd->arch.secure_ram
	 * should be done to avoid running it repeatedly.
	 */

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		debug("No need to run again, skip %s\n", __func__);

		return 0;
	}
#endif

	gd->bd->bi_dram[0].start = CONFIG_SYS_SDRAM_BASE;
	if (gd->ram_size > CONFIG_SYS_DDR_BLOCK1_SIZE) {
		gd->bd->bi_dram[0].size = CONFIG_SYS_DDR_BLOCK1_SIZE;
		gd->bd->bi_dram[1].start = CONFIG_SYS_DDR_BLOCK2_BASE;
		gd->bd->bi_dram[1].size = gd->ram_size -
					  CONFIG_SYS_DDR_BLOCK1_SIZE;
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
		if (gd->bd->bi_dram[1].size > CONFIG_SYS_DDR_BLOCK2_SIZE) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DDR_BLOCK3_BASE;
			gd->bd->bi_dram[2].size = gd->bd->bi_dram[1].size -
						  CONFIG_SYS_DDR_BLOCK2_SIZE;
			gd->bd->bi_dram[1].size = CONFIG_SYS_DDR_BLOCK2_SIZE;
		}
#endif
	} else {
		gd->bd->bi_dram[0].size = gd->ram_size;
	}
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->bd->bi_dram[0].size >
				CONFIG_SYS_MEM_RESERVE_SECURE) {
		gd->bd->bi_dram[0].size -=
				CONFIG_SYS_MEM_RESERVE_SECURE;
		gd->arch.secure_ram = gd->bd->bi_dram[0].start +
				      gd->bd->bi_dram[0].size;
		gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
		gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	}
#endif	/* CONFIG_SYS_MEM_RESERVE_SECURE */

#if defined(CONFIG_RESV_RAM) && !defined(CONFIG_SPL_BUILD)
	/* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >=
	    board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
		gd->arch.resv_ram = gd->bd->bi_dram[2].start +
				    gd->bd->bi_dram[2].size -
				    board_reserve_ram_top(gd->bd->bi_dram[2].size);
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >=
		    board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[1].start +
				gd->bd->bi_dram[1].size -
				board_reserve_ram_top(gd->bd->bi_dram[1].size);
		} else if (gd->bd->bi_dram[0].size >
			   board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[0].start +
				gd->bd->bi_dram[0].size -
				board_reserve_ram_top(gd->bd->bi_dram[0].size);
		}
	}
#endif	/* CONFIG_RESV_RAM */

#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
	if (soc_has_dp_ddr()) {
		/* initialize DP-DDR here */
		puts("DP-DDR: ");
		/*
		 * The DDR controller uses 0 as the base address for binding.
		 * It is mapped to CONFIG_SYS_DP_DDR_BASE for the core to
		 * access.
		 */
		dp_ddr_size = fsl_other_ddr_sdram(CONFIG_SYS_DP_DDR_BASE_PHY,
						  CONFIG_DP_DDR_CTRL,
						  CONFIG_DP_DDR_NUM_CTRLS,
						  CONFIG_DP_DDR_DIMM_SLOTS_PER_CTLR,
						  NULL, NULL, NULL);
		if (dp_ddr_size) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DP_DDR_BASE;
			gd->bd->bi_dram[2].size = dp_ddr_size;
		} else {
			puts("Not detected");
		}
	}
#endif

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	debug("%s is called. gd->ram_size is reduced to %lu\n",
	      __func__, (ulong)gd->ram_size);
#endif

	return 0;
}
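/*
 * Report each DRAM bank to the EFI memory map, trimming the region reserved
 * for the Management Complex and skipping DP-DDR.
 */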
#if CONFIG_IS_ENABLED(EFI_LOADER)
void efi_add_known_memory(void)
{
	int i;
	phys_addr_t ram_start;
	phys_size_t ram_size;

	/* Add RAM */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
		if (i == 2)
			continue;	/* skip DP-DDR */
#endif
		ram_start = gd->bd->bi_dram[i].start;
		ram_size = gd->bd->bi_dram[i].size;
#ifdef CONFIG_RESV_RAM
		if (gd->arch.resv_ram >= ram_start &&
		    gd->arch.resv_ram < ram_start + ram_size)
			ram_size = gd->arch.resv_ram - ram_start;
#endif
		efi_add_memory_map(ram_start, ram_size,
				   EFI_CONVENTIONAL_MEMORY);
	}
}
#endif
/*
 * Before the DDR size is known, the early MMU table has DDR mapped as device
 * memory to avoid speculative access. To relocate U-Boot to DDR, "normal
 * memory" needs to be set for these mappings.
 * If a special case configures DDR with holes in the mapping, the holes need
 * to be marked as invalid. This is not implemented in this function.
 */
void update_early_mmu_table(void)
{
	if (!gd->arch.tlb_addr)
		return;

	if (gd->ram_size <= CONFIG_SYS_FSL_DRAM_SIZE1) {
		mmu_change_region_attr(
					CONFIG_SYS_SDRAM_BASE,
					gd->ram_size,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
	} else {
		mmu_change_region_attr(
					CONFIG_SYS_SDRAM_BASE,
					CONFIG_SYS_DDR_BLOCK1_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#ifndef CONFIG_SYS_DDR_BLOCK2_SIZE
#error "Missing CONFIG_SYS_DDR_BLOCK2_SIZE"
#endif
		if (gd->ram_size - CONFIG_SYS_DDR_BLOCK1_SIZE >
		    CONFIG_SYS_DDR_BLOCK2_SIZE) {
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK2_BASE,
					CONFIG_SYS_DDR_BLOCK2_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK3_BASE,
					gd->ram_size -
					CONFIG_SYS_DDR_BLOCK1_SIZE -
					CONFIG_SYS_DDR_BLOCK2_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
		} else
#endif
		{
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK2_BASE,
					gd->ram_size -
					CONFIG_SYS_DDR_BLOCK1_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
		}
	}
}

__weak int dram_init(void)
{
	fsl_initdram();
#if (!defined(CONFIG_SPL) && !defined(CONFIG_TFABOOT)) || \
	defined(CONFIG_SPL_BUILD)
	/* This will break-before-make MMU for DDR */
	update_early_mmu_table();
#endif

	return 0;
}

#ifdef CONFIG_ARCH_MISC_INIT
__weak int serdes_misc_init(void)
{
	return 0;
}

int arch_misc_init(void)