1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2017-2020 NXP
4 * Copyright 2014-2015 Freescale Semiconductor, Inc.
10 #include <fsl_ddr_sdram.h>
16 #include <asm/cache.h>
18 #include <asm/ptrace.h>
19 #include <linux/errno.h>
20 #include <asm/system.h>
22 #include <asm/armv8/mmu.h>
24 #include <asm/arch/fsl_serdes.h>
25 #include <asm/arch/soc.h>
26 #include <asm/arch/cpu.h>
27 #include <asm/arch/speed.h>
28 #include <fsl_immap.h>
29 #include <asm/arch/mp.h>
30 #include <efi_loader.h>
31 #include <fsl-mc/fsl_mc.h>
32 #ifdef CONFIG_FSL_ESDHC
33 #include <fsl_esdhc.h>
35 #include <asm/armv8/sec_firmware.h>
36 #ifdef CONFIG_SYS_FSL_DDR
39 #include <asm/arch/clock.h>
41 #include <fsl_qbman.h>
44 #include <env_internal.h>
45 #ifdef CONFIG_CHAIN_OF_TRUST
46 #include <fsl_validate.h>
49 #include <linux/mii.h>
51 DECLARE_GLOBAL_DATA_PTR;
53 static struct cpu_type cpu_type_list[] = {
54 CPU_TYPE_ENTRY(LS2080A, LS2080A, 8),
55 CPU_TYPE_ENTRY(LS2085A, LS2085A, 8),
56 CPU_TYPE_ENTRY(LS2045A, LS2045A, 4),
57 CPU_TYPE_ENTRY(LS2088A, LS2088A, 8),
58 CPU_TYPE_ENTRY(LS2084A, LS2084A, 8),
59 CPU_TYPE_ENTRY(LS2048A, LS2048A, 4),
60 CPU_TYPE_ENTRY(LS2044A, LS2044A, 4),
61 CPU_TYPE_ENTRY(LS2081A, LS2081A, 8),
62 CPU_TYPE_ENTRY(LS2041A, LS2041A, 4),
63 CPU_TYPE_ENTRY(LS1043A, LS1043A, 4),
64 CPU_TYPE_ENTRY(LS1043A, LS1043A_P23, 4),
65 CPU_TYPE_ENTRY(LS1023A, LS1023A, 2),
66 CPU_TYPE_ENTRY(LS1023A, LS1023A_P23, 2),
67 CPU_TYPE_ENTRY(LS1046A, LS1046A, 4),
68 CPU_TYPE_ENTRY(LS1026A, LS1026A, 2),
69 CPU_TYPE_ENTRY(LS2040A, LS2040A, 4),
70 CPU_TYPE_ENTRY(LS1012A, LS1012A, 1),
71 CPU_TYPE_ENTRY(LS1017A, LS1017A, 1),
72 CPU_TYPE_ENTRY(LS1018A, LS1018A, 1),
73 CPU_TYPE_ENTRY(LS1027A, LS1027A, 2),
74 CPU_TYPE_ENTRY(LS1028A, LS1028A, 2),
75 CPU_TYPE_ENTRY(LS1088A, LS1088A, 8),
76 CPU_TYPE_ENTRY(LS1084A, LS1084A, 8),
77 CPU_TYPE_ENTRY(LS1048A, LS1048A, 4),
78 CPU_TYPE_ENTRY(LS1044A, LS1044A, 4),
79 CPU_TYPE_ENTRY(LX2160A, LX2160A, 16),
80 CPU_TYPE_ENTRY(LX2120A, LX2120A, 12),
81 CPU_TYPE_ENTRY(LX2080A, LX2080A, 8),
84 #define EARLY_PGTABLE_SIZE 0x5000
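/*
 * MMU regions used before DRAM is initialized. All entries are identity
 * mapped (virt == phys); this table is installed by early_mmu_setup() below.
 */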
85 static struct mm_region early_map[] = {
86 #ifdef CONFIG_FSL_LSCH3
87 { CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
88 CONFIG_SYS_FSL_CCSR_SIZE,
89 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
90 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
92 { CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
93 SYS_FSL_OCRAM_SPACE_SIZE,
94 PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
96 { CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
97 CONFIG_SYS_FSL_QSPI_SIZE1,
98 PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE},
100 /* For IFC Region #1, only the first 4MB is cache-enabled */
101 { CONFIG_SYS_FSL_IFC_BASE1, CONFIG_SYS_FSL_IFC_BASE1,
102 CONFIG_SYS_FSL_IFC_SIZE1_1,
103 PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
105 { CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
106 CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
107 CONFIG_SYS_FSL_IFC_SIZE1 - CONFIG_SYS_FSL_IFC_SIZE1_1,
108 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
110 { CONFIG_SYS_FLASH_BASE, CONFIG_SYS_FSL_IFC_BASE1,
111 CONFIG_SYS_FSL_IFC_SIZE1,
112 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
115 { CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
116 CONFIG_SYS_FSL_DRAM_SIZE1,
117 #if defined(CONFIG_TFABOOT) || \
118 (defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
119 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
120 #else /* Start with nGnRnE and PXN and UXN to prevent speculative access */
121 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
123 PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
125 #ifdef CONFIG_FSL_IFC
126 /* Map IFC region #2 up to CONFIG_SYS_FLASH_BASE for NAND boot */
127 { CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
128 CONFIG_SYS_FLASH_BASE - CONFIG_SYS_FSL_IFC_BASE2,
129 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
132 { CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
133 CONFIG_SYS_FSL_DCSR_SIZE,
134 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
135 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
137 { CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
138 CONFIG_SYS_FSL_DRAM_SIZE2,
139 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
140 PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
142 #ifdef CONFIG_SYS_FSL_DRAM_BASE3
143 { CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
144 CONFIG_SYS_FSL_DRAM_SIZE3,
145 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
146 PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
149 #elif defined(CONFIG_FSL_LSCH2)
150 { CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
151 CONFIG_SYS_FSL_CCSR_SIZE,
152 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
153 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
155 { CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
156 SYS_FSL_OCRAM_SPACE_SIZE,
157 PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
159 { CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
160 CONFIG_SYS_FSL_DCSR_SIZE,
161 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
162 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
164 { CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
165 CONFIG_SYS_FSL_QSPI_SIZE,
166 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
168 #ifdef CONFIG_FSL_IFC
169 { CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
170 CONFIG_SYS_FSL_IFC_SIZE,
171 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
174 { CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
175 CONFIG_SYS_FSL_DRAM_SIZE1,
176 #if defined(CONFIG_TFABOOT) || \
177 (defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
178 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
179 #else /* Start with nGnRnE and PXN and UXN to prevent speculative access */
180 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
182 PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
184 { CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
185 CONFIG_SYS_FSL_DRAM_SIZE2,
186 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
187 PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
190 {}, /* list terminator */
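/*
 * Full MMU map installed by final_mmu_setup() once DRAM is available.
 * The DDR entries below are patched at runtime to match the actual bank
 * addresses and sizes reported in gd->bd->bi_dram[].
 */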
193 static struct mm_region final_map[] = {
194 #ifdef CONFIG_FSL_LSCH3
195 { CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
196 CONFIG_SYS_FSL_CCSR_SIZE,
197 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
198 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
200 { CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
201 SYS_FSL_OCRAM_SPACE_SIZE,
202 PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
204 { CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
205 CONFIG_SYS_FSL_DRAM_SIZE1,
206 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
207 PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
209 { CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
210 CONFIG_SYS_FSL_QSPI_SIZE1,
211 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
212 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
214 { CONFIG_SYS_FSL_QSPI_BASE2, CONFIG_SYS_FSL_QSPI_BASE2,
215 CONFIG_SYS_FSL_QSPI_SIZE2,
216 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
217 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
219 #ifdef CONFIG_FSL_IFC
220 { CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
221 CONFIG_SYS_FSL_IFC_SIZE2,
222 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
223 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
226 { CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
227 CONFIG_SYS_FSL_DCSR_SIZE,
228 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
229 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
231 { CONFIG_SYS_FSL_MC_BASE, CONFIG_SYS_FSL_MC_BASE,
232 CONFIG_SYS_FSL_MC_SIZE,
233 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
234 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
236 { CONFIG_SYS_FSL_NI_BASE, CONFIG_SYS_FSL_NI_BASE,
237 CONFIG_SYS_FSL_NI_SIZE,
238 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
239 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
241 /* For QBMAN portal, only the first 64MB is cache-enabled */
242 { CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
243 CONFIG_SYS_FSL_QBMAN_SIZE_1,
244 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
245 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN | PTE_BLOCK_NS
247 { CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
248 CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
249 CONFIG_SYS_FSL_QBMAN_SIZE - CONFIG_SYS_FSL_QBMAN_SIZE_1,
250 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
251 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
253 { CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
254 CONFIG_SYS_PCIE1_PHYS_SIZE,
255 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
256 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
258 { CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
259 CONFIG_SYS_PCIE2_PHYS_SIZE,
260 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
261 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
263 #ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
264 { CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
265 CONFIG_SYS_PCIE3_PHYS_SIZE,
266 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
267 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
270 #ifdef CONFIG_SYS_PCIE4_PHYS_ADDR
271 { CONFIG_SYS_PCIE4_PHYS_ADDR, CONFIG_SYS_PCIE4_PHYS_ADDR,
272 CONFIG_SYS_PCIE4_PHYS_SIZE,
273 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
274 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
277 #ifdef SYS_PCIE5_PHYS_ADDR
278 { SYS_PCIE5_PHYS_ADDR, SYS_PCIE5_PHYS_ADDR,
280 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
281 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
284 #ifdef SYS_PCIE6_PHYS_ADDR
285 { SYS_PCIE6_PHYS_ADDR, SYS_PCIE6_PHYS_ADDR,
287 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
288 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
291 { CONFIG_SYS_FSL_WRIOP1_BASE, CONFIG_SYS_FSL_WRIOP1_BASE,
292 CONFIG_SYS_FSL_WRIOP1_SIZE,
293 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
294 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
296 { CONFIG_SYS_FSL_AIOP1_BASE, CONFIG_SYS_FSL_AIOP1_BASE,
297 CONFIG_SYS_FSL_AIOP1_SIZE,
298 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
299 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
301 { CONFIG_SYS_FSL_PEBUF_BASE, CONFIG_SYS_FSL_PEBUF_BASE,
302 CONFIG_SYS_FSL_PEBUF_SIZE,
303 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
304 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
306 { CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
307 CONFIG_SYS_FSL_DRAM_SIZE2,
308 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
309 PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
311 #ifdef CONFIG_SYS_FSL_DRAM_BASE3
312 { CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
313 CONFIG_SYS_FSL_DRAM_SIZE3,
314 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
315 PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
318 #elif defined(CONFIG_FSL_LSCH2)
319 { CONFIG_SYS_FSL_BOOTROM_BASE, CONFIG_SYS_FSL_BOOTROM_BASE,
320 CONFIG_SYS_FSL_BOOTROM_SIZE,
321 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
322 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
324 { CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
325 CONFIG_SYS_FSL_CCSR_SIZE,
326 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
327 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
329 { CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
330 SYS_FSL_OCRAM_SPACE_SIZE,
331 PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
333 { CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
334 CONFIG_SYS_FSL_DCSR_SIZE,
335 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
336 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
338 { CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
339 CONFIG_SYS_FSL_QSPI_SIZE,
340 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
341 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
343 #ifdef CONFIG_FSL_IFC
344 { CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
345 CONFIG_SYS_FSL_IFC_SIZE,
346 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
349 { CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
350 CONFIG_SYS_FSL_DRAM_SIZE1,
351 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
352 PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
354 { CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
355 CONFIG_SYS_FSL_QBMAN_SIZE,
356 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
357 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
359 { CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
360 CONFIG_SYS_FSL_DRAM_SIZE2,
361 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
362 PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
364 { CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
365 CONFIG_SYS_PCIE1_PHYS_SIZE,
366 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
367 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
369 { CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
370 CONFIG_SYS_PCIE2_PHYS_SIZE,
371 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
372 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
374 #ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
375 { CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
376 CONFIG_SYS_PCIE3_PHYS_SIZE,
377 PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
378 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
381 { CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
382 CONFIG_SYS_FSL_DRAM_SIZE3,
383 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
384 PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
387 #ifdef CONFIG_SYS_MEM_RESERVE_SECURE
388 {}, /* placeholder for secure mem */
393 struct mm_region *mem_map = early_map;
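/*
 * Fill "name" with the SoC name matching the SVR in cpu_type_list and
 * append the silicon revision (e.g. " Rev1.0"); falls back to "unknown"
 * if the SVR is not recognized.
 */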
395 void cpu_name(char *name)
397 struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
398 unsigned int i, svr, ver;
400 svr = gur_in32(&gur->svr);
401 ver = SVR_SOC_VER(svr);
403 for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
404 if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
405 strcpy(name, cpu_type_list[i].name);
406 #ifdef CONFIG_ARCH_LX2160A
407 if (IS_C_PROCESSOR(svr))
411 if (IS_E_PROCESSOR(svr))
414 sprintf(name + strlen(name), " Rev%d.%d",
415 SVR_MAJ(svr), SVR_MIN(svr));
419 if (i == ARRAY_SIZE(cpu_type_list))
420 strcpy(name, "unknown");
423 #if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
425 * To start the MMU before DDR is available, we create the MMU tables in SRAM.
426 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
427 * levels of translation tables here to cover the 40-bit address space.
428 * We use a 4KB granule size with 40-bit physical addresses, i.e. T0SZ=24.
429 * Addresses above EARLY_PGTABLE_SIZE (0x5000) are free for other purposes.
430 * Note: the debug print in cache_v8.c is not usable for debugging
431 * these early MMU tables because the UART is not yet available.
433 static inline void early_mmu_setup(void)
435 unsigned int el = current_el();
437 /* global data is already set up, no allocation yet */
439 gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
441 gd->arch.tlb_addr = CONFIG_SYS_DDR_SDRAM_BASE;
442 gd->arch.tlb_fillptr = gd->arch.tlb_addr;
443 gd->arch.tlb_size = EARLY_PGTABLE_SIZE;
445 /* Create early page tables */
448 /* point TTBR to the new table */
449 set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
450 get_tcr(el, NULL, NULL) &
451 ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
454 set_sctlr(get_sctlr() | CR_M);
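/*
 * LS2088A-family personalities place the PCIe windows at different physical
 * addresses than the LS2080A defaults, so patch the PCIe entries in
 * final_map accordingly.
 */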
457 static void fix_pcie_mmu_map(void)
459 #ifdef CONFIG_ARCH_LS2080A
462 struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
464 svr = gur_in32(&gur->svr);
465 ver = SVR_SOC_VER(svr);
467 /* Fix PCIE base and size for LS2088A */
468 if ((ver == SVR_LS2088A) || (ver == SVR_LS2084A) ||
469 (ver == SVR_LS2048A) || (ver == SVR_LS2044A) ||
470 (ver == SVR_LS2081A) || (ver == SVR_LS2041A)) {
471 for (i = 0; i < ARRAY_SIZE(final_map); i++) {
472 switch (final_map[i].phys) {
473 case CONFIG_SYS_PCIE1_PHYS_ADDR:
474 final_map[i].phys = 0x2000000000ULL;
475 final_map[i].virt = 0x2000000000ULL;
476 final_map[i].size = 0x800000000ULL;
478 case CONFIG_SYS_PCIE2_PHYS_ADDR:
479 final_map[i].phys = 0x2800000000ULL;
480 final_map[i].virt = 0x2800000000ULL;
481 final_map[i].size = 0x800000000ULL;
483 #ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
484 case CONFIG_SYS_PCIE3_PHYS_ADDR:
485 final_map[i].phys = 0x3000000000ULL;
486 final_map[i].virt = 0x3000000000ULL;
487 final_map[i].size = 0x800000000ULL;
490 #ifdef CONFIG_SYS_PCIE4_PHYS_ADDR
491 case CONFIG_SYS_PCIE4_PHYS_ADDR:
492 final_map[i].phys = 0x3800000000ULL;
493 final_map[i].virt = 0x3800000000ULL;
494 final_map[i].size = 0x800000000ULL;
506 * The final tables look similar to the early tables, but differ in detail.
507 * These tables live in DRAM. Sub-tables are added to enable caching for selected regions (e.g. the QBMan portal).
510 * Put the MMU tables in secure memory if gd->arch.secure_ram is valid.
511 * OCRAM will not be used for this purpose, so gd->arch.secure_ram can't be 0.
513 static inline void final_mmu_setup(void)
515 u64 tlb_addr_save = gd->arch.tlb_addr;
516 unsigned int el = current_el();
519 /* fix the final_map before filling in the block entries */
524 /* Update mapping for DDR to actual size */
525 for (index = 0; index < ARRAY_SIZE(final_map) - 2; index++) {
527 * Find the entry for the DDR mapping and update the address and
528 * size. Zero-sized mappings are skipped when creating the MMU tables.
531 switch (final_map[index].virt) {
532 case CONFIG_SYS_FSL_DRAM_BASE1:
533 final_map[index].virt = gd->bd->bi_dram[0].start;
534 final_map[index].phys = gd->bd->bi_dram[0].start;
535 final_map[index].size = gd->bd->bi_dram[0].size;
537 #ifdef CONFIG_SYS_FSL_DRAM_BASE2
538 case CONFIG_SYS_FSL_DRAM_BASE2:
539 #if (CONFIG_NR_DRAM_BANKS >= 2)
540 final_map[index].virt = gd->bd->bi_dram[1].start;
541 final_map[index].phys = gd->bd->bi_dram[1].start;
542 final_map[index].size = gd->bd->bi_dram[1].size;
544 final_map[index].size = 0;
548 #ifdef CONFIG_SYS_FSL_DRAM_BASE3
549 case CONFIG_SYS_FSL_DRAM_BASE3:
550 #if (CONFIG_NR_DRAM_BANKS >= 3)
551 final_map[index].virt = gd->bd->bi_dram[2].start;
552 final_map[index].phys = gd->bd->bi_dram[2].start;
553 final_map[index].size = gd->bd->bi_dram[2].size;
555 final_map[index].size = 0;
564 #ifdef CONFIG_SYS_MEM_RESERVE_SECURE
565 if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
568 * Only use gd->arch.secure_ram if the address has been
569 * recalculated. Align to 4KB for the MMU tables.
571 /* put page tables in secure ram */
572 index = ARRAY_SIZE(final_map) - 2;
573 gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
574 final_map[index].virt = gd->arch.secure_ram & ~0x3;
575 final_map[index].phys = final_map[index].virt;
576 final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
577 final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
578 gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
579 tlb_addr_save = gd->arch.tlb_addr;
581 /* Use allocated (board_f.c) memory for TLB */
582 tlb_addr_save = gd->arch.tlb_allocated;
583 gd->arch.tlb_addr = tlb_addr_save;
588 /* Reset the fill ptr */
589 gd->arch.tlb_fillptr = tlb_addr_save;
591 /* Create normal system page tables */
594 /* Create emergency page tables */
595 gd->arch.tlb_addr = gd->arch.tlb_fillptr;
596 gd->arch.tlb_emerg = gd->arch.tlb_addr;
598 gd->arch.tlb_addr = tlb_addr_save;
600 /* Disable cache and MMU */
601 dcache_disable(); /* TLBs are invalidated */
602 invalidate_icache_all();
604 /* point TTBR to the new table */
605 set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
608 set_sctlr(get_sctlr() | CR_M);
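/*
 * Amount of memory to reserve for the relocated MMU tables. The reservation
 * (gd->arch.tlb_allocated) is presumably made during board_f.c init and is
 * used by final_mmu_setup() above when secure memory is not maintained.
 */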
611 u64 get_page_table_size(void)
616 int arch_cpu_init(void)
619 * This function is called before U-Boot relocates itself, to speed up
620 * the running system. It is not necessary if performance is not
621 * critical. Skip if the MMU is already enabled by SPL or other means.
623 if (get_sctlr() & CR_M)
627 __asm_invalidate_dcache_all();
628 __asm_invalidate_tlb_all();
630 set_sctlr(get_sctlr() | CR_C);
640 * This function is called from common/board_r.c.
641 * It recreates the MMU tables in main memory.
643 void enable_caches(void)
646 __asm_invalidate_tlb_all();
650 #endif /* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */
652 #ifdef CONFIG_TFABOOT
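/* Decode the boot source from the RCW_SRC field of the PORSR1 value. */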
653 enum boot_src __get_boot_src(u32 porsr1)
655 enum boot_src src = BOOT_SOURCE_RESERVED;
656 u32 rcw_src = (porsr1 & RCW_SRC_MASK) >> RCW_SRC_BIT;
657 #if !defined(CONFIG_NXP_LSCH3_2)
660 debug("%s: rcw_src 0x%x\n", __func__, rcw_src);
662 #if defined(CONFIG_FSL_LSCH3)
663 #if defined(CONFIG_NXP_LSCH3_2)
665 case RCW_SRC_SDHC1_VAL:
666 src = BOOT_SOURCE_SD_MMC;
668 case RCW_SRC_SDHC2_VAL:
669 src = BOOT_SOURCE_SD_MMC2;
671 case RCW_SRC_I2C1_VAL:
672 src = BOOT_SOURCE_I2C1_EXTENDED;
674 case RCW_SRC_FLEXSPI_NAND2K_VAL:
675 src = BOOT_SOURCE_XSPI_NAND;
677 case RCW_SRC_FLEXSPI_NAND4K_VAL:
678 src = BOOT_SOURCE_XSPI_NAND;
680 case RCW_SRC_RESERVED_1_VAL:
681 src = BOOT_SOURCE_RESERVED;
683 case RCW_SRC_FLEXSPI_NOR_24B:
684 src = BOOT_SOURCE_XSPI_NOR;
687 src = BOOT_SOURCE_RESERVED;
690 val = rcw_src & RCW_SRC_TYPE_MASK;
691 if (val == RCW_SRC_NOR_VAL) {
692 val = rcw_src & NOR_TYPE_MASK;
697 src = BOOT_SOURCE_IFC_NOR;
700 src = BOOT_SOURCE_RESERVED;
703 /* RCW SRC Serial Flash */
704 val = rcw_src & RCW_SRC_SERIAL_MASK;
706 case RCW_SRC_QSPI_VAL:
707 /* RCW SRC Serial NOR (QSPI) */
708 src = BOOT_SOURCE_QSPI_NOR;
710 case RCW_SRC_SD_CARD_VAL:
711 /* RCW SRC SD Card */
712 src = BOOT_SOURCE_SD_MMC;
714 case RCW_SRC_EMMC_VAL:
716 src = BOOT_SOURCE_SD_MMC;
718 case RCW_SRC_I2C1_VAL:
719 /* RCW SRC I2C1 Extended */
720 src = BOOT_SOURCE_I2C1_EXTENDED;
723 src = BOOT_SOURCE_RESERVED;
727 #elif defined(CONFIG_FSL_LSCH2)
729 val = rcw_src & RCW_SRC_NAND_MASK;
730 if (val == RCW_SRC_NAND_VAL) {
731 val = rcw_src & NAND_RESERVED_MASK;
732 if (val != NAND_RESERVED_1 && val != NAND_RESERVED_2)
733 src = BOOT_SOURCE_IFC_NAND;
737 val = rcw_src & RCW_SRC_NOR_MASK;
738 if (val == NOR_8B_VAL || val == NOR_16B_VAL) {
739 src = BOOT_SOURCE_IFC_NOR;
744 src = BOOT_SOURCE_QSPI_NOR;
747 src = BOOT_SOURCE_SD_MMC;
750 src = BOOT_SOURCE_RESERVED;
756 if (CONFIG_IS_ENABLED(SYS_FSL_ERRATUM_A010539) && !rcw_src)
757 src = BOOT_SOURCE_QSPI_NOR;
759 debug("%s: src 0x%x\n", __func__, src);
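/*
 * Read PORSR1 and decode the boot source: when running at EL2 under TF-A,
 * fetch the RCW via the SIP_SVC_RCW SMC; at EL3 (or if the SMC returns
 * nothing) read the DCFG PORSR1 register directly.
 */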
763 enum boot_src get_boot_src(void)
768 #if defined(CONFIG_FSL_LSCH3)
769 u32 __iomem *dcfg_ccsr = (u32 __iomem *)DCFG_BASE;
770 #elif defined(CONFIG_FSL_LSCH2)
771 struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
774 if (current_el() == 2) {
775 regs.regs[0] = SIP_SVC_RCW;
779 porsr1 = regs.regs[1];
782 if (current_el() == 3 || !porsr1) {
783 #ifdef CONFIG_FSL_LSCH3
784 porsr1 = in_le32(dcfg_ccsr + DCFG_PORSR1 / 4);
785 #elif defined(CONFIG_FSL_LSCH2)
786 porsr1 = in_be32(&gur->porsr1);
790 debug("%s: porsr1 0x%x\n", __func__, porsr1);
792 return __get_boot_src(porsr1);
795 #ifdef CONFIG_ENV_IS_IN_MMC
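/*
 * Select the MMC device that holds the environment based on the boot
 * source (SD vs. the second SD/eMMC controller), defaulting to
 * CONFIG_SYS_MMC_ENV_DEV.
 */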
796 int mmc_get_env_dev(void)
798 enum boot_src src = get_boot_src();
799 int dev = CONFIG_SYS_MMC_ENV_DEV;
802 case BOOT_SOURCE_SD_MMC:
805 case BOOT_SOURCE_SD_MMC2:
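/*
 * Map the boot source to the environment storage location (parallel flash,
 * SPI flash, NAND, MMC, ...), defaulting to ENVL_NOWHERE.
 */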
816 enum env_location env_get_location(enum env_operation op, int prio)
818 enum boot_src src = get_boot_src();
819 enum env_location env_loc = ENVL_NOWHERE;
824 #ifdef CONFIG_ENV_IS_NOWHERE
829 case BOOT_SOURCE_IFC_NOR:
830 env_loc = ENVL_FLASH;
832 case BOOT_SOURCE_QSPI_NOR:
834 case BOOT_SOURCE_XSPI_NOR:
835 env_loc = ENVL_SPI_FLASH;
837 case BOOT_SOURCE_IFC_NAND:
839 case BOOT_SOURCE_QSPI_NAND:
841 case BOOT_SOURCE_XSPI_NAND:
844 case BOOT_SOURCE_SD_MMC:
846 case BOOT_SOURCE_SD_MMC2:
849 case BOOT_SOURCE_I2C1_EXTENDED:
857 #endif /* CONFIG_TFABOOT */
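/*
 * Return the topology initiator type word for initiator "init_id" of the
 * given cluster, or 0 if the TP_ITYP_AV (available) bit is not set.
 */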
859 u32 initiator_type(u32 cluster, int init_id)
861 struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
862 u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
865 type = gur_in32(&gur->tp_ityp[idx]);
866 if (type & TP_ITYP_AV)
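/*
 * Build a bitmask of the physical positions of all ARM cores by walking
 * the topology clusters until the end-of-cluster (EOC) flag is seen.
 */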
872 u32 cpu_pos_mask(void)
874 struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
876 u32 cluster, type, mask = 0;
881 cluster = gur_in32(&gur->tp_cluster[i].lower);
882 for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
883 type = initiator_type(cluster, j);
884 if (type && (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM))
885 mask |= 1 << (i * TP_INIT_PER_CLUSTER + j);
888 } while ((cluster & TP_CLUSTER_EOC) == 0x0);
895 struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
896 int i = 0, count = 0;
897 u32 cluster, type, mask = 0;
902 cluster = gur_in32(&gur->tp_cluster[i].lower);
903 for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
904 type = initiator_type(cluster, j);
906 if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
912 } while ((cluster & TP_CLUSTER_EOC) == 0x0);
918 * Return the number of cores on this SoC.
920 int cpu_numcores(void)
922 return hweight32(cpu_mask());
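/*
 * Walk the topology clusters to find which cluster the given logical core
 * belongs to; returns -1 if the core cannot be identified.
 */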
925 int fsl_qoriq_core_to_cluster(unsigned int core)
927 struct ccsr_gur __iomem *gur =
928 (void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
929 int i = 0, count = 0;
935 cluster = gur_in32(&gur->tp_cluster[i].lower);
936 for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
937 if (initiator_type(cluster, j)) {
944 } while ((cluster & TP_CLUSTER_EOC) == 0x0);
946 return -1; /* cannot identify the cluster */
949 u32 fsl_qoriq_core_to_type(unsigned int core)
951 struct ccsr_gur __iomem *gur =
952 (void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
953 int i = 0, count = 0;
959 cluster = gur_in32(&gur->tp_cluster[i].lower);
960 for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
961 type = initiator_type(cluster, j);
969 } while ((cluster & TP_CLUSTER_EOC) == 0x0);
971 return -1; /* cannot identify the cluster */
974 #ifndef CONFIG_FSL_LSCH3
977 struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
979 return gur_in32(&gur->svr);
983 #ifdef CONFIG_DISPLAY_CPUINFO
984 int print_cpuinfo(void)
986 struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
987 struct sys_info sysinfo;
989 unsigned int i, core;
990 u32 type, rcw, svr = gur_in32(&gur->svr);
995 printf(" %s (0x%x)\n", buf, svr);
996 memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
997 get_sys_info(&sysinfo);
998 puts("Clock Configuration:");
999 for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
1002 type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
1003 printf("CPU%d(%s):%-4s MHz ", core,
1004 type == TY_ITYP_VER_A7 ? "A7 " :
1005 (type == TY_ITYP_VER_A53 ? "A53" :
1006 (type == TY_ITYP_VER_A57 ? "A57" :
1007 (type == TY_ITYP_VER_A72 ? "A72" : " "))),
1008 strmhz(buf, sysinfo.freq_processor[core]));
1010 /* Display platform clock as Bus frequency. */
1011 printf("\n Bus: %-4s MHz ",
1012 strmhz(buf, sysinfo.freq_systembus / CONFIG_SYS_FSL_PCLK_DIV));
1013 printf("DDR: %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
1014 #ifdef CONFIG_SYS_DPAA_FMAN
1015 printf(" FMAN: %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
1017 #ifdef CONFIG_SYS_FSL_HAS_DP_DDR
1018 if (soc_has_dp_ddr()) {
1019 printf(" DP-DDR: %-4s MT/s",
1020 strmhz(buf, sysinfo.freq_ddrbus2));
1026 * Display the RCW, so that no one gets confused as to what RCW
1027 * we're actually using for this boot.
1029 puts("Reset Configuration Word (RCW):");
1030 for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
1031 rcw = gur_in32(&gur->rcwsr[i]);
1033 printf("\n %08x:", i * 4);
1034 printf(" %08x", rcw);
1042 #ifdef CONFIG_FSL_ESDHC
1043 int cpu_mmc_init(struct bd_info *bis)
1045 return fsl_esdhc_mmc_init(bis);
1049 int cpu_eth_init(struct bd_info *bis)
1053 #if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
1054 error = fsl_mc_ldpaa_init(bis);
1056 #ifdef CONFIG_FMAN_ENET
1057 fm_standard_init(bis);
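/* Check whether the secure firmware advertises a usable PSCI version. */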
1062 static inline int check_psci(void)
1064 unsigned int psci_ver;
1066 psci_ver = sec_firmware_support_psci_version();
1067 if (psci_ver == PSCI_INVALID_VER)
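/*
 * Parse the "core_prefetch:disable=<mask>" option from the hwconfig
 * environment variable and ask the secure firmware (via an SMC) to disable
 * the core prefetcher for the cores in the mask. Core 0 cannot be disabled.
 */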
1073 static void config_core_prefetch(void)
1076 char buffer[HWCONFIG_BUFFER_SIZE];
1077 const char *prefetch_arg = NULL;
1080 struct pt_regs regs;
1082 if (env_get_f("hwconfig", buffer, sizeof(buffer)) > 0)
1087 prefetch_arg = hwconfig_subarg_f("core_prefetch", "disable",
1091 mask = simple_strtoul(prefetch_arg, NULL, 0) & 0xff;
1093 printf("Core0 prefetch can't be disabled\n");
1097 #define SIP_PREFETCH_DISABLE_64 0xC200FF13
1098 regs.regs[0] = SIP_PREFETCH_DISABLE_64;
1099 regs.regs[1] = mask;
1103 printf("Prefetch disable config failed for mask ");
1105 printf("Prefetch disable config passed for mask ");
1106 printf("0x%x\n", mask);
1110 #ifdef CONFIG_PCIE_ECAM_GENERIC
1111 __weak void set_ecam_icids(void)
1116 int arch_early_init_r(void)
1118 #ifdef CONFIG_SYS_FSL_ERRATUM_A009635
1121 * Erratum A009635 is valid only for the LS2080A SoC and
1122 * its personalities.
1124 svr_dev_id = get_svr();
1125 if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
1128 #if defined(CONFIG_SYS_FSL_ERRATUM_A009942) && defined(CONFIG_SYS_FSL_DDR)
1129 erratum_a009942_check_cpo();
1132 debug("PSCI: PSCI does not exist.\n");
1134 /* if PSCI does not exist, boot secondary cores here */
1135 if (fsl_layerscape_wake_seconday_cores())
1136 printf("Did not wake secondary cores\n");
1139 config_core_prefetch();
1141 #ifdef CONFIG_SYS_HAS_SERDES
1144 #ifdef CONFIG_SYS_FSL_HAS_RGMII
1145 /* Some DPMACs in ARMv8-based Freescale Layerscape SoCs can be
1146 * configured via both SerDes (SGMII, XFI, XLAUI, etc.) bits and via
1147 * EC*_PMUX (RGMII) bits in the RCW.
1148 * For example, DPMACs 17 and 18 in the LX2160A can be configured as SGMII
1149 * via the SerDes bits and as RGMII via the EC1_PMUX/EC2_PMUX bits.
1150 * If a DPMAC is enabled by the SerDes bits, that takes precedence
1151 * over the EC*_PMUX bits; i.e. on LX2160A, if we select a SerDes protocol
1152 * that configures DPMAC 17 as SGMII and also set EC1_PMUX to RGMII,
1153 * then the DPMAC is SGMII, not RGMII.
1155 * Therefore, call fsl_rgmii_init() after fsl_serdes_init(). In the SoC's
1156 * fsl_rgmii_init() function, we check whether the DPMAC is already enabled;
1157 * if it is (fsl_serdes_init() has already enabled it),
1158 * we do not enable it again.
1162 #ifdef CONFIG_FMAN_ENET
1163 #ifndef CONFIG_DM_ETH
1167 #ifdef CONFIG_SYS_DPAA_QBMAN
1168 setup_qbman_portals();
1170 #ifdef CONFIG_PCIE_ECAM_GENERIC
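/*
 * Set up the ARM generic timer: program the counter frequency (when running
 * at EL3), enable the cluster and per-core timebases where applicable, and
 * enable the global timer clock.
 */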
1176 int timer_init(void)
1178 u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
1179 #ifdef CONFIG_FSL_LSCH3
1180 u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
1182 #if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A) || \
1183 defined(CONFIG_ARCH_LS1028A)
1184 u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
1187 #ifdef COUNTER_FREQUENCY_REAL
1188 unsigned long cntfrq = COUNTER_FREQUENCY_REAL;
1190 /* Update with accurate clock frequency */
1191 if (current_el() == 3)
1192 asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
1195 #ifdef CONFIG_FSL_LSCH3
1196 /* Enable the timebase for all clusters.
1197 * It is safe to do so even if some clusters are not enabled.
1199 out_le32(cltbenr, 0xf);
1202 #if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A) || \
1203 defined(CONFIG_ARCH_LS1028A)
1205 * In certain Layerscape SoCs, the clock for each core's watchdog
1206 * has an enable bit in the PMU Physical Core Time Base Enable
1207 * Register (PCTBENR), which allows the watchdog to operate.
1209 setbits_le32(pctbenr, 0xff);
1211 * For the LS2080A SoC and its personalities, the timer controller
1212 * offset is different.
1214 svr_dev_id = get_svr();
1215 if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
1216 cntcr = (u32 *)SYS_FSL_LS2080A_LS2085A_TIMER_ADDR;
1220 /* Enable the clock for the timer.
1221 * This is a global setting.
1223 out_le32(cntcr, 0x1);
1228 __efi_runtime_data u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;
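/*
 * Trigger a SoC reset: LX2160A writes the reset request directly to RSTCR,
 * while other SoCs raise RESET_REQ_B through the SCFG reset control register.
 */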
1230 void __efi_runtime reset_cpu(ulong addr)
1232 #ifdef CONFIG_ARCH_LX2160A
1233 /* clear the RST_REQ_MSK and SW_RST_REQ */
1234 out_le32(rstcr, 0x0);
1236 /* initiate the sw reset request */
1237 out_le32(rstcr, 0x1);
1241 /* Raise RESET_REQ_B */
1242 val = scfg_in32(rstcr);
1244 scfg_out32(rstcr, val);
1248 #if defined(CONFIG_EFI_LOADER) && !defined(CONFIG_PSCI_RESET)
1250 void __efi_runtime EFIAPI efi_reset_system(
1251 enum efi_reset_type reset_type,
1252 efi_status_t reset_status,
1253 unsigned long data_size, void *reset_data)
1255 switch (reset_type) {
1256 case EFI_RESET_COLD:
1257 case EFI_RESET_WARM:
1258 case EFI_RESET_PLATFORM_SPECIFIC:
1261 case EFI_RESET_SHUTDOWN:
1262 /* Nothing we can do */
1269 efi_status_t efi_reset_system_init(void)
1271 return efi_add_runtime_mmio(&rstcr, sizeof(*rstcr));
1277 * Calculate the reserved memory within the given memory bank.
1278 * Returns the aligned reserved size on success.
1279 * Returns (ram_size + needed size) on failure.
1281 phys_size_t board_reserve_ram_top(phys_size_t ram_size)
1283 phys_size_t ram_top = ram_size;
1285 #if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
1286 ram_top = mc_get_dram_block_size();
1287 if (ram_top > ram_size)
1288 return ram_size + ram_top;
1290 ram_top = ram_size - ram_top;
1291 /* The start address of MC reserved memory needs to be aligned. */
1292 ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
1295 return ram_size - ram_top;
1298 phys_size_t get_effective_memsize(void)
1300 phys_size_t ea_size, rem = 0;
1303 * For ARMv8 SoCs, DDR memory is split into two or three regions. The
1304 * first region is the 2GB space at 0x8000_0000. Secure memory needs to
1305 * be allocated from the first region. If the memory extends to the second
1306 * region (or the third region if applicable), Management Complex (MC)
1307 * memory should be put into the highest region, i.e. the end of DDR
1308 * memory. CONFIG_MAX_MEM_MAPPED is set to the size of the first region so
1309 * that U-Boot doesn't relocate itself into a higher address. Should DDR be
1310 * configured to skip the first region, this function needs to be adjusted.
1313 if (gd->ram_size > CONFIG_MAX_MEM_MAPPED) {
1314 ea_size = CONFIG_MAX_MEM_MAPPED;
1315 rem = gd->ram_size - ea_size;
1317 ea_size = gd->ram_size;
1320 #ifdef CONFIG_SYS_MEM_RESERVE_SECURE
1321 /* Check if we have enough space for secure memory */
1322 if (ea_size > CONFIG_SYS_MEM_RESERVE_SECURE)
1323 ea_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
1325 printf("Error: No enough space for secure memory.\n");
1327 /* Check if we have enough memory for MC */
1328 if (rem < board_reserve_ram_top(rem)) {
1329 /* Not enough memory in high region to reserve */
1330 if (ea_size > board_reserve_ram_top(ea_size))
1331 ea_size -= board_reserve_ram_top(ea_size);
1333 printf("Error: No enough space for reserved memory.\n");
1339 #ifdef CONFIG_TFABOOT
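/*
 * When booting via TF-A, the DRAM layout is queried from the secure
 * firmware with the SMC_DRAM_BANK_INFO call: one form of the call returns
 * the total DRAM size, and per-bank calls return each bank's start address
 * and size (used by tfa_dram_init_banksize() below).
 */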
1340 phys_size_t tfa_get_dram_size(void)
1342 struct pt_regs regs;
1343 phys_size_t dram_size = 0;
1345 regs.regs[0] = SMC_DRAM_BANK_INFO;
1352 dram_size = regs.regs[1];
1356 static int tfa_dram_init_banksize(void)
1359 struct pt_regs regs;
1360 phys_size_t dram_size = tfa_get_dram_size();
1362 debug("dram_size %llx\n", dram_size);
1368 regs.regs[0] = SMC_DRAM_BANK_INFO;
1377 debug("bank[%d]: start %lx, size %lx\n", i, regs.regs[1],
1379 gd->bd->bi_dram[i].start = regs.regs[1];
1380 gd->bd->bi_dram[i].size = regs.regs[2];
1382 dram_size -= gd->bd->bi_dram[i].size;
1385 } while (dram_size);
1390 #if defined(CONFIG_RESV_RAM) && !defined(CONFIG_SPL_BUILD)
1391 /* Assign memory for MC */
1392 #ifdef CONFIG_SYS_DDR_BLOCK3_BASE
1393 if (gd->bd->bi_dram[2].size >=
1394 board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
1395 gd->arch.resv_ram = gd->bd->bi_dram[2].start +
1396 gd->bd->bi_dram[2].size -
1397 board_reserve_ram_top(gd->bd->bi_dram[2].size);
1401 if (gd->bd->bi_dram[1].size >=
1402 board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
1403 gd->arch.resv_ram = gd->bd->bi_dram[1].start +
1404 gd->bd->bi_dram[1].size -
1405 board_reserve_ram_top(gd->bd->bi_dram[1].size);
1406 } else if (gd->bd->bi_dram[0].size >
1407 board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
1408 gd->arch.resv_ram = gd->bd->bi_dram[0].start +
1409 gd->bd->bi_dram[0].size -
1410 board_reserve_ram_top(gd->bd->bi_dram[0].size);
1413 #endif /* CONFIG_RESV_RAM */
1419 int dram_init_banksize(void)
1421 #ifdef CONFIG_SYS_DP_DDR_BASE_PHY
1422 phys_size_t dp_ddr_size;
1425 #ifdef CONFIG_TFABOOT
1426 if (!tfa_dram_init_banksize())
1430 * gd->ram_size has the total size of DDR memory, less reserved secure
1431 * memory. The DDR extends from low region to high region(s) presuming
1432 * no hole is created with DDR configuration. gd->arch.secure_ram tracks
1433 * the location of secure memory. gd->arch.resv_ram tracks the location
1434 * of reserved memory for Management Complex (MC). Because gd->ram_size
1435 * is reduced by this function if secure memory is reserved, checking
1436 * gd->arch.secure_ram should be done to avoid running it repeatedly.
1439 #ifdef CONFIG_SYS_MEM_RESERVE_SECURE
1440 if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
1441 debug("No need to run again, skip %s\n", __func__);
1447 gd->bd->bi_dram[0].start = CONFIG_SYS_SDRAM_BASE;
1448 if (gd->ram_size > CONFIG_SYS_DDR_BLOCK1_SIZE) {
1449 gd->bd->bi_dram[0].size = CONFIG_SYS_DDR_BLOCK1_SIZE;
1450 gd->bd->bi_dram[1].start = CONFIG_SYS_DDR_BLOCK2_BASE;
1451 gd->bd->bi_dram[1].size = gd->ram_size -
1452 CONFIG_SYS_DDR_BLOCK1_SIZE;
1453 #ifdef CONFIG_SYS_DDR_BLOCK3_BASE
1454 if (gd->bi_dram[1].size > CONFIG_SYS_DDR_BLOCK2_SIZE) {
1455 gd->bd->bi_dram[2].start = CONFIG_SYS_DDR_BLOCK3_BASE;
1456 gd->bd->bi_dram[2].size = gd->bd->bi_dram[1].size -
1457 CONFIG_SYS_DDR_BLOCK2_SIZE;
1458 gd->bd->bi_dram[1].size = CONFIG_SYS_DDR_BLOCK2_SIZE;
1462 gd->bd->bi_dram[0].size = gd->ram_size;
1464 #ifdef CONFIG_SYS_MEM_RESERVE_SECURE
1465 if (gd->bd->bi_dram[0].size >
1466 CONFIG_SYS_MEM_RESERVE_SECURE) {
1467 gd->bd->bi_dram[0].size -=
1468 CONFIG_SYS_MEM_RESERVE_SECURE;
1469 gd->arch.secure_ram = gd->bd->bi_dram[0].start +
1470 gd->bd->bi_dram[0].size;
1471 gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
1472 gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
1474 #endif /* CONFIG_SYS_MEM_RESERVE_SECURE */
1476 #if defined(CONFIG_RESV_RAM) && !defined(CONFIG_SPL_BUILD)
1477 /* Assign memory for MC */
1478 #ifdef CONFIG_SYS_DDR_BLOCK3_BASE
1479 if (gd->bd->bi_dram[2].size >=
1480 board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
1481 gd->arch.resv_ram = gd->bd->bi_dram[2].start +
1482 gd->bd->bi_dram[2].size -
1483 board_reserve_ram_top(gd->bd->bi_dram[2].size);
1487 if (gd->bd->bi_dram[1].size >=
1488 board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
1489 gd->arch.resv_ram = gd->bd->bi_dram[1].start +
1490 gd->bd->bi_dram[1].size -
1491 board_reserve_ram_top(gd->bd->bi_dram[1].size);
1492 } else if (gd->bd->bi_dram[0].size >
1493 board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
1494 gd->arch.resv_ram = gd->bd->bi_dram[0].start +
1495 gd->bd->bi_dram[0].size -
1496 board_reserve_ram_top(gd->bd->bi_dram[0].size);
1499 #endif /* CONFIG_RESV_RAM */
1501 #ifdef CONFIG_SYS_DP_DDR_BASE_PHY
1502 #ifdef CONFIG_SYS_DDR_BLOCK3_BASE
1503 #error "This SoC shouldn't have DP DDR"
1505 if (soc_has_dp_ddr()) {
1506 /* initialize DP-DDR here */
1509 * The DDR controller uses 0 as the base address for binding.
1510 * It is mapped to CONFIG_SYS_DP_DDR_BASE for the core to access.
1512 dp_ddr_size = fsl_other_ddr_sdram(CONFIG_SYS_DP_DDR_BASE_PHY,
1514 CONFIG_DP_DDR_NUM_CTRLS,
1515 CONFIG_DP_DDR_DIMM_SLOTS_PER_CTLR,
1518 gd->bd->bi_dram[2].start = CONFIG_SYS_DP_DDR_BASE;
1519 gd->bd->bi_dram[2].size = dp_ddr_size;
1521 puts("Not detected");
1526 #ifdef CONFIG_SYS_MEM_RESERVE_SECURE
1527 debug("%s is called. gd->ram_size is reduced to %lu\n",
1528 __func__, (ulong)gd->ram_size);
1534 #if CONFIG_IS_ENABLED(EFI_LOADER)
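/*
 * Register the DRAM banks with the EFI memory map as conventional memory,
 * excluding the MC reserved region (gd->arch.resv_ram) and any DP-DDR bank.
 */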
1535 void efi_add_known_memory(void)
1538 phys_addr_t ram_start;
1539 phys_size_t ram_size;
1542 for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
1543 #ifdef CONFIG_SYS_DP_DDR_BASE_PHY
1544 #ifdef CONFIG_SYS_DDR_BLOCK3_BASE
1545 #error "This SoC shouldn't have DP DDR"
1548 continue; /* skip DP-DDR */
1550 ram_start = gd->bd->bi_dram[i].start;
1551 ram_size = gd->bd->bi_dram[i].size;
1552 #ifdef CONFIG_RESV_RAM
1553 if (gd->arch.resv_ram >= ram_start &&
1554 gd->arch.resv_ram < ram_start + ram_size)
1555 ram_size = gd->arch.resv_ram - ram_start;
1557 efi_add_memory_map(ram_start, ram_size,
1558 EFI_CONVENTIONAL_MEMORY);
1564 * Before the DDR size is known, the early MMU tables have DDR mapped as device memory
1565 * to avoid speculative access. To relocate U-Boot to DDR, "normal memory"
1566 * needs to be set for these mappings.
1567 * If a special case configures DDR with holes in the mapping, the holes need
1568 * to be marked as invalid. This is not implemented in this function.
1570 void update_early_mmu_table(void)
1572 if (!gd->arch.tlb_addr)
1575 if (gd->ram_size <= CONFIG_SYS_FSL_DRAM_SIZE1) {
1576 mmu_change_region_attr(
1577 CONFIG_SYS_SDRAM_BASE,
1579 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
1580 PTE_BLOCK_OUTER_SHARE |
1584 mmu_change_region_attr(
1585 CONFIG_SYS_SDRAM_BASE,
1586 CONFIG_SYS_DDR_BLOCK1_SIZE,
1587 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
1588 PTE_BLOCK_OUTER_SHARE |
1591 #ifdef CONFIG_SYS_DDR_BLOCK3_BASE
1592 #ifndef CONFIG_SYS_DDR_BLOCK2_SIZE
1593 #error "Missing CONFIG_SYS_DDR_BLOCK2_SIZE"
1595 if (gd->ram_size - CONFIG_SYS_DDR_BLOCK1_SIZE >
1596 CONFIG_SYS_DDR_BLOCK2_SIZE) {
1597 mmu_change_region_attr(
1598 CONFIG_SYS_DDR_BLOCK2_BASE,
1599 CONFIG_SYS_DDR_BLOCK2_SIZE,
1600 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
1601 PTE_BLOCK_OUTER_SHARE |
1604 mmu_change_region_attr(
1605 CONFIG_SYS_DDR_BLOCK3_BASE,
1607 CONFIG_SYS_DDR_BLOCK1_SIZE -
1608 CONFIG_SYS_DDR_BLOCK2_SIZE,
1609 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
1610 PTE_BLOCK_OUTER_SHARE |
1616 mmu_change_region_attr(
1617 CONFIG_SYS_DDR_BLOCK2_BASE,
1619 CONFIG_SYS_DDR_BLOCK1_SIZE,
1620 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
1621 PTE_BLOCK_OUTER_SHARE |
1628 __weak int dram_init(void)
1631 #if (!defined(CONFIG_SPL) && !defined(CONFIG_TFABOOT)) || \
1632 defined(CONFIG_SPL_BUILD)
1633 /* This will break-before-make the MMU mapping for DDR */
1634 update_early_mmu_table();
1640 #ifdef CONFIG_ARCH_MISC_INIT
1641 __weak int serdes_misc_init(void)
1646 int arch_misc_init(void)