// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2017-2021 NXP
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 */

#include <clock_legacy.h>
#include <asm/cache.h>
#include <asm/global_data.h>
#include <asm/ptrace.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/arch/cpu.h>
#include <asm/arch/speed.h>
#include <fsl_immap.h>
#include <asm/arch/mp.h>
#include <efi_loader.h>
#include <fsl-mc/fsl_mc.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif
#include <asm/armv8/sec_firmware.h>
#ifdef CONFIG_SYS_FSL_DDR
#include <fsl_ddr_sdram.h>
#endif
#include <asm/arch/clock.h>
#include <fsl_qbman.h>
#include <env_internal.h>
#ifdef CONFIG_CHAIN_OF_TRUST
#include <fsl_validate.h>
#endif
#include <linux/mii.h>

DECLARE_GLOBAL_DATA_PTR;

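/*
 * SVR-based SoC identification table: each entry maps an SVR SoC version
 * to its marketing name and core count. cpu_name() below searches it.
 */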
static struct cpu_type cpu_type_list[] = {
	CPU_TYPE_ENTRY(LS2080A, LS2080A, 8),
	CPU_TYPE_ENTRY(LS2085A, LS2085A, 8),
	CPU_TYPE_ENTRY(LS2045A, LS2045A, 4),
	CPU_TYPE_ENTRY(LS2088A, LS2088A, 8),
	CPU_TYPE_ENTRY(LS2084A, LS2084A, 8),
	CPU_TYPE_ENTRY(LS2048A, LS2048A, 4),
	CPU_TYPE_ENTRY(LS2044A, LS2044A, 4),
	CPU_TYPE_ENTRY(LS2081A, LS2081A, 8),
	CPU_TYPE_ENTRY(LS2041A, LS2041A, 4),
	CPU_TYPE_ENTRY(LS1043A, LS1043A, 4),
	CPU_TYPE_ENTRY(LS1043A, LS1043A_P23, 4),
	CPU_TYPE_ENTRY(LS1023A, LS1023A, 2),
	CPU_TYPE_ENTRY(LS1023A, LS1023A_P23, 2),
	CPU_TYPE_ENTRY(LS1046A, LS1046A, 4),
	CPU_TYPE_ENTRY(LS1026A, LS1026A, 2),
	CPU_TYPE_ENTRY(LS2040A, LS2040A, 4),
	CPU_TYPE_ENTRY(LS1012A, LS1012A, 1),
	CPU_TYPE_ENTRY(LS1017A, LS1017A, 1),
	CPU_TYPE_ENTRY(LS1018A, LS1018A, 1),
	CPU_TYPE_ENTRY(LS1027A, LS1027A, 2),
	CPU_TYPE_ENTRY(LS1028A, LS1028A, 2),
	CPU_TYPE_ENTRY(LS1088A, LS1088A, 8),
	CPU_TYPE_ENTRY(LS1084A, LS1084A, 8),
	CPU_TYPE_ENTRY(LS1048A, LS1048A, 4),
	CPU_TYPE_ENTRY(LS1044A, LS1044A, 4),
	CPU_TYPE_ENTRY(LX2160A, LX2160A, 16),
	CPU_TYPE_ENTRY(LX2120A, LX2120A, 12),
	CPU_TYPE_ENTRY(LX2080A, LX2080A, 8),
	CPU_TYPE_ENTRY(LX2162A, LX2162A, 16),
	CPU_TYPE_ENTRY(LX2122A, LX2122A, 12),
	CPU_TYPE_ENTRY(LX2082A, LX2082A, 8),
};

#define EARLY_PGTABLE_SIZE 0x5000

static struct mm_region early_map[] = {
#ifdef CONFIG_FSL_LSCH3
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
	  CONFIG_SYS_FSL_QSPI_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE},
#ifdef CONFIG_FSL_IFC
	/* For IFC Region #1, only the first 4MB is cache-enabled */
	{ CONFIG_SYS_FSL_IFC_BASE1, CONFIG_SYS_FSL_IFC_BASE1,
	  CONFIG_SYS_FSL_IFC_SIZE1_1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
	  CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
	  CONFIG_SYS_FSL_IFC_SIZE1 - CONFIG_SYS_FSL_IFC_SIZE1_1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FLASH_BASE, CONFIG_SYS_FSL_IFC_BASE1,
	  CONFIG_SYS_FSL_IFC_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
#if defined(CONFIG_TFABOOT) || \
	(defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
#else	/* Start with nGnRnE and PXN and UXN to prevent speculative access */
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
#endif
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_FSL_IFC
	/* Map IFC region #2 up to CONFIG_SYS_FLASH_BASE for NAND boot */
	{ CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
	  CONFIG_SYS_FLASH_BASE - CONFIG_SYS_FSL_IFC_BASE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#elif defined(CONFIG_FSL_LSCH2)
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
	  CONFIG_SYS_FSL_QSPI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
	  CONFIG_SYS_FSL_IFC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
#if defined(CONFIG_TFABOOT) || \
	(defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
#else	/* Start with nGnRnE and PXN and UXN to prevent speculative access */
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
#endif
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
	{},	/* list terminator */
};

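/*
 * Final MMU table used after relocation. The DRAM entries are placeholders:
 * final_mmu_setup() rewrites them with the actual bank addresses and sizes
 * from gd->bd->bi_dram[] before this table is installed.
 */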
static struct mm_region final_map[] = {
#ifdef CONFIG_FSL_LSCH3
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
	  CONFIG_SYS_FSL_QSPI_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE2, CONFIG_SYS_FSL_QSPI_BASE2,
	  CONFIG_SYS_FSL_QSPI_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
	  CONFIG_SYS_FSL_IFC_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_MC_BASE, CONFIG_SYS_FSL_MC_BASE,
	  CONFIG_SYS_FSL_MC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_NI_BASE, CONFIG_SYS_FSL_NI_BASE,
	  CONFIG_SYS_FSL_NI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	/* For QBMAN portal, only the first 64MB is cache-enabled */
	{ CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
	  CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  CONFIG_SYS_FSL_QBMAN_SIZE - CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
	  CONFIG_SYS_PCIE1_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
	  CONFIG_SYS_PCIE2_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
	{ CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
	  CONFIG_SYS_PCIE3_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
#ifdef CONFIG_SYS_PCIE4_PHYS_ADDR
	{ CONFIG_SYS_PCIE4_PHYS_ADDR, CONFIG_SYS_PCIE4_PHYS_ADDR,
	  CONFIG_SYS_PCIE4_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
#ifdef SYS_PCIE5_PHYS_ADDR
	{ SYS_PCIE5_PHYS_ADDR, SYS_PCIE5_PHYS_ADDR,
	  SYS_PCIE5_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
#ifdef SYS_PCIE6_PHYS_ADDR
	{ SYS_PCIE6_PHYS_ADDR, SYS_PCIE6_PHYS_ADDR,
	  SYS_PCIE6_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_WRIOP1_BASE, CONFIG_SYS_FSL_WRIOP1_BASE,
	  CONFIG_SYS_FSL_WRIOP1_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_AIOP1_BASE, CONFIG_SYS_FSL_AIOP1_BASE,
	  CONFIG_SYS_FSL_AIOP1_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_PEBUF_BASE, CONFIG_SYS_FSL_PEBUF_BASE,
	  CONFIG_SYS_FSL_PEBUF_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#elif defined(CONFIG_FSL_LSCH2)
	{ CONFIG_SYS_FSL_BOOTROM_BASE, CONFIG_SYS_FSL_BOOTROM_BASE,
	  CONFIG_SYS_FSL_BOOTROM_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
	  CONFIG_SYS_FSL_QSPI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
	  CONFIG_SYS_FSL_IFC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
	  CONFIG_SYS_FSL_QBMAN_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
	  CONFIG_SYS_PCIE1_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
	  CONFIG_SYS_PCIE2_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
	{ CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
	  CONFIG_SYS_PCIE3_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	{},	/* space holder for secure mem */
#endif
	{},
};

struct mm_region *mem_map = early_map;

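/*
 * Decode the SVR into a human-readable CPU name, e.g. "LS2088AE Rev1.1".
 * The "E" suffix conventionally marks parts with encryption (SEC) support.
 */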
void cpu_name(char *name)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	unsigned int i, svr, ver;

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
		if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
			strcpy(name, cpu_type_list[i].name);
#if defined(CONFIG_ARCH_LX2160A) || defined(CONFIG_ARCH_LX2162A)
			if (IS_C_PROCESSOR(svr))
				strcat(name, "C");
#endif

			if (IS_E_PROCESSOR(svr))
				strcat(name, "E");

			sprintf(name + strlen(name), " Rev%d.%d",
				SVR_MAJ(svr), SVR_MIN(svr));
			break;
		}

	if (i == ARRAY_SIZE(cpu_type_list))
		strcpy(name, "unknown");
}

#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
/*
 * To start MMU before DDR is available, we create MMU table in SRAM.
 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
 * levels of translation tables here to cover 40-bit address space.
 * We use 4KB granule size, with 40 bits physical address, T0SZ=24.
 * Addresses above EARLY_PGTABLE_SIZE (0x5000) are free for other purposes.
 * Note, the debug print in cache_v8.c is not usable for debugging
 * these early MMU tables because UART is not yet available.
 */
static inline void early_mmu_setup(void)
{
	unsigned int el = current_el();

	/* global data is already setup, no allocation yet */
	if (el == 3)
		gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
	else
		gd->arch.tlb_addr = CONFIG_SYS_DDR_SDRAM_BASE;
	gd->arch.tlb_fillptr = gd->arch.tlb_addr;
	gd->arch.tlb_size = EARLY_PGTABLE_SIZE;

	/* Create early page tables */
	setup_pgtables();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
			  get_tcr(el, NULL, NULL) &
			  ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}

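/*
 * LS2088A-family personalities use different PCIe windows than the LS2080A
 * defaults compiled into final_map[]; patch the affected entries in place
 * before the final MMU table is built.
 */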
static void fix_pcie_mmu_map(void)
{
#ifdef CONFIG_ARCH_LS2080A
	unsigned int i;
	u32 svr, ver;
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	/* Fix PCIe base and size for LS2088A */
	if ((ver == SVR_LS2088A) || (ver == SVR_LS2084A) ||
	    (ver == SVR_LS2048A) || (ver == SVR_LS2044A) ||
	    (ver == SVR_LS2081A) || (ver == SVR_LS2041A)) {
		for (i = 0; i < ARRAY_SIZE(final_map); i++) {
			switch (final_map[i].phys) {
			case CONFIG_SYS_PCIE1_PHYS_ADDR:
				final_map[i].phys = 0x2000000000ULL;
				final_map[i].virt = 0x2000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE2_PHYS_ADDR:
				final_map[i].phys = 0x2800000000ULL;
				final_map[i].virt = 0x2800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
#ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
			case CONFIG_SYS_PCIE3_PHYS_ADDR:
				final_map[i].phys = 0x3000000000ULL;
				final_map[i].virt = 0x3000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
#endif
#ifdef CONFIG_SYS_PCIE4_PHYS_ADDR
			case CONFIG_SYS_PCIE4_PHYS_ADDR:
				final_map[i].phys = 0x3800000000ULL;
				final_map[i].virt = 0x3800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
#endif
			default:
				break;
			}
		}
	}
#endif
}

/*
 * The final tables look similar to early tables, but different in detail.
 * These tables are in DRAM. Sub tables are added to enable cache for
 * QBMan and OCRAM.
 *
 * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
 * OCRAM will not be used for this purpose so gd->arch.secure_ram can't be 0.
 */
static inline void final_mmu_setup(void)
{
	u64 tlb_addr_save = gd->arch.tlb_addr;
	unsigned int el = current_el();
	int index;

	/* fix the final_map before filling in the block entries */
	fix_pcie_mmu_map();

	mem_map = final_map;

	/* Update mapping for DDR to actual size */
	for (index = 0; index < ARRAY_SIZE(final_map) - 2; index++) {
		/*
		 * Find the entry for DDR mapping and update the address and
		 * size. Zero-sized mapping will be skipped when creating MMU
		 * table.
		 */
		switch (final_map[index].virt) {
		case CONFIG_SYS_FSL_DRAM_BASE1:
			final_map[index].virt = gd->bd->bi_dram[0].start;
			final_map[index].phys = gd->bd->bi_dram[0].start;
			final_map[index].size = gd->bd->bi_dram[0].size;
			break;
#ifdef CONFIG_SYS_FSL_DRAM_BASE2
		case CONFIG_SYS_FSL_DRAM_BASE2:
#if (CONFIG_NR_DRAM_BANKS >= 2)
			final_map[index].virt = gd->bd->bi_dram[1].start;
			final_map[index].phys = gd->bd->bi_dram[1].start;
			final_map[index].size = gd->bd->bi_dram[1].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
		case CONFIG_SYS_FSL_DRAM_BASE3:
#if (CONFIG_NR_DRAM_BANKS >= 3)
			final_map[index].virt = gd->bd->bi_dram[2].start;
			final_map[index].phys = gd->bd->bi_dram[2].start;
			final_map[index].size = gd->bd->bi_dram[2].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
		default:
			break;
		}
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		if (el == 3) {
			/*
			 * Only use gd->arch.secure_ram if the address is
			 * recalculated. Align to 4KB for MMU table.
			 */
			/* put page tables in secure ram */
			index = ARRAY_SIZE(final_map) - 2;
			gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
			final_map[index].virt = gd->arch.secure_ram & ~0x3;
			final_map[index].phys = final_map[index].virt;
			final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
			final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
			gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
			tlb_addr_save = gd->arch.tlb_addr;
		} else {
			/* Use allocated (board_f.c) memory for TLB */
			tlb_addr_save = gd->arch.tlb_allocated;
			gd->arch.tlb_addr = tlb_addr_save;
		}
	}
#endif

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr_save;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	setup_pgtables();
	gd->arch.tlb_addr = tlb_addr_save;

	/* Disable cache and MMU */
	dcache_disable();	/* TLBs are invalidated */
	invalidate_icache_all();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}

u64 get_page_table_size(void)
{
	return 0x10000;
}

int arch_cpu_init(void)
{
	/*
	 * This function is called before U-Boot relocates itself to speed up
	 * the system. It is not necessary to run if performance is not
	 * critical. Skip if MMU is already enabled by SPL or other means.
	 */
	if (get_sctlr() & CR_M)
		return 0;

	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);

	return 0;
}

void mmu_setup(void)
{
	final_mmu_setup();
}

/*
 * This function is called from common/board_r.c.
 * It recreates MMU table in main memory.
 */
void enable_caches(void)
{
	mmu_setup();
	__asm_invalidate_tlb_all();
	icache_enable();
	dcache_enable();
}
#endif	/* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

#ifdef CONFIG_TFABOOT
enum boot_src __get_boot_src(u32 porsr1)
{
	enum boot_src src = BOOT_SOURCE_RESERVED;
	u32 rcw_src = (porsr1 & RCW_SRC_MASK) >> RCW_SRC_BIT;
#if !defined(CONFIG_NXP_LSCH3_2)
	u32 val;
#endif

	debug("%s: rcw_src 0x%x\n", __func__, rcw_src);

#if defined(CONFIG_FSL_LSCH3)
#if defined(CONFIG_NXP_LSCH3_2)
	switch (rcw_src) {
	case RCW_SRC_SDHC1_VAL:
		src = BOOT_SOURCE_SD_MMC;
		break;
	case RCW_SRC_SDHC2_VAL:
		src = BOOT_SOURCE_SD_MMC2;
		break;
	case RCW_SRC_I2C1_VAL:
		src = BOOT_SOURCE_I2C1_EXTENDED;
		break;
	case RCW_SRC_FLEXSPI_NAND2K_VAL:
		src = BOOT_SOURCE_XSPI_NAND;
		break;
	case RCW_SRC_FLEXSPI_NAND4K_VAL:
		src = BOOT_SOURCE_XSPI_NAND;
		break;
	case RCW_SRC_RESERVED_1_VAL:
		src = BOOT_SOURCE_RESERVED;
		break;
	case RCW_SRC_FLEXSPI_NOR_24B:
		src = BOOT_SOURCE_XSPI_NOR;
		break;
	default:
		src = BOOT_SOURCE_RESERVED;
	}
#else
	val = rcw_src & RCW_SRC_TYPE_MASK;
	if (val == RCW_SRC_NOR_VAL) {
		val = rcw_src & NOR_TYPE_MASK;

		switch (val) {
		case NOR_16B_VAL:
		case NOR_32B_VAL:
			src = BOOT_SOURCE_IFC_NOR;
			break;
		default:
			src = BOOT_SOURCE_RESERVED;
		}
	} else {
		/* RCW SRC Serial Flash */
		val = rcw_src & RCW_SRC_SERIAL_MASK;
		switch (val) {
		case RCW_SRC_QSPI_VAL:
			/* RCW SRC Serial NOR (QSPI) */
			src = BOOT_SOURCE_QSPI_NOR;
			break;
		case RCW_SRC_SD_CARD_VAL:
			/* RCW SRC SD Card */
			src = BOOT_SOURCE_SD_MMC;
			break;
		case RCW_SRC_EMMC_VAL:
			/* RCW SRC EMMC */
			src = BOOT_SOURCE_SD_MMC;
			break;
		case RCW_SRC_I2C1_VAL:
			/* RCW SRC I2C1 Extended */
			src = BOOT_SOURCE_I2C1_EXTENDED;
			break;
		default:
			src = BOOT_SOURCE_RESERVED;
		}
	}
#endif
#elif defined(CONFIG_FSL_LSCH2)
	/* RCW SRC NAND */
	val = rcw_src & RCW_SRC_NAND_MASK;
	if (val == RCW_SRC_NAND_VAL) {
		val = rcw_src & NAND_RESERVED_MASK;
		if (val != NAND_RESERVED_1 && val != NAND_RESERVED_2)
			src = BOOT_SOURCE_IFC_NAND;
	} else {
		/* RCW SRC NOR */
		val = rcw_src & RCW_SRC_NOR_MASK;
		if (val == NOR_8B_VAL || val == NOR_16B_VAL) {
			src = BOOT_SOURCE_IFC_NOR;
		} else {
			switch (rcw_src) {
			case QSPI_VAL1:
			case QSPI_VAL2:
				src = BOOT_SOURCE_QSPI_NOR;
				break;
			case SD_VAL:
				src = BOOT_SOURCE_SD_MMC;
				break;
			default:
				src = BOOT_SOURCE_RESERVED;
			}
		}
	}
#endif

	if (CONFIG_IS_ENABLED(SYS_FSL_ERRATUM_A010539) && !rcw_src)
		src = BOOT_SOURCE_QSPI_NOR;

	debug("%s: src 0x%x\n", __func__, src);

	return src;
}

enum boot_src get_boot_src(void)
{
	struct pt_regs regs;
	u32 porsr1 = 0;

#if defined(CONFIG_FSL_LSCH3)
	u32 __iomem *dcfg_ccsr = (u32 __iomem *)DCFG_BASE;
#elif defined(CONFIG_FSL_LSCH2)
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
#endif

	if (current_el() == 2) {
		regs.regs[0] = SIP_SVC_RCW;

		smc_call(&regs);
		if (!regs.regs[0])
			porsr1 = regs.regs[1];
	}

	if (current_el() == 3 || !porsr1) {
#ifdef CONFIG_FSL_LSCH3
		porsr1 = in_le32(dcfg_ccsr + DCFG_PORSR1 / 4);
#elif defined(CONFIG_FSL_LSCH2)
		porsr1 = in_be32(&gur->porsr1);
#endif
	}

	debug("%s: porsr1 0x%x\n", __func__, porsr1);

	return __get_boot_src(porsr1);
}

#ifdef CONFIG_ENV_IS_IN_MMC
int mmc_get_env_dev(void)
{
	enum boot_src src = get_boot_src();
	int dev = CONFIG_SYS_MMC_ENV_DEV;

	switch (src) {
	case BOOT_SOURCE_SD_MMC:
		dev = 0;
		break;
	case BOOT_SOURCE_SD_MMC2:
		dev = 1;
		break;
	default:
		break;
	}

	return dev;
}
#endif

enum env_location env_get_location(enum env_operation op, int prio)
{
	enum boot_src src = get_boot_src();
	enum env_location env_loc = ENVL_NOWHERE;

	if (prio)
		return ENVL_UNKNOWN;

#ifdef CONFIG_ENV_IS_NOWHERE
	return env_loc;
#endif

	switch (src) {
	case BOOT_SOURCE_IFC_NOR:
		env_loc = ENVL_FLASH;
		break;
	case BOOT_SOURCE_QSPI_NOR:
		/* FALLTHROUGH */
	case BOOT_SOURCE_XSPI_NOR:
		env_loc = ENVL_SPI_FLASH;
		break;
	case BOOT_SOURCE_IFC_NAND:
		/* FALLTHROUGH */
	case BOOT_SOURCE_QSPI_NAND:
		/* FALLTHROUGH */
	case BOOT_SOURCE_XSPI_NAND:
		env_loc = ENVL_NAND;
		break;
	case BOOT_SOURCE_SD_MMC:
		/* FALLTHROUGH */
	case BOOT_SOURCE_SD_MMC2:
		env_loc = ENVL_MMC;
		break;
	case BOOT_SOURCE_I2C1_EXTENDED:
		/* FALLTHROUGH */
	default:
		break;
	}

	return env_loc;
}
#endif	/* CONFIG_TFABOOT */

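/*
 * Topology helpers below walk the GUR topology (TP) registers: each cluster
 * register packs up to TP_INIT_PER_CLUSTER initiator indexes, and the
 * TP_CLUSTER_EOC flag marks the last cluster. initiator_type() returns the
 * TP_ITYP entry for one initiator, or 0 if that initiator is not available.
 */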
u32 initiator_type(u32 cluster, int init_id)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type = 0;

	type = gur_in32(&gur->tp_ityp[idx]);
	if (type & TP_ITYP_AV)
		return type;

	return 0;
}

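/*
 * Return a mask of the physical positions of all available ARM cores,
 * keeping holes for disabled cores (position = cluster index *
 * TP_INIT_PER_CLUSTER + slot).
 */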
u32 cpu_pos_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type && (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM))
				mask |= 1 << (i * TP_INIT_PER_CLUSTER + j);
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

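/*
 * Return a mask of the available ARM cores, with bit positions assigned by
 * counting every available initiator in order (not by physical position).
 */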
u32 cpu_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

/*
 * Return the number of cores on this SOC.
 */
int cpu_numcores(void)
{
	return hweight32(cpu_mask());
}

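/*
 * Map a logical core number to its cluster index; fsl_qoriq_core_to_type()
 * below maps it to its TP_ITYP type word instead. Both walk the same
 * cluster registers and return -1 if the core cannot be found.
 */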
int fsl_qoriq_core_to_cluster(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (initiator_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}

u32 fsl_qoriq_core_to_type(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (count == core)
					return type;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}

#ifndef CONFIG_FSL_LSCH3
u32 get_svr(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	return gur_in32(&gur->svr);
}
#endif

#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type, rcw, svr = gur_in32(&gur->svr);

	puts("SoC: ");

	cpu_name(buf);
	printf(" %s (0x%x)\n", buf, svr);
	memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		if (!(i % 3))
			puts("\n       ");
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz  ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
		       (type == TY_ITYP_VER_A57 ? "A57" :
		       (type == TY_ITYP_VER_A72 ? "A72" : "   "))),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}
	/* Display platform clock as Bus frequency. */
	printf("\n       Bus:      %-4s MHz  ",
	       strmhz(buf, sysinfo.freq_systembus / CONFIG_SYS_FSL_PCLK_DIV));
	printf("DDR:      %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
#ifdef CONFIG_SYS_DPAA_FMAN
	printf("  FMAN:     %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
#endif
#ifdef CONFIG_SYS_FSL_HAS_DP_DDR
	if (soc_has_dp_ddr()) {
		printf("     DP-DDR:   %-4s MT/s",
		       strmhz(buf, sysinfo.freq_ddrbus2));
	}
#endif
	puts("\n");

	/*
	 * Display the RCW, so that no one gets confused as to what RCW
	 * we're actually using for this boot.
	 */
	puts("Reset Configuration Word (RCW):");
	for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
		rcw = gur_in32(&gur->rcwsr[i]);

		if ((i % 4) == 0)
			printf("\n       %08x:", i * 4);
		printf(" %08x", rcw);
	}
	puts("\n");

	return 0;
}
#endif

#ifdef CONFIG_FSL_ESDHC
int cpu_mmc_init(struct bd_info *bis)
{
	return fsl_esdhc_mmc_init(bis);
}
#endif

int cpu_eth_init(struct bd_info *bis)
{
	int error = 0;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	error = fsl_mc_ldpaa_init(bis);
#endif
#ifdef CONFIG_FMAN_ENET
	fm_standard_init(bis);
#endif
	return error;
}

int check_psci(void)
{
	unsigned int psci_ver;

	psci_ver = sec_firmware_support_psci_version();
	if (psci_ver == PSCI_INVALID_VER)
		return 1;

	return 0;
}

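/*
 * Disable hardware prefetch on selected cores via an NXP SIP SMC call,
 * driven by the "core_prefetch" hwconfig subarg. For example (assuming the
 * TF-A SIP service is present), something like:
 *	=> setenv hwconfig "core_prefetch:disable=0x02"
 * would request prefetch disable for core 1; core 0 cannot be disabled.
 */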
static void config_core_prefetch(void)
{
	char *buf = NULL;
	char buffer[HWCONFIG_BUFFER_SIZE];
	const char *prefetch_arg = NULL;
	size_t arglen;
	unsigned int mask;
	struct pt_regs regs;

	if (env_get_f("hwconfig", buffer, sizeof(buffer)) > 0)
		buf = buffer;
	else
		return;

	prefetch_arg = hwconfig_subarg_f("core_prefetch", "disable",
					 &arglen, buf);

	if (prefetch_arg) {
		mask = simple_strtoul(prefetch_arg, NULL, 0) & 0xff;
		if (mask & 0x1) {
			printf("Core0 prefetch can't be disabled\n");
			return;
		}

#define SIP_PREFETCH_DISABLE_64 0xC200FF13
		regs.regs[0] = SIP_PREFETCH_DISABLE_64;
		regs.regs[1] = mask;
		smc_call(&regs);

		if (regs.regs[0])
			printf("Prefetch disable config failed for mask ");
		else
			printf("Prefetch disable config passed for mask ");
		printf("0x%x\n", mask);
	}
}

#ifdef CONFIG_PCIE_ECAM_GENERIC
__weak void set_ecam_icids(void)
{
}
#endif

int arch_early_init_r(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
	u32 svr_dev_id;
	/*
	 * erratum A009635 is valid only for LS2080A SoC and
	 * its personalities
	 */
	svr_dev_id = get_svr();
	if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
		erratum_a009635();
#endif
#if defined(CONFIG_SYS_FSL_ERRATUM_A009942) && defined(CONFIG_SYS_FSL_DDR)
	erratum_a009942_check_cpo();
#endif

	if (check_psci()) {
		debug("PSCI: PSCI does not exist.\n");

		/* if PSCI does not exist, boot secondary cores here */
		if (fsl_layerscape_wake_seconday_cores())
			printf("Did not wake secondary cores\n");
	}

	config_core_prefetch();

#ifdef CONFIG_SYS_HAS_SERDES
	fsl_serdes_init();
#endif
#ifdef CONFIG_SYS_FSL_HAS_RGMII
	/* some dpmacs in armv8a based freescale layerscape SOCs can be
	 * configured via both serdes(sgmii, 10gbase-r, xlaui etc) bits and via
	 * EC*_PMUX(rgmii) bits in RCW.
	 * e.g. dpmac 17 and 18 in LX2160A can be configured as SGMII from
	 * serdes bits and as RGMII via EC1_PMUX/EC2_PMUX bits.
	 * Now if a dpmac is enabled as RGMII through ECx_PMUX then it takes
	 * precedence over the SerDes protocol, i.e. in LX2160A if we select a
	 * serdes protocol that configures dpmac17 as SGMII and set EC1_PMUX as
	 * RGMII, then the dpmac is RGMII and not SGMII.
	 *
	 * Therefore, even though fsl_rgmii_init is after fsl_serdes_init,
	 * the dpmac will be enabled as RGMII even if it was
	 * also enabled before as SGMII. If ECx_PMUX is not configured for
	 * RGMII, DPMAC will remain configured as SGMII from fsl_serdes_init().
	 */
	fsl_rgmii_init();
#endif
#ifdef CONFIG_FMAN_ENET
#ifndef CONFIG_DM_ETH
	fman_enet_init();
#endif
#endif
#ifdef CONFIG_SYS_DPAA_QBMAN
	setup_qbman_portals();
#endif
#ifdef CONFIG_PCIE_ECAM_GENERIC
	set_ecam_icids();
#endif

	return 0;
}

int timer_init(void)
{
	u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
#ifdef CONFIG_FSL_LSCH3
	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#endif
#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A) || \
	defined(CONFIG_ARCH_LS1028A)
	u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
	u32 svr_dev_id;
#endif
#ifdef COUNTER_FREQUENCY_REAL
	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

	/* Update with accurate clock frequency */
	if (current_el() == 3)
		asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

#ifdef CONFIG_FSL_LSCH3
	/* Enable timebase for all clusters.
	 * It is safe to do so even if some clusters are not enabled.
	 */
	out_le32(cltbenr, 0xf);
#endif

#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A) || \
	defined(CONFIG_ARCH_LS1028A)
	/*
	 * In certain Layerscape SoCs, the clock for each core has an enable
	 * bit in the PMU Physical Core Time Base Enable
	 * Register (PCTBENR), which allows the watchdog to operate.
	 */
	setbits_le32(pctbenr, 0xff);
	/*
	 * For LS2080A SoC and its personalities, timer controller
	 * offset is different
	 */
	svr_dev_id = get_svr();
	if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
		cntcr = (u32 *)SYS_FSL_LS2080A_LS2085A_TIMER_ADDR;
#endif

	/* Enable clock for timer.
	 * This is a global setting.
	 */
	out_le32(cntcr, 0x1);

	return 0;
}

__efi_runtime_data u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;

void __efi_runtime reset_cpu(void)
{
#if defined(CONFIG_ARCH_LX2160A) || defined(CONFIG_ARCH_LX2162A)
	/* clear the RST_REQ_MSK and SW_RST_REQ */
	out_le32(rstcr, 0x0);

	/* initiate the sw reset request */
	out_le32(rstcr, 0x1);
#else
	u32 val;

	/* Raise RESET_REQ_B */
	val = scfg_in32(rstcr);
	val |= 0x02;
	scfg_out32(rstcr, val);
#endif
}

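/*
 * EFI runtime reset support: rstcr is __efi_runtime_data, so
 * efi_reset_system() can still reach the reset control register after the
 * OS calls SetVirtualAddressMap(); efi_reset_system_init() registers the
 * MMIO region so it is remapped along with the runtime code.
 */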
#if defined(CONFIG_EFI_LOADER) && !defined(CONFIG_PSCI_RESET)

void __efi_runtime EFIAPI efi_reset_system(
		       enum efi_reset_type reset_type,
		       efi_status_t reset_status,
		       unsigned long data_size, void *reset_data)
{
	switch (reset_type) {
	case EFI_RESET_COLD:
	case EFI_RESET_WARM:
	case EFI_RESET_PLATFORM_SPECIFIC:
		reset_cpu();
		break;
	case EFI_RESET_SHUTDOWN:
		/* Nothing we can do */
		break;
	}

	while (1) { }
}

efi_status_t efi_reset_system_init(void)
{
	return efi_add_runtime_mmio(&rstcr, sizeof(*rstcr));
}

#endif

/*
 * Calculate reserved memory with the given memory bank.
 * Return aligned memory size on success.
 * Return (ram_size + needed size) for failure.
 */
phys_size_t board_reserve_ram_top(phys_size_t ram_size)
{
	phys_size_t ram_top = ram_size;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	ram_top = mc_get_dram_block_size();
	if (ram_top > ram_size)
		return ram_size + ram_top;

	ram_top = ram_size - ram_top;
	/* The start address of MC reserved memory needs to be aligned. */
	ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
#endif

	return ram_size - ram_top;
}

phys_size_t get_effective_memsize(void)
{
	phys_size_t ea_size, rem = 0;

	/*
	 * For ARMv8 SoCs, DDR memory is split into two or three regions. The
	 * first region is 2GB space at 0x8000_0000. Secure memory needs to be
	 * allocated from the first region. If the memory extends to the second
	 * region (or the third region if applicable), Management Complex (MC)
	 * memory should be put into the highest region, i.e. the end of DDR
	 * memory. CONFIG_MAX_MEM_MAPPED is set to the size of first region so
	 * U-Boot doesn't relocate itself into higher address. Should DDR be
	 * configured to skip the first region, this function needs to be
	 * adjusted.
	 */
	if (gd->ram_size > CONFIG_MAX_MEM_MAPPED) {
		ea_size = CONFIG_MAX_MEM_MAPPED;
		rem = gd->ram_size - ea_size;
	} else {
		ea_size = gd->ram_size;
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	/* Check if we have enough space for secure memory */
	if (ea_size > CONFIG_SYS_MEM_RESERVE_SECURE)
		ea_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	else
		printf("Error: Not enough space for secure memory.\n");
#endif
	/* Check if we have enough memory for MC */
	if (rem < board_reserve_ram_top(rem)) {
		/* Not enough memory in high region to reserve */
		if (ea_size > board_reserve_ram_top(ea_size))
			ea_size -= board_reserve_ram_top(ea_size);
		else
			printf("Error: Not enough space for reserved memory.\n");
	}

	return ea_size;
}

#ifdef CONFIG_TFABOOT
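/*
 * With TF-A, the DRAM layout is queried via the SMC_DRAM_BANK_INFO service:
 * x1 = -1 returns the total DRAM size, x1 = bank index returns that bank's
 * start address and size (see tfa_dram_init_banksize() below).
 */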
phys_size_t tfa_get_dram_size(void)
{
	struct pt_regs regs;
	phys_size_t dram_size = 0;

	regs.regs[0] = SMC_DRAM_BANK_INFO;
	regs.regs[1] = -1;

	smc_call(&regs);
	if (regs.regs[0])
		return 0;

	dram_size = regs.regs[1];

	return dram_size;
}

static int tfa_dram_init_banksize(void)
{
	int i = 0, ret = 0;
	struct pt_regs regs;
	phys_size_t dram_size = tfa_get_dram_size();

	debug("dram_size %llx\n", dram_size);

	if (!dram_size)
		return -EINVAL;

	do {
		regs.regs[0] = SMC_DRAM_BANK_INFO;
		regs.regs[1] = i;

		smc_call(&regs);
		if (regs.regs[0]) {
			ret = -EINVAL;
			break;
		}

		debug("bank[%d]: start %lx, size %lx\n", i, regs.regs[1],
		      regs.regs[2]);
		gd->bd->bi_dram[i].start = regs.regs[1];
		gd->bd->bi_dram[i].size = regs.regs[2];

		dram_size -= gd->bd->bi_dram[i].size;

		i++;
	} while (dram_size);

	if (i > 0)
		ret = 0;

#if defined(CONFIG_RESV_RAM) && !defined(CONFIG_SPL_BUILD)
	/* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >=
	    board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
		gd->arch.resv_ram = gd->bd->bi_dram[2].start +
				    gd->bd->bi_dram[2].size -
				    board_reserve_ram_top(gd->bd->bi_dram[2].size);
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >=
		    board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[1].start +
				gd->bd->bi_dram[1].size -
				board_reserve_ram_top(gd->bd->bi_dram[1].size);
		} else if (gd->bd->bi_dram[0].size >
			   board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[0].start +
				gd->bd->bi_dram[0].size -
				board_reserve_ram_top(gd->bd->bi_dram[0].size);
		}
	}
#endif	/* CONFIG_RESV_RAM */

	return ret;
}
#endif	/* CONFIG_TFABOOT */

int dram_init_banksize(void)
{
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
	phys_size_t dp_ddr_size;
#endif

#ifdef CONFIG_TFABOOT
	if (!tfa_dram_init_banksize())
		return 0;
#endif

	/*
	 * gd->ram_size has the total size of DDR memory, less reserved secure
	 * memory. The DDR extends from low region to high region(s) presuming
	 * no hole is created with DDR configuration. gd->arch.secure_ram tracks
	 * the location of secure memory. gd->arch.resv_ram tracks the location
	 * of reserved memory for Management Complex (MC). Because gd->ram_size
	 * is reduced by this function if secure memory is reserved, checking
	 * gd->arch.secure_ram should be done to avoid running it repeatedly.
	 */
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		debug("No need to run again, skip %s\n", __func__);

		return 0;
	}
#endif

	gd->bd->bi_dram[0].start = CONFIG_SYS_SDRAM_BASE;
	if (gd->ram_size > CONFIG_SYS_DDR_BLOCK1_SIZE) {
		gd->bd->bi_dram[0].size = CONFIG_SYS_DDR_BLOCK1_SIZE;
		gd->bd->bi_dram[1].start = CONFIG_SYS_DDR_BLOCK2_BASE;
		gd->bd->bi_dram[1].size = gd->ram_size -
					  CONFIG_SYS_DDR_BLOCK1_SIZE;
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
		if (gd->bd->bi_dram[1].size > CONFIG_SYS_DDR_BLOCK2_SIZE) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DDR_BLOCK3_BASE;
			gd->bd->bi_dram[2].size = gd->bd->bi_dram[1].size -
						  CONFIG_SYS_DDR_BLOCK2_SIZE;
			gd->bd->bi_dram[1].size = CONFIG_SYS_DDR_BLOCK2_SIZE;
		}
#endif
	} else {
		gd->bd->bi_dram[0].size = gd->ram_size;
	}
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->bd->bi_dram[0].size >
				CONFIG_SYS_MEM_RESERVE_SECURE) {
		gd->bd->bi_dram[0].size -=
				CONFIG_SYS_MEM_RESERVE_SECURE;
		gd->arch.secure_ram = gd->bd->bi_dram[0].start +
				      gd->bd->bi_dram[0].size;
		gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
		gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	}
#endif	/* CONFIG_SYS_MEM_RESERVE_SECURE */

#if defined(CONFIG_RESV_RAM) && !defined(CONFIG_SPL_BUILD)
	/* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >=
	    board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
		gd->arch.resv_ram = gd->bd->bi_dram[2].start +
				    gd->bd->bi_dram[2].size -
				    board_reserve_ram_top(gd->bd->bi_dram[2].size);
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >=
		    board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[1].start +
				gd->bd->bi_dram[1].size -
				board_reserve_ram_top(gd->bd->bi_dram[1].size);
		} else if (gd->bd->bi_dram[0].size >
			   board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[0].start +
				gd->bd->bi_dram[0].size -
				board_reserve_ram_top(gd->bd->bi_dram[0].size);
		}
	}
#endif	/* CONFIG_RESV_RAM */

#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
	if (soc_has_dp_ddr()) {
		/* initialize DP-DDR here */
		puts("DP-DDR:  ");
		/*
		 * The DDR controller uses 0 as the base address for binding.
		 * It is mapped to CONFIG_SYS_DP_DDR_BASE for the core to access.
		 */
		dp_ddr_size = fsl_other_ddr_sdram(CONFIG_SYS_DP_DDR_BASE_PHY,
						  CONFIG_DP_DDR_CTRL,
						  CONFIG_DP_DDR_NUM_CTRLS,
						  CONFIG_DP_DDR_DIMM_SLOTS_PER_CTLR,
						  NULL, NULL, NULL);
		if (dp_ddr_size) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DP_DDR_BASE;
			gd->bd->bi_dram[2].size = dp_ddr_size;
		} else {
			puts("Not detected");
		}
	}
#endif

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	debug("%s is called. gd->ram_size is reduced to %lu\n",
	      __func__, (ulong)gd->ram_size);
#endif

	return 0;
}

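/*
 * Register the DRAM banks with the EFI memory map. The MC reserved region
 * at the top of a bank (gd->arch.resv_ram) is clipped off so EFI
 * applications and the OS treat it as unavailable.
 */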
#if CONFIG_IS_ENABLED(EFI_LOADER)
void efi_add_known_memory(void)
{
	int i;
	phys_addr_t ram_start;
	phys_size_t ram_size;

	/* Add RAM */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
		if (i == 2)
			continue;	/* skip DP-DDR */
#endif
		ram_start = gd->bd->bi_dram[i].start;
		ram_size = gd->bd->bi_dram[i].size;
#ifdef CONFIG_RESV_RAM
		if (gd->arch.resv_ram >= ram_start &&
		    gd->arch.resv_ram < ram_start + ram_size)
			ram_size = gd->arch.resv_ram - ram_start;
#endif
		efi_add_memory_map(ram_start, ram_size,
				   EFI_CONVENTIONAL_MEMORY);
	}
}
#endif

/*
 * Before the DDR size is known, the early MMU table has DDR mapped as
 * device memory to avoid speculative access. To relocate U-Boot to DDR,
 * "normal memory" needs to be set for these mappings.
 * If a special case configures DDR with holes in the mapping, the holes
 * need to be marked as invalid. This is not implemented in this function.
 */
void update_early_mmu_table(void)
{
	if (!gd->arch.tlb_addr)
		return;

	if (gd->ram_size <= CONFIG_SYS_FSL_DRAM_SIZE1) {
		mmu_change_region_attr(
					CONFIG_SYS_SDRAM_BASE,
					gd->ram_size,
					PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
					PTE_BLOCK_OUTER_SHARE		|
					PTE_BLOCK_NS			|
					PTE_TYPE_VALID);
	} else {
		mmu_change_region_attr(
					CONFIG_SYS_SDRAM_BASE,
					CONFIG_SYS_DDR_BLOCK1_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
					PTE_BLOCK_OUTER_SHARE		|
					PTE_BLOCK_NS			|
					PTE_TYPE_VALID);
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#ifndef CONFIG_SYS_DDR_BLOCK2_SIZE
#error "Missing CONFIG_SYS_DDR_BLOCK2_SIZE"
#endif
		if (gd->ram_size - CONFIG_SYS_DDR_BLOCK1_SIZE >
		    CONFIG_SYS_DDR_BLOCK2_SIZE) {
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK2_BASE,
					CONFIG_SYS_DDR_BLOCK2_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
					PTE_BLOCK_OUTER_SHARE		|
					PTE_BLOCK_NS			|
					PTE_TYPE_VALID);
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK3_BASE,
					gd->ram_size -
					CONFIG_SYS_DDR_BLOCK1_SIZE -
					CONFIG_SYS_DDR_BLOCK2_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
					PTE_BLOCK_OUTER_SHARE		|
					PTE_BLOCK_NS			|
					PTE_TYPE_VALID);
		} else
#endif
		{
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK2_BASE,
					gd->ram_size -
					CONFIG_SYS_DDR_BLOCK1_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
					PTE_BLOCK_OUTER_SHARE		|
					PTE_BLOCK_NS			|
					PTE_TYPE_VALID);
		}
	}
}

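/*
 * Weak default dram_init: size DDR through the Freescale DDR driver and,
 * when not booting via SPL or TF-A (or when in the SPL build itself),
 * remap the just-initialized DDR as normal memory in the early MMU table.
 */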
__weak int dram_init(void)
{
#ifdef CONFIG_SYS_FSL_DDR
	fsl_initdram();
#if (!defined(CONFIG_SPL) && !defined(CONFIG_TFABOOT)) || \
	defined(CONFIG_SPL_BUILD)
	/* This will break-before-make MMU for DDR */
	update_early_mmu_table();
#endif
#endif

	return 0;
}

#ifdef CONFIG_ARCH_MISC_INIT
__weak int serdes_misc_init(void)
{
	return 0;
}

int arch_misc_init(void)
{
	if (IS_ENABLED(CONFIG_FSL_CAAM)) {
		struct udevice *dev;
		int ret;

		ret = uclass_get_device_by_driver(UCLASS_MISC, DM_DRIVER_GET(caam_jr), &dev);
		if (ret)
			printf("Failed to initialize %s: %d\n", dev->name, ret);
	}

	serdes_misc_init();

	return 0;
}
#endif