/*
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 * SPDX-License-Identifier:	GPL-2.0+
 */
#include <common.h>
#include <asm/io.h>
#include <asm/errno.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/arch/cpu.h>
#include <asm/arch/speed.h>
#include <asm/arch/mp.h>
#include <fm_eth.h>
#include <fsl_debug_server.h>
#include <fsl-mc/fsl_mc.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif

DECLARE_GLOBAL_DATA_PTR;

void cpu_name(char *name)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	unsigned int i, svr, ver;

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
		if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
			strcpy(name, cpu_type_list[i].name);
			if (IS_E_PROCESSOR(svr))
				strcat(name, "E");
			break;
		}

	if (i == ARRAY_SIZE(cpu_type_list))
		strcpy(name, "unknown");
}

#ifndef CONFIG_SYS_DCACHE_OFF
/*
 * Set the block entries according to the information of the table.
 */
static int set_block_entry(const struct sys_mmu_table *list,
			   struct table_info *table)
{
	u64 block_size = 0, block_shift = 0;
	u64 block_addr, index;
	int j;

	if (table->entry_size == BLOCK_SIZE_L1) {
		block_size = BLOCK_SIZE_L1;
		block_shift = SECTION_SHIFT_L1;
	} else if (table->entry_size == BLOCK_SIZE_L2) {
		block_size = BLOCK_SIZE_L2;
		block_shift = SECTION_SHIFT_L2;
	} else {
		return -EINVAL;
	}

	block_addr = list->phys_addr;
	index = (list->virt_addr - table->table_base) >> block_shift;

	for (j = 0; j < (list->size >> block_shift); j++) {
		set_pgtable_section(table->ptr, index, block_addr,
				    list->memory_type, list->attribute);
		block_addr += block_size;
		index++;
	}

	return 0;
}

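/*
 * Worked example for set_block_entry(), assuming a level 2 table
 * (entry_size = BLOCK_SIZE_L2, 2MB blocks, shift 21) with table_base
 * 0x80000000: an entry with virt_addr 0x80200000 and size 0x400000
 * starts at index (0x80200000 - 0x80000000) >> 21 = 1 and fills block
 * entries 1 and 2, advancing the physical address by 2MB per entry.
 */
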
/*
 * Find the corresponding table entry for the list.
 */
static int find_table(const struct sys_mmu_table *list,
		      struct table_info *table, u64 *level0_table)
{
	u64 index = 0, level = 0;
	u64 *level_table = level0_table;
	u64 temp_base = 0, block_size = 0, block_shift = 0;

	while (level < 3) {
		if (level == 0) {
			block_size = BLOCK_SIZE_L0;
			block_shift = SECTION_SHIFT_L0;
		} else if (level == 1) {
			block_size = BLOCK_SIZE_L1;
			block_shift = SECTION_SHIFT_L1;
		} else if (level == 2) {
			block_size = BLOCK_SIZE_L2;
			block_shift = SECTION_SHIFT_L2;
		}
		index = 0;
		while (list->virt_addr >= temp_base) {
			index++;
			temp_base += block_size;
		}
		temp_base -= block_size;
		if ((level_table[index - 1] & PMD_TYPE_MASK) ==
		    PMD_TYPE_TABLE) {
			/* Descend into the sub-table and scan again */
			level_table = (u64 *)(level_table[index - 1] &
				      ~PMD_TYPE_MASK);
			level++;
		} else {
			if (level == 0)
				return -EINVAL;
			if ((list->phys_addr + list->size) >
			    (temp_base + block_size * NUM_OF_ENTRY))
				return -EINVAL;
			/*
			 * Check that the address and size of the list
			 * member are aligned with the block size.
			 */
			if (((list->phys_addr & (block_size - 1)) != 0) ||
			    ((list->size & (block_size - 1)) != 0))
				return -EINVAL;
			table->ptr = level_table;
			table->table_base = temp_base -
					    ((index - 1) << block_shift);
			table->entry_size = block_size;
			return 0;
		}
	}

	return -EINVAL;
}

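/*
 * Worked example for find_table(): looking up virt_addr 0x80000000 (2GB)
 * descends from level 0 (whose entry is a table descriptor) into level 1,
 * where the scan settles on entry index 2 with temp_base = 0x80000000.
 * If that entry is not itself a table descriptor, the function reports
 * level 1 blocks: table_base = 0, entry_size = BLOCK_SIZE_L1 and ptr
 * pointing at the level 1 table.
 */
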
/*
 * To start the MMU before DDR is available, we create the MMU table in
 * SRAM. The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use
 * three levels of translation tables here to cover the 40-bit address
 * space, with a 4KB granule size and T0SZ=24:
 *   Level 0 IA[39],    table address @0
 *   Level 1 IA[38:30], table address @0x1000, 0x2000
 *   Level 2 IA[29:21], table address @0x3000, 0x4000
 * The space above offset 0x5000 is free for other purposes.
 */
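/*
 * For example, given this layout, VA 0x80200000 (2GB + 2MB) resolves as:
 * level 0 index = IA[39] = 0, level 1 index = IA[38:30] = 2, and
 * level 2 index = IA[29:21] = 1.
 */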
static inline void early_mmu_setup(void)
{
	unsigned int el, i;
	u64 *level0_table = (u64 *)CONFIG_SYS_FSL_OCRAM_BASE;
	u64 *level1_table0 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x1000);
	u64 *level1_table1 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x2000);
	u64 *level2_table0 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x3000);
	u64 *level2_table1 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x4000);
	struct table_info table = {level0_table, 0, BLOCK_SIZE_L0};

	/* Invalidate all table entries */
	memset(level0_table, 0, 0x5000);

	/* Fill in the table entries */
	set_pgtable_table(level0_table, 0, level1_table0);
	set_pgtable_table(level0_table, 1, level1_table1);
	set_pgtable_table(level1_table0, 0, level2_table0);
#ifdef CONFIG_FSL_LSCH3
	set_pgtable_table(level1_table0,
			  CONFIG_SYS_FLASH_BASE >> SECTION_SHIFT_L1,
			  level2_table1);
#elif defined(CONFIG_FSL_LSCH2)
	set_pgtable_table(level1_table0, 1, level2_table1);
#endif

	/* Find the table and fill in the block entries */
	for (i = 0; i < ARRAY_SIZE(early_mmu_table); i++) {
		if (find_table(&early_mmu_table[i],
			       &table, level0_table) == 0) {
			/*
			 * If find_table() returns an error, it cannot be
			 * handled here. A breakpoint can be added for
			 * debugging.
			 */
			set_block_entry(&early_mmu_table[i], &table);
			/*
			 * If set_block_entry() returns an error, it cannot
			 * be handled here either.
			 */
		}
	}

	el = current_el();
	set_ttbr_tcr_mair(el, (u64)level0_table, LAYERSCAPE_TCR,
			  MEMORY_ATTRIBUTES);
	set_sctlr(get_sctlr() | CR_M);
}

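/*
 * Note the ordering above: the tables are built and TTBR/TCR/MAIR are
 * programmed before SCTLR.M turns the MMU on; the d-cache bit (CR_C) is
 * set separately in arch_cpu_init() once this early table is in place.
 */
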
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
/*
 * Called from final MMU setup. The phys_addr is a new, not-yet-mapped
 * address. A new sub-table is created at level2_table_secure to cover
 * the CONFIG_SYS_MEM_RESERVE_SECURE bytes of reserved memory.
 */
static inline int final_secure_ddr(u64 *level0_table,
				   u64 *level2_table_secure,
				   phys_addr_t phys_addr)
{
	int ret = -EINVAL;
	struct table_info table = {};
	struct sys_mmu_table ddr_entry = {
		0, 0, BLOCK_SIZE_L1, MT_NORMAL,
		PMD_SECT_OUTER_SHARE | PMD_SECT_NS
	};
	u64 index;

	/* Need to create a new table */
	ddr_entry.virt_addr = phys_addr & ~(BLOCK_SIZE_L1 - 1);
	ddr_entry.phys_addr = phys_addr & ~(BLOCK_SIZE_L1 - 1);
	ret = find_table(&ddr_entry, &table, level0_table);
	if (ret)
		return ret;

	index = (ddr_entry.virt_addr - table.table_base) >> SECTION_SHIFT_L1;
	set_pgtable_table(table.ptr, index, level2_table_secure);
	table.ptr = level2_table_secure;
	table.table_base = ddr_entry.virt_addr;
	table.entry_size = BLOCK_SIZE_L2;
	ret = set_block_entry(&ddr_entry, &table);
	if (ret) {
		printf("MMU error: could not fill non-secure ddr block entries\n");
		return ret;
	}

	ddr_entry.virt_addr = phys_addr;
	ddr_entry.phys_addr = phys_addr;
	ddr_entry.size = CONFIG_SYS_MEM_RESERVE_SECURE;
	ddr_entry.attribute = PMD_SECT_OUTER_SHARE;
	ret = find_table(&ddr_entry, &table, level0_table);
	if (ret) {
		printf("MMU error: could not find secure ddr table\n");
		return ret;
	}

	ret = set_block_entry(&ddr_entry, &table);
	if (ret)
		printf("MMU error: could not set secure ddr block entry\n");

	return ret;
}
#endif

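/*
 * final_secure_ddr() works in two passes: it first points a level 1
 * entry at the new level 2 table and fills the whole 1GB region with
 * non-secure 2MB blocks, then overwrites the entries covering the
 * reserved range so that only CONFIG_SYS_MEM_RESERVE_SECURE bytes are
 * mapped secure.
 */
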
/*
 * The final tables look similar to early tables, but different in detail.
 * These tables are in DRAM. Sub tables are added to enable cache for
 * QBMan.
 *
 * Put the MMU table in secure memory if gd->secure_ram is valid.
 * OCRAM will not be used for this purpose so gd->secure_ram can't be 0.
 *
 * Level 1 table 0 contains 512 entries for each 1GB from 0 to 512GB.
 * Level 1 table 1 contains 512 entries for each 1GB from 512GB to 1TB.
 * Level 2 table 0 contains 512 entries for each 2MB from 0 to 1GB.
 *
 * For LSCH3:
 * Level 2 table 1 contains 512 entries for each 2MB from 32GB to 33GB.
 * For LSCH2:
 * Level 2 table 1 contains 512 entries for each 2MB from 1GB to 2GB.
 * Level 2 table 2 contains 512 entries for each 2MB from 20GB to 21GB.
 */
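/*
 * The QBMan sub-table index follows from SECTION_SHIFT_L1 = 30 (1GB
 * entries): for example, with CONFIG_SYS_FSL_QBMAN_BASE at 32GB (LSCH3),
 * CONFIG_SYS_FSL_QBMAN_BASE >> SECTION_SHIFT_L1 = 32, matching the
 * 32GB-33GB window described above.
 */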
static inline void final_mmu_setup(void)
{
	unsigned int el = current_el();
	unsigned int i;
	u64 *level0_table = (u64 *)gd->arch.tlb_addr;
	u64 *level1_table0;
	u64 *level1_table1;
	u64 *level2_table0;
	u64 *level2_table1;
#ifdef CONFIG_FSL_LSCH2
	u64 *level2_table2;
#endif
	struct table_info table = {NULL, 0, BLOCK_SIZE_L0};
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	u64 *level2_table_secure;

	if (el == 3) {
		/*
		 * Only use gd->secure_ram if the address is recalculated.
		 * Align to 4KB for the MMU table.
		 */
		if (gd->secure_ram & MEM_RESERVE_SECURE_MAINTAINED)
			level0_table = (u64 *)(gd->secure_ram & ~0xfff);
		else
			printf("MMU warning: gd->secure_ram is not maintained, disabled.\n");
	}
#endif
	level1_table0 = level0_table + 512;
	level1_table1 = level1_table0 + 512;
	level2_table0 = level1_table1 + 512;
	level2_table1 = level2_table0 + 512;
#ifdef CONFIG_FSL_LSCH2
	level2_table2 = level2_table1 + 512;
#endif
	table.ptr = level0_table;

	/* Invalidate all table entries */
	memset(level0_table, 0, PGTABLE_SIZE);

	/* Fill in the table entries */
	set_pgtable_table(level0_table, 0, level1_table0);
	set_pgtable_table(level0_table, 1, level1_table1);
	set_pgtable_table(level1_table0, 0, level2_table0);
#ifdef CONFIG_FSL_LSCH3
	set_pgtable_table(level1_table0,
			  CONFIG_SYS_FSL_QBMAN_BASE >> SECTION_SHIFT_L1,
			  level2_table1);
#elif defined(CONFIG_FSL_LSCH2)
	set_pgtable_table(level1_table0, 1, level2_table1);
	set_pgtable_table(level1_table0,
			  CONFIG_SYS_FSL_QBMAN_BASE >> SECTION_SHIFT_L1,
			  level2_table2);
#endif

	/* Find the table and fill in the block entries */
	for (i = 0; i < ARRAY_SIZE(final_mmu_table); i++) {
		if (find_table(&final_mmu_table[i],
			       &table, level0_table) == 0) {
			if (set_block_entry(&final_mmu_table[i],
					    &table) != 0)
				printf("MMU error: could not set block entry for %p\n",
				       &final_mmu_table[i]);
		} else {
			printf("MMU error: could not find the table for %p\n",
			       &final_mmu_table[i]);
		}
	}

	/* Set the secure memory to secure in MMU */
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (el == 3 && gd->secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
#ifdef CONFIG_FSL_LSCH3
		level2_table_secure = level2_table1 + 512;
#elif defined(CONFIG_FSL_LSCH2)
		level2_table_secure = level2_table2 + 512;
#endif
		if (!final_secure_ddr(level0_table,
				      level2_table_secure,
				      gd->secure_ram & ~0x3)) {
			gd->secure_ram |= MEM_RESERVE_SECURE_SECURED;
			debug("Now MMU table is in secured memory at 0x%llx\n",
			      gd->secure_ram & ~0x3);
		} else {
			printf("MMU warning: Failed to secure DDR\n");
		}
	}
#endif

	/* flush the new MMU table */
	flush_dcache_range((ulong)level0_table,
			   (ulong)level0_table + gd->arch.tlb_size);

#ifdef CONFIG_SYS_DPAA_FMAN
	flush_dcache_all();
#endif
	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, (u64)level0_table, LAYERSCAPE_TCR_FINAL,
			  MEMORY_ATTRIBUTES);
	/*
	 * The MMU is already enabled; we only need to invalidate the TLB
	 * to load the new table. The new table is compatible with the
	 * current one, so even if the MMU walks the new table before the
	 * TLB is invalidated, it still works. There is no need to turn
	 * off the MMU here.
	 */
}

int arch_cpu_init(void)
{
	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);
	return 0;
}

/*
 * This function is called from lib/board.c. It recreates the MMU table
 * in main memory. The MMU and d-cache are enabled earlier, and there is
 * no need to disable the d-cache for this operation.
 */
void enable_caches(void)
{
	final_mmu_setup();
	__asm_invalidate_tlb_all();
}
#endif

static inline u32 initiator_type(u32 cluster, int init_id)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type = gur_in32(&gur->tp_ityp[idx]);

	if (type & TP_ITYP_AV)
		return type;
	return 0;
}

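/*
 * Each topology cluster register packs up to TP_INIT_PER_CLUSTER 8-bit
 * initiator indices; init_id selects one byte, which indexes the TP_ITYP
 * array. A type word is only meaningful when its available bit
 * (TP_ITYP_AV) is set.
 */
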
u32 cpu_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, j, count = 0;
	u32 cluster, type, mask = 0;

	do {
		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

/*
 * Return the number of cores on this SOC.
 */
int cpu_numcores(void)
{
	return hweight32(cpu_mask());
}

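/*
 * hweight32() is a population count: for example, a cpu_mask() of 0xf
 * (four ARM cores found) yields cpu_numcores() == 4.
 */
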
int fsl_qoriq_core_to_cluster(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, j, count = 0;
	u32 cluster;

	do {
		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (initiator_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}

u32 fsl_qoriq_core_to_type(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, j, count = 0;
	u32 cluster, type;

	do {
		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (count == core)
					return type;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}

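/*
 * The type word returned above is the raw TP_ITYP value; callers decode
 * it with TP_ITYP_TYPE() (initiator class, as in cpu_mask()) or
 * TP_ITYP_VER() (core version, as in print_cpuinfo()).
 */
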
#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type, rcw;

	puts("SoC: ");
	cpu_name(buf);
	printf(" %s (0x%x)\n", buf, gur_in32(&gur->svr));
	memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		if (!(i % 3))
			puts("\n       ");
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz  ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
		       (type == TY_ITYP_VER_A57 ? "A57" : "   ")),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}
	printf("\n       Bus:      %-4s MHz  ",
	       strmhz(buf, sysinfo.freq_systembus));
	printf("DDR:      %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
#ifdef CONFIG_SYS_DPAA_FMAN
	printf("  FMAN:     %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
#endif
#ifdef CONFIG_SYS_FSL_HAS_DP_DDR
	printf("  DP-DDR:   %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus2));
#endif
	puts("\n");

	/*
	 * Display the RCW, so that no one gets confused as to what RCW
	 * we're actually using for this boot.
	 */
	puts("Reset Configuration Word (RCW):");
	for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
		rcw = gur_in32(&gur->rcwsr[i]);

		if ((i % 4) == 0)
			printf("\n       %08x:", i * 4);
		printf(" %08x", rcw);
	}
	puts("\n");

	return 0;
}
#endif

#ifdef CONFIG_FSL_ESDHC
int cpu_mmc_init(bd_t *bis)
{
	return fsl_esdhc_mmc_init(bis);
}
#endif

int cpu_eth_init(bd_t *bis)
{
	int error = 0;

#ifdef CONFIG_FSL_MC_ENET
	error = fsl_mc_ldpaa_init(bis);
#endif
#ifdef CONFIG_FMAN_ENET
	fm_standard_init(bis);
#endif
	return error;
}

int arch_early_init_r(void)
{
	int rv;

#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
	erratum_a009635();
#endif
	rv = fsl_layerscape_wake_secondary_cores();
	if (rv)
		printf("Did not wake secondary cores\n");

#ifdef CONFIG_SYS_HAS_SERDES
	fsl_serdes_init();
#endif
#ifdef CONFIG_FMAN_ENET
	fman_enet_init();
#endif
	return 0;
}

int timer_init(void)
{
	u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
#ifdef CONFIG_FSL_LSCH3
	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#endif
#ifdef COUNTER_FREQUENCY_REAL
	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

	/* Update with the accurate clock frequency */
	asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

#ifdef CONFIG_FSL_LSCH3
	/* Enable the timebase for all clusters.
	 * It is safe to do so even if some clusters are not enabled.
	 */
	out_le32(cltbenr, 0xf);
#endif

	/* Enable the clock for the timer.
	 * This is a global setting.
	 */
	out_le32(cntcr, 0x1);

	return 0;
}

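/*
 * For example, a board that defines COUNTER_FREQUENCY_REAL as 25000000
 * makes CNTFRQ_EL0 read back as 25 MHz, which is the rate software then
 * assumes for the architected timer.
 */
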
void reset_cpu(ulong addr)
{
	u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;
	u32 val;

	/* Raise RESET_REQ_B */
	val = scfg_in32(rstcr);
	val |= 0x02;
	scfg_out32(rstcr, val);
}