/*
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/arch/cpu.h>
#include <asm/arch/speed.h>
#include <asm/arch/mp.h>
#include <fm_eth.h>
#include <fsl_debug_server.h>
#include <fsl-mc/fsl_mc.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif

DECLARE_GLOBAL_DATA_PTR;
void cpu_name(char *name)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	unsigned int i, svr, ver;

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
		if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
			strcpy(name, cpu_type_list[i].name);
			/* Append "E" for parts with security (E) enabled */
			if (IS_E_PROCESSOR(svr))
				strcat(name, "E");
			break;
		}

	if (i == ARRAY_SIZE(cpu_type_list))
		strcpy(name, "unknown");
}
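/*
 * Example use (illustrative only): the caller provides a buffer large
 * enough for the longest cpu_type_list name plus the trailing "E", as
 * print_cpuinfo() below does:
 *
 *	char buf[32];
 *
 *	cpu_name(buf);
 *	printf("SoC: %s\n", buf);
 */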
#ifndef CONFIG_SYS_DCACHE_OFF
/*
 * Set the block entries according to the information of the table.
 */
static int set_block_entry(const struct sys_mmu_table *list,
			   struct table_info *table)
{
	u64 block_size = 0, block_shift = 0;
	u64 block_addr, index;
	int j;

	if (table->entry_size == BLOCK_SIZE_L1) {
		block_size = BLOCK_SIZE_L1;
		block_shift = SECTION_SHIFT_L1;
	} else if (table->entry_size == BLOCK_SIZE_L2) {
		block_size = BLOCK_SIZE_L2;
		block_shift = SECTION_SHIFT_L2;
	} else {
		return -EINVAL;
	}

	block_addr = list->phys_addr;
	index = (list->virt_addr - table->table_base) >> block_shift;

	for (j = 0; j < (list->size >> block_shift); j++) {
		set_pgtable_section(table->ptr, index, block_addr,
				    list->memory_type, list->attribute);
		block_addr += block_size;
		index++;
	}

	return 0;
}
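/*
 * Worked example (illustrative values): mapping a 4MB region at
 * virt_addr 0x80400000 with 2MB level-2 blocks, where the level-2
 * table covers 0x80000000..0xBFFFFFFF (table_base = 0x80000000):
 *
 *	index = (0x80400000 - 0x80000000) >> SECTION_SHIFT_L2 = 2
 *
 * so entries 2 and 3 are written, each advancing block_addr by 2MB.
 */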
/*
 * Find the corresponding table entry for the list.
 */
static int find_table(const struct sys_mmu_table *list,
		      struct table_info *table, u64 *level0_table)
{
	u64 index = 0, level = 0;
	u64 *level_table = level0_table;
	u64 temp_base = 0, block_size = 0, block_shift = 0;
	while (level < 3) {
		if (level == 0) {
			block_size = BLOCK_SIZE_L0;
			block_shift = SECTION_SHIFT_L0;
		} else if (level == 1) {
			block_size = BLOCK_SIZE_L1;
			block_shift = SECTION_SHIFT_L1;
		} else if (level == 2) {
			block_size = BLOCK_SIZE_L2;
			block_shift = SECTION_SHIFT_L2;
		}

		index = 0;
		while (list->virt_addr >= temp_base) {
			index++;
			temp_base += block_size;
		}

		temp_base -= block_size;
		if ((level_table[index - 1] & PMD_TYPE_MASK) ==
		    PMD_TYPE_TABLE) {
			/* Descend into the next-level table */
			level_table = (u64 *)(level_table[index - 1] &
					      ~PMD_TYPE_MASK);
			level++;
			continue;
		} else {
			if (level == 0)
				return -1;

			if ((list->phys_addr + list->size) >
			    (temp_base + block_size * NUM_OF_ENTRY))
				return -1;

			/*
			 * Check that the address and size of the list member
			 * are aligned with the block size.
			 */
			if (((list->phys_addr & (block_size - 1)) != 0) ||
			    ((list->size & (block_size - 1)) != 0))
				return -1;

			table->ptr = level_table;
			table->table_base = temp_base -
					    ((index - 1) << block_shift);
			table->entry_size = block_size;

			return 0;
		}
	}

	return -1;
}
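/*
 * In short: the walk starts at level 0 and follows PMD_TYPE_TABLE
 * pointers downward until it lands on a level whose entry is not a
 * next-level table. That level's table then receives the block
 * entries, e.g. a 2MB-aligned region resolves to a level-2 table
 * with table->entry_size == BLOCK_SIZE_L2.
 */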
/*
 * To start the MMU before DDR is available, we create an MMU table in SRAM.
 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
 * levels of translation tables here to cover a 40-bit address space.
 * We use a 4KB granule size, with 40-bit physical addresses and T0SZ=24.
 * Level 0 IA[39],    table address @0
 * Level 1 IA[38:30], table address @0x1000, 0x2000
 * Level 2 IA[29:21], table address @0x3000, 0x4000
 * Addresses above 0x5000 are free for other purposes.
 */
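/*
 * Illustrative virtual-address split under this layout (4KB granule,
 * T0SZ=24, so the input address is 40 bits):
 *
 *	VA[39]    -> level 0 index (2 entries)
 *	VA[38:30] -> level 1 index (512 x 1GB entries)
 *	VA[29:21] -> level 2 index (512 x 2MB entries)
 *	VA[20:0]  -> offset within a 2MB block
 */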
static inline void early_mmu_setup(void)
{
	unsigned int el, i;
	u64 *level0_table = (u64 *)CONFIG_SYS_FSL_OCRAM_BASE;
	u64 *level1_table0 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x1000);
	u64 *level1_table1 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x2000);
	u64 *level2_table0 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x3000);
	u64 *level2_table1 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x4000);
	struct table_info table = {level0_table, 0, BLOCK_SIZE_L0};

	/* Invalidate all table entries */
	memset(level0_table, 0, 0x5000);

	/* Fill in the table entries */
	set_pgtable_table(level0_table, 0, level1_table0);
	set_pgtable_table(level0_table, 1, level1_table1);
	set_pgtable_table(level1_table0, 0, level2_table0);
#ifdef CONFIG_FSL_LSCH3
	set_pgtable_table(level1_table0,
			  CONFIG_SYS_FLASH_BASE >> SECTION_SHIFT_L1,
			  level2_table1);
#elif defined(CONFIG_FSL_LSCH2)
	set_pgtable_table(level1_table0, 1, level2_table1);
#endif
	/* Find the table and fill in the block entries */
	for (i = 0; i < ARRAY_SIZE(early_mmu_table); i++) {
		if (find_table(&early_mmu_table[i],
			       &table, level0_table) == 0) {
			/*
			 * If find_table() returns an error, it cannot be
			 * dealt with here. A breakpoint can be added for
			 * debugging.
			 */
			set_block_entry(&early_mmu_table[i], &table);
			/*
			 * If set_block_entry() returns an error, it cannot
			 * be dealt with here either.
			 */
		}
	}

	el = current_el();
	set_ttbr_tcr_mair(el, (u64)level0_table, LAYERSCAPE_TCR,
			  MEMORY_ATTRIBUTES);
	set_sctlr(get_sctlr() | CR_M);
}
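/*
 * Note: LAYERSCAPE_TCR is expected to encode T0SZ=24 to match the 40-bit
 * layout described above; setting CR_M in SCTLR is what actually turns
 * the MMU on once TTBR/TCR/MAIR have been programmed.
 */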
/*
 * The final tables look similar to the early tables, but differ in detail.
 * These tables are in DRAM. Sub tables are added to enable cache control
 * over the QBMan region at 2MB granularity.
 *
 * Level 1 table 0 contains 512 entries for each 1GB from 0 to 512GB.
 * Level 1 table 1 contains 512 entries for each 1GB from 512GB to 1TB.
 * Level 2 table 0 contains 512 entries for each 2MB from 0 to 1GB.
 *
 * For LSCH3:
 * Level 2 table 1 contains 512 entries for each 2MB from 32GB to 33GB.
 * For LSCH2:
 * Level 2 table 1 contains 512 entries for each 2MB from 1GB to 2GB.
 * Level 2 table 2 contains 512 entries for each 2MB from 20GB to 21GB.
 */
static inline void final_mmu_setup(void)
{
	unsigned int el, i;
	u64 *level0_table = (u64 *)gd->arch.tlb_addr;
	u64 *level1_table0 = (u64 *)(gd->arch.tlb_addr + 0x1000);
	u64 *level1_table1 = (u64 *)(gd->arch.tlb_addr + 0x2000);
	u64 *level2_table0 = (u64 *)(gd->arch.tlb_addr + 0x3000);
#ifdef CONFIG_FSL_LSCH3
	u64 *level2_table1 = (u64 *)(gd->arch.tlb_addr + 0x4000);
#elif defined(CONFIG_FSL_LSCH2)
	u64 *level2_table1 = (u64 *)(gd->arch.tlb_addr + 0x4000);
	u64 *level2_table2 = (u64 *)(gd->arch.tlb_addr + 0x5000);
#endif
	struct table_info table = {level0_table, 0, BLOCK_SIZE_L0};
	/* Invalidate all table entries */
	memset(level0_table, 0, PGTABLE_SIZE);

	/* Fill in the table entries */
	set_pgtable_table(level0_table, 0, level1_table0);
	set_pgtable_table(level0_table, 1, level1_table1);
	set_pgtable_table(level1_table0, 0, level2_table0);
#ifdef CONFIG_FSL_LSCH3
	set_pgtable_table(level1_table0,
			  CONFIG_SYS_FSL_QBMAN_BASE >> SECTION_SHIFT_L1,
			  level2_table1);
#elif defined(CONFIG_FSL_LSCH2)
	set_pgtable_table(level1_table0, 1, level2_table1);
	set_pgtable_table(level1_table0,
			  CONFIG_SYS_FSL_QBMAN_BASE >> SECTION_SHIFT_L1,
			  level2_table2);
#endif
	/* Find the table and fill in the block entries */
	for (i = 0; i < ARRAY_SIZE(final_mmu_table); i++) {
		if (find_table(&final_mmu_table[i],
			       &table, level0_table) == 0) {
			if (set_block_entry(&final_mmu_table[i],
					    &table) != 0) {
				printf("MMU error: could not set block entry for %p\n",
				       &final_mmu_table[i]);
			}
		} else {
			printf("MMU error: could not find the table for %p\n",
			       &final_mmu_table[i]);
		}
	}

	/* Flush the new MMU table */
	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);
#ifdef CONFIG_SYS_DPAA_FMAN
	flush_dcache_all();
#endif
	/* Point TTBR to the new table */
	el = current_el();
	set_ttbr_tcr_mair(el, (u64)level0_table, LAYERSCAPE_TCR_FINAL,
			  MEMORY_ATTRIBUTES);
	/*
	 * The MMU is already enabled; we only need to invalidate the TLB to
	 * load the new table. The new table is compatible with the current
	 * one, so even if the MMU walks the new table before the TLB is
	 * invalidated, it still works. Therefore we don't need to turn off
	 * the MMU here.
	 */
}
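/*
 * The TLB invalidation itself is left to the caller: enable_caches()
 * below calls __asm_invalidate_tlb_all() right after final_mmu_setup()
 * returns.
 */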
int arch_cpu_init(void)
{
	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);

	return 0;
}
/*
 * This function is called from lib/board.c. It recreates the MMU table in
 * main memory. The MMU and d-cache are enabled earlier, so there is no need
 * to disable the d-cache for this operation.
 */
void enable_caches(void)
{
	final_mmu_setup();
	__asm_invalidate_tlb_all();
}
#endif
static inline u32 initiator_type(u32 cluster, int init_id)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type;

	/* Only initiators flagged available (TP_ITYP_AV) count */
	type = gur_in32(&gur->tp_ityp[idx]);
	if (type & TP_ITYP_AV)
		return type;

	return 0;
}
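/*
 * Each tp_cluster "lower" word packs up to TP_INIT_PER_CLUSTER initiator
 * indices, one per byte (hence init_id * 8 above). For example, a cluster
 * word of 0x04030201 (illustrative value) would yield tp_ityp indices
 * 1, 2, 3 and 4 for init_id 0..3.
 */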
u32 cpu_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}
/*
 * Return the number of cores on this SOC.
 */
int cpu_numcores(void)
{
	return hweight32(cpu_mask());
}
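/*
 * hweight32() is a population count, so e.g. a cpu_mask() of 0xf
 * (four ARM cores found) gives cpu_numcores() == 4.
 */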
int fsl_qoriq_core_to_cluster(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (initiator_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}
u32 fsl_qoriq_core_to_type(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (count == core)
					return type;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the core type */
}
#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type, rcw;

	puts("SoC: ");

	cpu_name(buf);
	printf(" %s (0x%x)\n", buf, gur_in32(&gur->svr));
	memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		if (!(i % 3))
			puts("\n       ");
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz  ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
		       (type == TY_ITYP_VER_A57 ? "A57" : "   ")),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}
	printf("\n       Bus:      %-4s MHz  ",
	       strmhz(buf, sysinfo.freq_systembus));
	printf("DDR:      %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
#ifdef CONFIG_SYS_DPAA_FMAN
	printf("  FMAN:     %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
#endif
#ifdef CONFIG_FSL_LSCH3
	printf("  DP-DDR:   %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus2));
#endif
	puts("\n");

	/*
	 * Display the RCW, so that no one gets confused as to what RCW
	 * we're actually using for this boot.
	 */
	puts("Reset Configuration Word (RCW):");
	for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
		rcw = gur_in32(&gur->rcwsr[i]);

		if ((i % 4) == 0)
			printf("\n       %08x:", i * 4);
		printf(" %08x", rcw);
	}
	puts("\n");

	return 0;
}
#endif
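/*
 * Illustrative output (values are made up; the format follows the
 * printfs above):
 *
 *	SoC:  LS2085AE (0x87010010)
 *	Clock Configuration:
 *	       CPU0(A57):1800 MHz  CPU1(A57):1800 MHz  ...
 *	       Bus:      600  MHz  DDR:      1600 MT/s
 *	Reset Configuration Word (RCW):
 *	       00000000: 48303830 48480048 ...
 */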
#ifdef CONFIG_FSL_ESDHC
int cpu_mmc_init(bd_t *bis)
{
	return fsl_esdhc_mmc_init(bis);
}
#endif
int cpu_eth_init(bd_t *bis)
{
	int error = 0;

#ifdef CONFIG_FSL_MC_ENET
	error = fsl_mc_ldpaa_init(bis);
#endif
#ifdef CONFIG_FMAN_ENET
	fm_standard_init(bis);
#endif
	return error;
}
int arch_early_init_r(void)
{
	int rv;

	rv = fsl_layerscape_wake_seconday_cores();
	if (rv)
		printf("Did not wake secondary cores\n");

#ifdef CONFIG_SYS_HAS_SERDES
	fsl_serdes_init();
#endif
#ifdef CONFIG_FMAN_ENET
	fman_enet_init();
#endif
	return 0;
}
int timer_init(void)
{
	u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
#ifdef CONFIG_FSL_LSCH3
	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#endif
#ifdef COUNTER_FREQUENCY_REAL
	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

	/* Update with the accurate clock frequency */
	asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

#ifdef CONFIG_FSL_LSCH3
	/*
	 * Enable the timebase for all clusters. It is safe to do so even
	 * if some clusters are not enabled.
	 */
	out_le32(cltbenr, 0xf);
#endif

	/* Enable the clock for the timer. This is a global setting. */
	out_le32(cntcr, 0x1);

	return 0;
}
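/*
 * Note: CNTFRQ_EL0 only reports the counter frequency to software; it
 * must be programmed at the highest implemented exception level before
 * the generic timer is used, which is why it is (re)written here when
 * COUNTER_FREQUENCY_REAL is known.
 */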
void reset_cpu(ulong addr)
{
	u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;
	u32 val;

	/* Raise RESET_REQ_B */
	val = scfg_in32(rstcr);
	val |= 0x02;
	scfg_out32(rstcr, val);
}