X-Git-Url: http://review.tizen.org/git/?a=blobdiff_plain;f=arch%2Farm%2Fcpu%2Farmv8%2Ffsl-layerscape%2Fcpu.c;h=c6fede31bab73d06aaf36b35aceda16dd915164e;hb=235c5b8315c6a9eb566fd3d99a098cc6db869fc5;hp=8062106e3e90fad3df513ab7e4a4e26fbdab2115;hpb=2313d48445e59f063ec9a3b4940fe8252737db76;p=platform%2Fkernel%2Fu-boot.git diff --git a/arch/arm/cpu/armv8/fsl-layerscape/cpu.c b/arch/arm/cpu/armv8/fsl-layerscape/cpu.c index 8062106..c6fede3 100644 --- a/arch/arm/cpu/armv8/fsl-layerscape/cpu.c +++ b/arch/arm/cpu/armv8/fsl-layerscape/cpu.c @@ -1,12 +1,14 @@ /* + * Copyright 2017 NXP * Copyright 2014-2015 Freescale Semiconductor, Inc. * * SPDX-License-Identifier: GPL-2.0+ */ #include +#include #include -#include +#include #include #include #include @@ -14,25 +16,22 @@ #include #include #include -#ifdef CONFIG_MP #include -#endif +#include #include -#include #include #ifdef CONFIG_FSL_ESDHC #include #endif +#include +#ifdef CONFIG_SYS_FSL_DDR +#include +#endif +#include DECLARE_GLOBAL_DATA_PTR; -static struct mm_region layerscape_mem_map[] = { - { - /* List terminator */ - 0, - } -}; -struct mm_region *mem_map = layerscape_mem_map; +struct mm_region *mem_map = early_map; void cpu_name(char *name) { @@ -48,6 +47,9 @@ void cpu_name(char *name) if (IS_E_PROCESSOR(svr)) strcat(name, "E"); + + sprintf(name + strlen(name), " Rev%d.%d", + SVR_MAJ(svr), SVR_MIN(svr)); break; } @@ -56,355 +58,184 @@ void cpu_name(char *name) } #ifndef CONFIG_SYS_DCACHE_OFF -static void set_pgtable_section(u64 *page_table, u64 index, u64 section, - u64 memory_type, u64 attribute) -{ - u64 value; - - value = section | PTE_TYPE_BLOCK | PTE_BLOCK_AF; - value |= PMD_ATTRINDX(memory_type); - value |= attribute; - page_table[index] = value; -} - -static void set_pgtable_table(u64 *page_table, u64 index, u64 *table_addr) -{ - u64 value; - - value = (u64)table_addr | PTE_TYPE_TABLE; - page_table[index] = value; -} - -/* - * Set the block entries according to the information of the table. - */ -static int set_block_entry(const struct sys_mmu_table *list, - struct table_info *table) -{ - u64 block_size = 0, block_shift = 0; - u64 block_addr, index; - int j; - - if (table->entry_size == BLOCK_SIZE_L1) { - block_size = BLOCK_SIZE_L1; - block_shift = SECTION_SHIFT_L1; - } else if (table->entry_size == BLOCK_SIZE_L2) { - block_size = BLOCK_SIZE_L2; - block_shift = SECTION_SHIFT_L2; - } else { - return -EINVAL; - } - - block_addr = list->phys_addr; - index = (list->virt_addr - table->table_base) >> block_shift; - - for (j = 0; j < (list->size >> block_shift); j++) { - set_pgtable_section(table->ptr, - index, - block_addr, - list->memory_type, - list->attribute); - block_addr += block_size; - index++; - } - - return 0; -} - -/* - * Find the corresponding table entry for the list. 
- */
-static int find_table(const struct sys_mmu_table *list,
-		      struct table_info *table, u64 *level0_table)
-{
-	u64 index = 0, level = 0;
-	u64 *level_table = level0_table;
-	u64 temp_base = 0, block_size = 0, block_shift = 0;
-
-	while (level < 3) {
-		if (level == 0) {
-			block_size = BLOCK_SIZE_L0;
-			block_shift = SECTION_SHIFT_L0;
-		} else if (level == 1) {
-			block_size = BLOCK_SIZE_L1;
-			block_shift = SECTION_SHIFT_L1;
-		} else if (level == 2) {
-			block_size = BLOCK_SIZE_L2;
-			block_shift = SECTION_SHIFT_L2;
-		}
-
-		index = 0;
-		while (list->virt_addr >= temp_base) {
-			index++;
-			temp_base += block_size;
-		}
-
-		temp_base -= block_size;
-
-		if ((level_table[index - 1] & PTE_TYPE_MASK) ==
-		    PTE_TYPE_TABLE) {
-			level_table = (u64 *)(level_table[index - 1] &
-					      ~PTE_TYPE_MASK);
-			level++;
-			continue;
-		} else {
-			if (level == 0)
-				return -EINVAL;
-
-			if ((list->phys_addr + list->size) >
-			    (temp_base + block_size * NUM_OF_ENTRY))
-				return -EINVAL;
-
-			/*
-			 * Check the address and size of the list member is
-			 * aligned with the block size.
-			 */
-			if (((list->phys_addr & (block_size - 1)) != 0) ||
-			    ((list->size & (block_size - 1)) != 0))
-				return -EINVAL;
-
-			table->ptr = level_table;
-			table->table_base = temp_base -
-					    ((index - 1) << block_shift);
-			table->entry_size = block_size;
-
-			return 0;
-		}
-	}
-	return -EINVAL;
-}
-
 /*
  * To start MMU before DDR is available, we create MMU table in SRAM.
  * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
  * levels of translation tables here to cover 40-bit address space.
  * We use 4KB granule size, with 40 bits physical address, T0SZ=24
- * Level 0 IA[39], table address @0
- * Level 1 IA[38:30], table address @0x1000, 0x2000
- * Level 2 IA[29:21], table address @0x3000, 0x4000
- * Address above 0x5000 is free for other purpose.
+ * Address above EARLY_PGTABLE_SIZE (0x5000) is free for other purposes.
+ * Note: the debug print in cache_v8.c is not usable for debugging
+ * these early MMU tables because UART is not yet available.
  */
 static inline void early_mmu_setup(void)
 {
-	unsigned int el, i;
-	u64 *level0_table = (u64 *)CONFIG_SYS_FSL_OCRAM_BASE;
-	u64 *level1_table0 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x1000);
-	u64 *level1_table1 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x2000);
-	u64 *level2_table0 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x3000);
-	u64 *level2_table1 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x4000);
-
-	struct table_info table = {level0_table, 0, BLOCK_SIZE_L0};
-
-	/* Invalidate all table entries */
-	memset(level0_table, 0, 0x5000);
-
-	/* Fill in the table entries */
-	set_pgtable_table(level0_table, 0, level1_table0);
-	set_pgtable_table(level0_table, 1, level1_table1);
-	set_pgtable_table(level1_table0, 0, level2_table0);
+	unsigned int el = current_el();
 
-#ifdef CONFIG_FSL_LSCH3
-	set_pgtable_table(level1_table0,
-			  CONFIG_SYS_FLASH_BASE >> SECTION_SHIFT_L1,
-			  level2_table1);
-#elif defined(CONFIG_FSL_LSCH2)
-	set_pgtable_table(level1_table0, 1, level2_table1);
-#endif
-	/* Find the table and fill in the block entries */
-	for (i = 0; i < ARRAY_SIZE(early_mmu_table); i++) {
-		if (find_table(&early_mmu_table[i],
-			       &table, level0_table) == 0) {
-			/*
-			 * If find_table() returns error, it cannot be dealt
-			 * with here. Breakpoint can be added for debugging.
-			 */
-			set_block_entry(&early_mmu_table[i], &table);
-			/*
-			 * If set_block_entry() returns error, it cannot be
-			 * dealt with here too.
- */ - } - } + /* global data is already setup, no allocation yet */ + gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE; + gd->arch.tlb_fillptr = gd->arch.tlb_addr; + gd->arch.tlb_size = EARLY_PGTABLE_SIZE; - el = current_el(); + /* Create early page tables */ + setup_pgtables(); - set_ttbr_tcr_mair(el, (u64)level0_table, LAYERSCAPE_TCR, + /* point TTBR to the new table */ + set_ttbr_tcr_mair(el, gd->arch.tlb_addr, + get_tcr(el, NULL, NULL) & + ~(TCR_ORGN_MASK | TCR_IRGN_MASK), MEMORY_ATTRIBUTES); + set_sctlr(get_sctlr() | CR_M); } -#ifdef CONFIG_SYS_MEM_RESERVE_SECURE -/* - * Called from final mmu setup. The phys_addr is new, non-existing - * address. A new sub table is created @level2_table_secure to cover - * size of CONFIG_SYS_MEM_RESERVE_SECURE memory. - */ -static inline int final_secure_ddr(u64 *level0_table, - u64 *level2_table_secure, - phys_addr_t phys_addr) +static void fix_pcie_mmu_map(void) { - int ret = -EINVAL; - struct table_info table = {}; - struct sys_mmu_table ddr_entry = { - 0, 0, BLOCK_SIZE_L1, MT_NORMAL, - PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS - }; - u64 index; - - /* Need to create a new table */ - ddr_entry.virt_addr = phys_addr & ~(BLOCK_SIZE_L1 - 1); - ddr_entry.phys_addr = phys_addr & ~(BLOCK_SIZE_L1 - 1); - ret = find_table(&ddr_entry, &table, level0_table); - if (ret) - return ret; - index = (ddr_entry.virt_addr - table.table_base) >> SECTION_SHIFT_L1; - set_pgtable_table(table.ptr, index, level2_table_secure); - table.ptr = level2_table_secure; - table.table_base = ddr_entry.virt_addr; - table.entry_size = BLOCK_SIZE_L2; - ret = set_block_entry(&ddr_entry, &table); - if (ret) { - printf("MMU error: could not fill non-secure ddr block entries\n"); - return ret; - } - ddr_entry.virt_addr = phys_addr; - ddr_entry.phys_addr = phys_addr; - ddr_entry.size = CONFIG_SYS_MEM_RESERVE_SECURE; - ddr_entry.attribute = PTE_BLOCK_OUTER_SHARE; - ret = find_table(&ddr_entry, &table, level0_table); - if (ret) { - printf("MMU error: could not find secure ddr table\n"); - return ret; - } - ret = set_block_entry(&ddr_entry, &table); - if (ret) - printf("MMU error: could not set secure ddr block entry\n"); +#ifdef CONFIG_ARCH_LS2080A + unsigned int i; + u32 svr, ver; + struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR); - return ret; -} + svr = gur_in32(&gur->svr); + ver = SVR_SOC_VER(svr); + + /* Fix PCIE base and size for LS2088A */ + if ((ver == SVR_LS2088A) || (ver == SVR_LS2084A) || + (ver == SVR_LS2048A) || (ver == SVR_LS2044A) || + (ver == SVR_LS2081A) || (ver == SVR_LS2041A)) { + for (i = 0; i < ARRAY_SIZE(final_map); i++) { + switch (final_map[i].phys) { + case CONFIG_SYS_PCIE1_PHYS_ADDR: + final_map[i].phys = 0x2000000000ULL; + final_map[i].virt = 0x2000000000ULL; + final_map[i].size = 0x800000000ULL; + break; + case CONFIG_SYS_PCIE2_PHYS_ADDR: + final_map[i].phys = 0x2800000000ULL; + final_map[i].virt = 0x2800000000ULL; + final_map[i].size = 0x800000000ULL; + break; + case CONFIG_SYS_PCIE3_PHYS_ADDR: + final_map[i].phys = 0x3000000000ULL; + final_map[i].virt = 0x3000000000ULL; + final_map[i].size = 0x800000000ULL; + break; + case CONFIG_SYS_PCIE4_PHYS_ADDR: + final_map[i].phys = 0x3800000000ULL; + final_map[i].virt = 0x3800000000ULL; + final_map[i].size = 0x800000000ULL; + break; + default: + break; + } + } + } #endif +} /* * The final tables look similar to early tables, but different in detail. * These tables are in DRAM. Sub tables are added to enable cache for * QBMan and OCRAM. * - * Put the MMU table in secure memory if gd->secure_ram is valid. 
- * OCRAM will be not used for this purpose so gd->secure_ram can't be 0.
- *
- * Level 1 table 0 contains 512 entries for each 1GB from 0 to 512GB.
- * Level 1 table 1 contains 512 entries for each 1GB from 512GB to 1TB.
- * Level 2 table 0 contains 512 entries for each 2MB from 0 to 1GB.
- *
- * For LSCH3:
- * Level 2 table 1 contains 512 entries for each 2MB from 32GB to 33GB.
- * For LSCH2:
- * Level 2 table 1 contains 512 entries for each 2MB from 1GB to 2GB.
- * Level 2 table 2 contains 512 entries for each 2MB from 20GB to 21GB.
+ * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
+ * OCRAM will not be used for this purpose so gd->arch.secure_ram can't be 0.
  */
 static inline void final_mmu_setup(void)
 {
+	u64 tlb_addr_save = gd->arch.tlb_addr;
 	unsigned int el = current_el();
-	unsigned int i;
-	u64 *level0_table = (u64 *)gd->arch.tlb_addr;
-	u64 *level1_table0;
-	u64 *level1_table1;
-	u64 *level2_table0;
-	u64 *level2_table1;
-#ifdef CONFIG_FSL_LSCH2
-	u64 *level2_table2;
-#endif
-	struct table_info table = {NULL, 0, BLOCK_SIZE_L0};
+	int index;
 
-#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
-	u64 *level2_table_secure;
+	/* fix the final_map before filling in the block entries */
+	fix_pcie_mmu_map();
+
+	mem_map = final_map;
 
-	if (el == 3) {
+	/* Update mapping for DDR to actual size */
+	for (index = 0; index < ARRAY_SIZE(final_map) - 2; index++) {
 		/*
-		 * Only use gd->secure_ram if the address is recalculated
-		 * Align to 4KB for MMU table
+		 * Find the entry for DDR mapping and update the address and
+		 * size. Zero-sized mapping will be skipped when creating MMU
+		 * table.
 		 */
-		if (gd->secure_ram & MEM_RESERVE_SECURE_MAINTAINED)
-			level0_table = (u64 *)(gd->secure_ram & ~0xfff);
-		else
-			printf("MMU warning: gd->secure_ram is not maintained, disabled.\n");
-	}
+		switch (final_map[index].virt) {
+		case CONFIG_SYS_FSL_DRAM_BASE1:
+			final_map[index].virt = gd->bd->bi_dram[0].start;
+			final_map[index].phys = gd->bd->bi_dram[0].start;
+			final_map[index].size = gd->bd->bi_dram[0].size;
+			break;
+#ifdef CONFIG_SYS_FSL_DRAM_BASE2
+		case CONFIG_SYS_FSL_DRAM_BASE2:
+#if (CONFIG_NR_DRAM_BANKS >= 2)
+			final_map[index].virt = gd->bd->bi_dram[1].start;
+			final_map[index].phys = gd->bd->bi_dram[1].start;
+			final_map[index].size = gd->bd->bi_dram[1].size;
+#else
+			final_map[index].size = 0;
 #endif
-	level1_table0 = level0_table + 512;
-	level1_table1 = level1_table0 + 512;
-	level2_table0 = level1_table1 + 512;
-	level2_table1 = level2_table0 + 512;
-#ifdef CONFIG_FSL_LSCH2
-	level2_table2 = level2_table1 + 512;
+			break;
 #endif
-	table.ptr = level0_table;
-
-	/* Invalidate all table entries */
-	memset(level0_table, 0, PGTABLE_SIZE);
-
-	/* Fill in the table entries */
-	set_pgtable_table(level0_table, 0, level1_table0);
-	set_pgtable_table(level0_table, 1, level1_table1);
-	set_pgtable_table(level1_table0, 0, level2_table0);
-#ifdef CONFIG_FSL_LSCH3
-	set_pgtable_table(level1_table0,
-			  CONFIG_SYS_FSL_QBMAN_BASE >> SECTION_SHIFT_L1,
-			  level2_table1);
-#elif defined(CONFIG_FSL_LSCH2)
-	set_pgtable_table(level1_table0, 1, level2_table1);
-	set_pgtable_table(level1_table0,
-			  CONFIG_SYS_FSL_QBMAN_BASE >> SECTION_SHIFT_L1,
-			  level2_table2);
-#endif
-
-	/* Find the table and fill in the block entries */
-	for (i = 0; i < ARRAY_SIZE(final_mmu_table); i++) {
-		if (find_table(&final_mmu_table[i],
-			       &table, level0_table) == 0) {
-			if (set_block_entry(&final_mmu_table[i],
-					    &table) != 0) {
-				printf("MMU error: could not set block entry for %p\n",
-				       &final_mmu_table[i]);
-			}
-
-		} else {
-			printf("MMU error: could not find the table for %p\n",
-			       &final_mmu_table[i]);
+#ifdef CONFIG_SYS_FSL_DRAM_BASE3
+		case CONFIG_SYS_FSL_DRAM_BASE3:
+#if (CONFIG_NR_DRAM_BANKS >= 3)
+			final_map[index].virt = gd->bd->bi_dram[2].start;
+			final_map[index].phys = gd->bd->bi_dram[2].start;
+			final_map[index].size = gd->bd->bi_dram[2].size;
+#else
+			final_map[index].size = 0;
+#endif
+			break;
+#endif
+		default:
+			break;
 		}
 	}
-	/* Set the secure memory to secure in MMU */
+
 #ifdef CONFIG_SYS_MEM_RESERVE_SECURE
-	if (el == 3 && gd->secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
-#ifdef CONFIG_FSL_LSCH3
-		level2_table_secure = level2_table1 + 512;
-#elif defined(CONFIG_FSL_LSCH2)
-		level2_table_secure = level2_table2 + 512;
-#endif
-		if (!final_secure_ddr(level0_table,
-				      level2_table_secure,
-				      gd->secure_ram & ~0x3)) {
-			gd->secure_ram |= MEM_RESERVE_SECURE_SECURED;
-			debug("Now MMU table is in secured memory at 0x%llx\n",
-			      gd->secure_ram & ~0x3);
+	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
+		if (el == 3) {
+			/*
+			 * Only use gd->arch.secure_ram if the address is
+			 * recalculated. Align to 4KB for MMU table.
+			 */
+			/* put page tables in secure ram */
+			index = ARRAY_SIZE(final_map) - 2;
+			gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
+			final_map[index].virt = gd->arch.secure_ram & ~0x3;
+			final_map[index].phys = final_map[index].virt;
+			final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
+			final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
+			gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
+			tlb_addr_save = gd->arch.tlb_addr;
 		} else {
-			printf("MMU warning: Failed to secure DDR\n");
+			/* Use allocated (board_f.c) memory for TLB */
+			tlb_addr_save = gd->arch.tlb_allocated;
+			gd->arch.tlb_addr = tlb_addr_save;
 		}
 	}
 #endif
-	/* flush new MMU table */
-	flush_dcache_range((ulong)level0_table,
-			   (ulong)level0_table + gd->arch.tlb_size);
+	/* Reset the fill ptr */
+	gd->arch.tlb_fillptr = tlb_addr_save;
+
+	/* Create normal system page tables */
+	setup_pgtables();
+
+	/* Create emergency page tables */
+	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
+	gd->arch.tlb_emerg = gd->arch.tlb_addr;
+	setup_pgtables();
+	gd->arch.tlb_addr = tlb_addr_save;
+
+	/* Disable cache and MMU */
+	dcache_disable();	/* TLBs are invalidated */
+	invalidate_icache_all();
 
 	/* point TTBR to the new table */
-	set_ttbr_tcr_mair(el, (u64)level0_table, LAYERSCAPE_TCR_FINAL,
+	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
 			  MEMORY_ATTRIBUTES);
-	/*
-	 * MMU is already enabled, just need to invalidate TLB to load the
-	 * new table. The new table is compatible with the current table, if
-	 * MMU somehow walks through the new table before invalidation TLB,
-	 * it still works. So we don't need to turn off MMU here.
-	 */
+
+	set_sctlr(get_sctlr() | CR_M);
 }
 
 u64 get_page_table_size(void)
@@ -414,6 +245,14 @@ u64 get_page_table_size(void)
 
 int arch_cpu_init(void)
 {
+	/*
+	 * This function is called before U-Boot relocates itself to speed up
+	 * booting. It is not necessary to run if performance is not critical.
+	 * Skip if MMU is already enabled by SPL or other means.
+	 */
+	if (get_sctlr() & CR_M)
+		return 0;
+
 	icache_enable();
 	__asm_invalidate_dcache_all();
 	__asm_invalidate_tlb_all();
@@ -422,19 +261,25 @@ int arch_cpu_init(void)
 	return 0;
 }
 
+void mmu_setup(void)
+{
+	final_mmu_setup();
+}
+
 /*
- * This function is called from lib/board.c.
- * It recreates MMU table in main memory. MMU and d-cache are enabled earlier.
- * There is no need to disable d-cache for this operation.
+ * This function is called from common/board_r.c.
+ * It recreates the MMU table in main memory.
  */
 void enable_caches(void)
 {
-	final_mmu_setup();
+	mmu_setup();
 	__asm_invalidate_tlb_all();
+	icache_enable();
+	dcache_enable();
 }
 #endif
 
-static inline u32 initiator_type(u32 cluster, int init_id)
+u32 initiator_type(u32 cluster, int init_id)
 {
 	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
 	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
@@ -447,6 +292,27 @@ static inline u32 initiator_type(u32 cluster, int init_id)
 	return 0;
 }
 
+u32 cpu_pos_mask(void)
+{
+	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
+	int i = 0;
+	u32 cluster, type, mask = 0;
+
+	do {
+		int j;
+
+		cluster = gur_in32(&gur->tp_cluster[i].lower);
+		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
+			type = initiator_type(cluster, j);
+			if (type && (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM))
+				mask |= 1 << (i * TP_INIT_PER_CLUSTER + j);
+		}
+		i++;
+	} while ((cluster & TP_CLUSTER_EOC) == 0x0);
+
+	return mask;
+}
+
 u32 cpu_mask(void)
 {
 	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
@@ -528,12 +394,14 @@ u32 fsl_qoriq_core_to_type(unsigned int core)
 	return -1;      /* cannot identify the cluster */
 }
 
+#ifndef CONFIG_FSL_LSCH3
 uint get_svr(void)
 {
 	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
 
 	return gur_in32(&gur->svr);
 }
+#endif
 
 #ifdef CONFIG_DISPLAY_CPUINFO
 int print_cpuinfo(void)
@@ -558,11 +426,13 @@ int print_cpuinfo(void)
 		printf("CPU%d(%s):%-4s MHz  ", core,
 		       type == TY_ITYP_VER_A7 ? "A7 " :
 		       (type == TY_ITYP_VER_A53 ? "A53" :
-			(type == TY_ITYP_VER_A57 ? "A57" : "   ")),
+		       (type == TY_ITYP_VER_A57 ? "A57" :
+		       (type == TY_ITYP_VER_A72 ? "A72" : "   "))),
 		       strmhz(buf, sysinfo.freq_processor[core]));
 	}
+	/* Display platform clock as Bus frequency. */
 	printf("\n       Bus:      %-4s MHz  ",
-	       strmhz(buf, sysinfo.freq_systembus));
+	       strmhz(buf, sysinfo.freq_systembus / CONFIG_SYS_FSL_PCLK_DIV));
 	printf("DDR:      %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
 #ifdef CONFIG_SYS_DPAA_FMAN
 	printf("  FMAN:     %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
@@ -603,7 +473,7 @@ int cpu_eth_init(bd_t *bis)
 {
 	int error = 0;
 
-#ifdef CONFIG_FSL_MC_ENET
+#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
 	error = fsl_mc_ldpaa_init(bis);
 #endif
 #ifdef CONFIG_FMAN_ENET
@@ -612,21 +482,39 @@ int cpu_eth_init(bd_t *bis)
 	return error;
 }
 
-int arch_early_init_r(void)
+static inline int check_psci(void)
 {
-#ifdef CONFIG_MP
-	int rv = 1;
-#endif
+	unsigned int psci_ver;
+
+	psci_ver = sec_firmware_support_psci_version();
+	if (psci_ver == PSCI_INVALID_VER)
+		return 1;
 
+	return 0;
+}
+
+int arch_early_init_r(void)
+{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
-	erratum_a009635();
+	u32 svr_dev_id;
+	/*
+	 * erratum A009635 is valid only for LS2080A SoC and
+	 * its personalities
+	 */
+	svr_dev_id = get_svr() >> 16;
+	if (svr_dev_id == SVR_DEV_LS2080A)
+		erratum_a009635();
 #endif
-
-#ifdef CONFIG_MP
-	rv = fsl_layerscape_wake_seconday_cores();
-	if (rv)
-		printf("Did not wake secondary cores\n");
+#if defined(CONFIG_SYS_FSL_ERRATUM_A009942) && defined(CONFIG_SYS_FSL_DDR)
+	erratum_a009942_check_cpo();
 #endif
+	if (check_psci()) {
+		debug("PSCI: PSCI does not exist.\n");
+
+		/* if PSCI does not exist, boot secondary cores here */
+		if (fsl_layerscape_wake_seconday_cores())
+			printf("Did not wake secondary cores\n");
+	}
 
 #ifdef CONFIG_SYS_HAS_SERDES
 	fsl_serdes_init();
@@ -643,14 +531,16 @@ int timer_init(void)
 #ifdef CONFIG_FSL_LSCH3
 	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
 #endif
-#ifdef CONFIG_LS2080A
+#ifdef CONFIG_ARCH_LS2080A
 	u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
+	u32 svr_dev_id;
 #endif
 #ifdef COUNTER_FREQUENCY_REAL
 	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;
 
 	/* Update with accurate clock frequency */
-	asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
+	if (current_el() == 3)
+		asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
 #endif
 
 #ifdef CONFIG_FSL_LSCH3
@@ -660,13 +550,21 @@ int timer_init(void)
 	out_le32(cltbenr, 0xf);
 #endif
 
-#ifdef CONFIG_LS2080A
+#ifdef CONFIG_ARCH_LS2080A
 	/*
 	 * In certain Layerscape SoCs, the clock for each core
 	 * has an enable bit in the PMU Physical Core Time Base Enable
 	 * Register (PCTBENR), which allows the watchdog to operate.
 	 */
 	setbits_le32(pctbenr, 0xff);
+	/*
+	 * For LS2080A SoC and its personalities, the timer controller
+	 * offset is different
+	 */
+	svr_dev_id = get_svr() >> 16;
+	if (svr_dev_id == SVR_DEV_LS2080A)
+		cntcr = (u32 *)SYS_FSL_LS2080A_LS2085A_TIMER_ADDR;
+
 #endif
 
 	/* Enable clock for timer
@@ -677,9 +575,10 @@ int timer_init(void)
 	return 0;
 }
 
-void reset_cpu(ulong addr)
+__efi_runtime_data u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;
+
+void __efi_runtime reset_cpu(ulong addr)
 {
-	u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;
 	u32 val;
 
 	/* Raise RESET_REQ_B */
@@ -688,23 +587,310 @@ void reset_cpu(ulong addr)
 	scfg_out32(rstcr, val);
 }
 
+#ifdef CONFIG_EFI_LOADER
+
+void __efi_runtime EFIAPI efi_reset_system(
+		       enum efi_reset_type reset_type,
+		       efi_status_t reset_status,
+		       unsigned long data_size, void *reset_data)
+{
+	switch (reset_type) {
+	case EFI_RESET_COLD:
+	case EFI_RESET_WARM:
+		reset_cpu(0);
+		break;
+	case EFI_RESET_SHUTDOWN:
+		/* Nothing we can do */
+		break;
+	}
+
+	while (1) { }
+}
+
+void efi_reset_system_init(void)
+{
+	efi_add_runtime_mmio(&rstcr, sizeof(*rstcr));
+}
+
+#endif
+
 phys_size_t board_reserve_ram_top(phys_size_t ram_size)
 {
 	phys_size_t ram_top = ram_size;
 
-#ifdef CONFIG_SYS_MEM_TOP_HIDE
-#error CONFIG_SYS_MEM_TOP_HIDE not to be used together with this function
+#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
+	/* The start address of MC reserved memory needs to be aligned. */
+	ram_top -= mc_get_dram_block_size();
+	ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
 #endif
-
-/* Carve the Debug Server private DRAM block from the end of DRAM */
-#ifdef CONFIG_FSL_DEBUG_SERVER
-	ram_top -= debug_server_get_dram_block_size();
+
+	return ram_size - ram_top;
+}
+
+phys_size_t get_effective_memsize(void)
+{
+	phys_size_t ea_size, rem = 0;
+
+	/*
+	 * For ARMv8 SoCs, DDR memory is split into two or three regions. The
+	 * first region is 2GB space at 0x8000_0000. If the memory extends to
+	 * the second region (or the third region if applicable), the secure
+	 * memory and Management Complex (MC) memory should be put into the
+	 * highest region, i.e. the end of DDR memory. CONFIG_MAX_MEM_MAPPED
+	 * is set to the size of the first region so U-Boot doesn't relocate
+	 * itself into a higher address. Should DDR be configured to skip the
+	 * first region, this function needs to be adjusted.
+ */ + if (gd->ram_size > CONFIG_MAX_MEM_MAPPED) { + ea_size = CONFIG_MAX_MEM_MAPPED; + rem = gd->ram_size - ea_size; + } else { + ea_size = gd->ram_size; + } + +#ifdef CONFIG_SYS_MEM_RESERVE_SECURE + /* Check if we have enough space for secure memory */ + if (rem > CONFIG_SYS_MEM_RESERVE_SECURE) { + rem -= CONFIG_SYS_MEM_RESERVE_SECURE; + } else { + if (ea_size > CONFIG_SYS_MEM_RESERVE_SECURE) { + ea_size -= CONFIG_SYS_MEM_RESERVE_SECURE; + rem = 0; /* Presume MC requires more memory */ + } else { + printf("Error: No enough space for secure memory.\n"); + } + } #endif + /* Check if we have enough memory for MC */ + if (rem < board_reserve_ram_top(rem)) { + /* Not enough memory in high region to reserve */ + if (ea_size > board_reserve_ram_top(rem)) + ea_size -= board_reserve_ram_top(rem); + else + printf("Error: No enough space for reserved memory.\n"); + } -/* Carve the MC private DRAM block from the end of DRAM */ -#ifdef CONFIG_FSL_MC_ENET - ram_top -= mc_get_dram_block_size(); - ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1); + return ea_size; +} + +int dram_init_banksize(void) +{ +#ifdef CONFIG_SYS_DP_DDR_BASE_PHY + phys_size_t dp_ddr_size; #endif - return ram_top; + /* + * gd->ram_size has the total size of DDR memory, less reserved secure + * memory. The DDR extends from low region to high region(s) presuming + * no hole is created with DDR configuration. gd->arch.secure_ram tracks + * the location of secure memory. gd->arch.resv_ram tracks the location + * of reserved memory for Management Complex (MC). + */ + gd->bd->bi_dram[0].start = CONFIG_SYS_SDRAM_BASE; + if (gd->ram_size > CONFIG_SYS_DDR_BLOCK1_SIZE) { + gd->bd->bi_dram[0].size = CONFIG_SYS_DDR_BLOCK1_SIZE; + gd->bd->bi_dram[1].start = CONFIG_SYS_DDR_BLOCK2_BASE; + gd->bd->bi_dram[1].size = gd->ram_size - + CONFIG_SYS_DDR_BLOCK1_SIZE; +#ifdef CONFIG_SYS_DDR_BLOCK3_BASE + if (gd->bi_dram[1].size > CONFIG_SYS_DDR_BLOCK2_SIZE) { + gd->bd->bi_dram[2].start = CONFIG_SYS_DDR_BLOCK3_BASE; + gd->bd->bi_dram[2].size = gd->bd->bi_dram[1].size - + CONFIG_SYS_DDR_BLOCK2_SIZE; + gd->bd->bi_dram[1].size = CONFIG_SYS_DDR_BLOCK2_SIZE; + } +#endif + } else { + gd->bd->bi_dram[0].size = gd->ram_size; + } +#ifdef CONFIG_SYS_MEM_RESERVE_SECURE +#ifdef CONFIG_SYS_DDR_BLOCK3_BASE + if (gd->bd->bi_dram[2].size >= CONFIG_SYS_MEM_RESERVE_SECURE) { + gd->bd->bi_dram[2].size -= CONFIG_SYS_MEM_RESERVE_SECURE; + gd->arch.secure_ram = gd->bd->bi_dram[2].start + + gd->bd->bi_dram[2].size; + gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED; + gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE; + } else +#endif + { + if (gd->bd->bi_dram[1].size >= CONFIG_SYS_MEM_RESERVE_SECURE) { + gd->bd->bi_dram[1].size -= + CONFIG_SYS_MEM_RESERVE_SECURE; + gd->arch.secure_ram = gd->bd->bi_dram[1].start + + gd->bd->bi_dram[1].size; + gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED; + gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE; + } else if (gd->bd->bi_dram[0].size > + CONFIG_SYS_MEM_RESERVE_SECURE) { + gd->bd->bi_dram[0].size -= + CONFIG_SYS_MEM_RESERVE_SECURE; + gd->arch.secure_ram = gd->bd->bi_dram[0].start + + gd->bd->bi_dram[0].size; + gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED; + gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE; + } + } +#endif /* CONFIG_SYS_MEM_RESERVE_SECURE */ + +#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD) + /* Assign memory for MC */ +#ifdef CONFIG_SYS_DDR_BLOCK3_BASE + if (gd->bd->bi_dram[2].size >= + board_reserve_ram_top(gd->bd->bi_dram[2].size)) { + gd->arch.resv_ram = gd->bd->bi_dram[2].start + + 
gd->bd->bi_dram[2].size - + board_reserve_ram_top(gd->bd->bi_dram[2].size); + } else +#endif + { + if (gd->bd->bi_dram[1].size >= + board_reserve_ram_top(gd->bd->bi_dram[1].size)) { + gd->arch.resv_ram = gd->bd->bi_dram[1].start + + gd->bd->bi_dram[1].size - + board_reserve_ram_top(gd->bd->bi_dram[1].size); + } else if (gd->bd->bi_dram[0].size > + board_reserve_ram_top(gd->bd->bi_dram[0].size)) { + gd->arch.resv_ram = gd->bd->bi_dram[0].start + + gd->bd->bi_dram[0].size - + board_reserve_ram_top(gd->bd->bi_dram[0].size); + } + } +#endif /* CONFIG_FSL_MC_ENET */ + +#ifdef CONFIG_SYS_DP_DDR_BASE_PHY +#ifdef CONFIG_SYS_DDR_BLOCK3_BASE +#error "This SoC shouldn't have DP DDR" +#endif + if (soc_has_dp_ddr()) { + /* initialize DP-DDR here */ + puts("DP-DDR: "); + /* + * DDR controller use 0 as the base address for binding. + * It is mapped to CONFIG_SYS_DP_DDR_BASE for core to access. + */ + dp_ddr_size = fsl_other_ddr_sdram(CONFIG_SYS_DP_DDR_BASE_PHY, + CONFIG_DP_DDR_CTRL, + CONFIG_DP_DDR_NUM_CTRLS, + CONFIG_DP_DDR_DIMM_SLOTS_PER_CTLR, + NULL, NULL, NULL); + if (dp_ddr_size) { + gd->bd->bi_dram[2].start = CONFIG_SYS_DP_DDR_BASE; + gd->bd->bi_dram[2].size = dp_ddr_size; + } else { + puts("Not detected"); + } + } +#endif + + return 0; +} + +#if defined(CONFIG_EFI_LOADER) && !defined(CONFIG_SPL_BUILD) +void efi_add_known_memory(void) +{ + int i; + phys_addr_t ram_start, start; + phys_size_t ram_size; + u64 pages; + + /* Add RAM */ + for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) { +#ifdef CONFIG_SYS_DP_DDR_BASE_PHY +#ifdef CONFIG_SYS_DDR_BLOCK3_BASE +#error "This SoC shouldn't have DP DDR" +#endif + if (i == 2) + continue; /* skip DP-DDR */ +#endif + ram_start = gd->bd->bi_dram[i].start; + ram_size = gd->bd->bi_dram[i].size; +#ifdef CONFIG_RESV_RAM + if (gd->arch.resv_ram >= ram_start && + gd->arch.resv_ram < ram_start + ram_size) + ram_size = gd->arch.resv_ram - ram_start; +#endif + start = (ram_start + EFI_PAGE_MASK) & ~EFI_PAGE_MASK; + pages = (ram_size + EFI_PAGE_MASK) >> EFI_PAGE_SHIFT; + + efi_add_memory_map(start, pages, EFI_CONVENTIONAL_MEMORY, + false); + } +} +#endif + +/* + * Before DDR size is known, early MMU table have DDR mapped as device memory + * to avoid speculative access. To relocate U-Boot to DDR, "normal memory" + * needs to be set for these mappings. + * If a special case configures DDR with holes in the mapping, the holes need + * to be marked as invalid. This is not implemented in this function. 
+ */ +void update_early_mmu_table(void) +{ + if (!gd->arch.tlb_addr) + return; + + if (gd->ram_size <= CONFIG_SYS_FSL_DRAM_SIZE1) { + mmu_change_region_attr( + CONFIG_SYS_SDRAM_BASE, + gd->ram_size, + PTE_BLOCK_MEMTYPE(MT_NORMAL) | + PTE_BLOCK_OUTER_SHARE | + PTE_BLOCK_NS | + PTE_TYPE_VALID); + } else { + mmu_change_region_attr( + CONFIG_SYS_SDRAM_BASE, + CONFIG_SYS_DDR_BLOCK1_SIZE, + PTE_BLOCK_MEMTYPE(MT_NORMAL) | + PTE_BLOCK_OUTER_SHARE | + PTE_BLOCK_NS | + PTE_TYPE_VALID); +#ifdef CONFIG_SYS_DDR_BLOCK3_BASE +#ifndef CONFIG_SYS_DDR_BLOCK2_SIZE +#error "Missing CONFIG_SYS_DDR_BLOCK2_SIZE" +#endif + if (gd->ram_size - CONFIG_SYS_DDR_BLOCK1_SIZE > + CONFIG_SYS_DDR_BLOCK2_SIZE) { + mmu_change_region_attr( + CONFIG_SYS_DDR_BLOCK2_BASE, + CONFIG_SYS_DDR_BLOCK2_SIZE, + PTE_BLOCK_MEMTYPE(MT_NORMAL) | + PTE_BLOCK_OUTER_SHARE | + PTE_BLOCK_NS | + PTE_TYPE_VALID); + mmu_change_region_attr( + CONFIG_SYS_DDR_BLOCK3_BASE, + gd->ram_size - + CONFIG_SYS_DDR_BLOCK1_SIZE - + CONFIG_SYS_DDR_BLOCK2_SIZE, + PTE_BLOCK_MEMTYPE(MT_NORMAL) | + PTE_BLOCK_OUTER_SHARE | + PTE_BLOCK_NS | + PTE_TYPE_VALID); + } else +#endif + { + mmu_change_region_attr( + CONFIG_SYS_DDR_BLOCK2_BASE, + gd->ram_size - + CONFIG_SYS_DDR_BLOCK1_SIZE, + PTE_BLOCK_MEMTYPE(MT_NORMAL) | + PTE_BLOCK_OUTER_SHARE | + PTE_BLOCK_NS | + PTE_TYPE_VALID); + } + } +} + +__weak int dram_init(void) +{ + fsl_initdram(); +#if !defined(CONFIG_SPL) || defined(CONFIG_SPL_BUILD) + /* This will break-before-make MMU for DDR */ + update_early_mmu_table(); +#endif + + return 0; }