X-Git-Url: http://review.tizen.org/git/?a=blobdiff_plain;f=arch%2Farm%2Fcpu%2Farmv8%2Ffsl-layerscape%2Fcpu.c;h=c11341a1d380eb85545946d986f86b8e015f7dc6;hb=6cc04547cb3bbd3a3d78947f200acbae19e3c67f;hp=b3f5c2f641fe311a37e2bf9bbdba6c00d0b1a944;hpb=6eb32a03e0da4b6bdd09d114738c5f2bfe011459;p=platform%2Fkernel%2Fu-boot.git

diff --git a/arch/arm/cpu/armv8/fsl-layerscape/cpu.c b/arch/arm/cpu/armv8/fsl-layerscape/cpu.c
index b3f5c2f..c11341a 100644
--- a/arch/arm/cpu/armv8/fsl-layerscape/cpu.c
+++ b/arch/arm/cpu/armv8/fsl-layerscape/cpu.c
@@ -1,17 +1,23 @@
 // SPDX-License-Identifier: GPL-2.0+
 /*
- * Copyright 2017-2019 NXP
+ * Copyright 2017-2021 NXP
  * Copyright 2014-2015 Freescale Semiconductor, Inc.
  */
 
 #include
+#include
 #include
 #include
-#include
 #include
 #include
+#include
+#include
 #include
+#include
+#include
 #include
+#include
+#include
 #include
 #include
 #include
@@ -30,6 +36,7 @@
 #endif
 #include
 #ifdef CONFIG_SYS_FSL_DDR
+#include
 #include
 #endif
 #include
@@ -43,6 +50,7 @@
 #endif
 #endif
 #include
+#include
 
 DECLARE_GLOBAL_DATA_PTR;
 
@@ -75,6 +83,9 @@ static struct cpu_type cpu_type_list[] = {
 	CPU_TYPE_ENTRY(LX2160A, LX2160A, 16),
 	CPU_TYPE_ENTRY(LX2120A, LX2120A, 12),
 	CPU_TYPE_ENTRY(LX2080A, LX2080A, 8),
+	CPU_TYPE_ENTRY(LX2162A, LX2162A, 16),
+	CPU_TYPE_ENTRY(LX2122A, LX2122A, 12),
+	CPU_TYPE_ENTRY(LX2082A, LX2082A, 8),
 };
 
 #define EARLY_PGTABLE_SIZE 0x5000
@@ -85,11 +96,11 @@ static struct mm_region early_map[] = {
 	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
 	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
 	},
-	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
+	{ CFG_SYS_FSL_OCRAM_BASE, CFG_SYS_FSL_OCRAM_BASE,
 	  SYS_FSL_OCRAM_SPACE_SIZE,
 	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
 	},
-	{ CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
+	{ CFG_SYS_FSL_QSPI_BASE1, CFG_SYS_FSL_QSPI_BASE1,
 	  CONFIG_SYS_FSL_QSPI_SIZE1,
 	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE},
 #ifdef CONFIG_FSL_IFC
@@ -148,7 +159,7 @@ static struct mm_region early_map[] = {
 	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
 	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
 	},
-	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
+	{ CFG_SYS_FSL_OCRAM_BASE, CFG_SYS_FSL_OCRAM_BASE,
 	  SYS_FSL_OCRAM_SPACE_SIZE,
 	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
 	},
@@ -157,7 +168,7 @@ static struct mm_region early_map[] = {
 	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
 	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
 	},
-	{ CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
+	{ CFG_SYS_FSL_QSPI_BASE, CFG_SYS_FSL_QSPI_BASE,
 	  CONFIG_SYS_FSL_QSPI_SIZE,
 	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
 	},
@@ -193,7 +204,7 @@ static struct mm_region final_map[] = {
 	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
 	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
 	},
-	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
+	{ CFG_SYS_FSL_OCRAM_BASE, CFG_SYS_FSL_OCRAM_BASE,
 	  SYS_FSL_OCRAM_SPACE_SIZE,
 	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
 	},
@@ -202,12 +213,12 @@ static struct mm_region final_map[] = {
 	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_OUTER_SHARE |
 	  PTE_BLOCK_NS
 	},
-	{ CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
+	{ CFG_SYS_FSL_QSPI_BASE1, CFG_SYS_FSL_QSPI_BASE1,
 	  CONFIG_SYS_FSL_QSPI_SIZE1,
 	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
 	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
 	},
-	{ CONFIG_SYS_FSL_QSPI_BASE2, CONFIG_SYS_FSL_QSPI_BASE2,
+	{ CFG_SYS_FSL_QSPI_BASE2, CFG_SYS_FSL_QSPI_BASE2,
 	  CONFIG_SYS_FSL_QSPI_SIZE2,
 	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
 	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
@@ -322,7 +333,7 @@ static struct mm_region final_map[] = {
 	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
 	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
 	},
-	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
+	{ CFG_SYS_FSL_OCRAM_BASE, CFG_SYS_FSL_OCRAM_BASE,
 	  SYS_FSL_OCRAM_SPACE_SIZE,
 	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
 	},
@@ -331,7 +342,7 @@ static struct mm_region final_map[] = {
 	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
 	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
 	},
-	{ CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
+	{ CFG_SYS_FSL_QSPI_BASE, CFG_SYS_FSL_QSPI_BASE,
 	  CONFIG_SYS_FSL_QSPI_SIZE,
 	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
 	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
@@ -390,7 +401,7 @@ struct mm_region *mem_map = early_map;
 
 void cpu_name(char *name)
 {
-	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
+	struct ccsr_gur __iomem *gur = (void *)(CFG_SYS_FSL_GUTS_ADDR);
 	unsigned int i, svr, ver;
 
 	svr = gur_in32(&gur->svr);
@@ -399,7 +410,7 @@ void cpu_name(char *name)
 	for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
 		if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
 			strcpy(name, cpu_type_list[i].name);
-#ifdef CONFIG_ARCH_LX2160A
+#if defined(CONFIG_ARCH_LX2160A) || defined(CONFIG_ARCH_LX2162A)
 			if (IS_C_PROCESSOR(svr))
 				strcat(name, "C");
 #endif
@@ -419,7 +430,7 @@ void cpu_name(char *name)
 #if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
 /*
  * To start MMU before DDR is available, we create MMU table in SRAM.
- * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
+ * The base address of SRAM is CFG_SYS_FSL_OCRAM_BASE. We use three
  * levels of translation tables here to cover 40-bit address space.
  * We use 4KB granule size, with 40 bits physical address, T0SZ=24
  * Address above EARLY_PGTABLE_SIZE (0x5000) is free for other purpose.
@@ -432,7 +443,7 @@ static inline void early_mmu_setup(void)
 
 	/* global data is already setup, no allocation yet */
 	if (el == 3)
-		gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
+		gd->arch.tlb_addr = CFG_SYS_FSL_OCRAM_BASE;
 	else
 		gd->arch.tlb_addr = CONFIG_SYS_DDR_SDRAM_BASE;
 	gd->arch.tlb_fillptr = gd->arch.tlb_addr;
@@ -443,7 +454,7 @@ static inline void early_mmu_setup(void)
 
 	/* point TTBR to the new table */
 	set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
-			  get_tcr(el, NULL, NULL) &
+			  get_tcr(NULL, NULL) &
 			  ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
 			  MEMORY_ATTRIBUTES);
 
@@ -455,7 +466,7 @@ static void fix_pcie_mmu_map(void)
 #ifdef CONFIG_ARCH_LS2080A
 	unsigned int i;
 	u32 svr, ver;
-	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
+	struct ccsr_gur __iomem *gur = (void *)(CFG_SYS_FSL_GUTS_ADDR);
 
 	svr = gur_in32(&gur->svr);
 	ver = SVR_SOC_VER(svr);
@@ -598,7 +609,7 @@ static inline void final_mmu_setup(void)
 	invalidate_icache_all();
 
 	/* point TTBR to the new table */
-	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
+	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(NULL, NULL),
 			  MEMORY_ATTRIBUTES);
 
 	set_sctlr(get_sctlr() | CR_M);
@@ -758,21 +769,19 @@ enum boot_src __get_boot_src(u32 porsr1)
 
 enum boot_src get_boot_src(void)
 {
-	struct pt_regs regs;
+	struct arm_smccc_res res;
 	u32 porsr1 = 0;
 
 #if defined(CONFIG_FSL_LSCH3)
 	u32 __iomem *dcfg_ccsr = (u32 __iomem *)DCFG_BASE;
 #elif defined(CONFIG_FSL_LSCH2)
-	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
+	struct ccsr_gur __iomem *gur = (void *)(CFG_SYS_FSL_GUTS_ADDR);
 #endif
 
 	if (current_el() == 2) {
-		regs.regs[0] = SIP_SVC_RCW;
-
-		smc_call(&regs);
-		if (!regs.regs[0])
-			porsr1 = regs.regs[1];
+		arm_smccc_smc(SIP_SVC_RCW, 0, 0, 0, 0, 0, 0, 0, &res);
+		if (!res.a0)
+			porsr1 = res.a1;
 	}
 
 	if (current_el() == 3 || !porsr1) {
@@ -809,7 +818,7 @@ int mmc_get_env_dev(void)
 }
 #endif
 
-enum env_location env_get_location(enum env_operation op, int prio)
+enum env_location arch_env_get_location(enum env_operation op, int prio)
 {
 	enum boot_src src = get_boot_src();
 	enum env_location env_loc = ENVL_NOWHERE;
@@ -854,7 +863,7 @@ enum env_location env_get_location(enum env_operation op, int prio)
 
 u32 initiator_type(u32 cluster, int init_id)
 {
-	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
+	struct ccsr_gur *gur = (void *)(CFG_SYS_FSL_GUTS_ADDR);
 	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
 	u32 type = 0;
 
@@ -867,7 +876,7 @@ u32 initiator_type(u32 cluster, int init_id)
 
 u32 cpu_pos_mask(void)
 {
-	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
+	struct ccsr_gur __iomem *gur = (void *)(CFG_SYS_FSL_GUTS_ADDR);
 	int i = 0;
 	u32 cluster, type, mask = 0;
 
@@ -888,7 +897,7 @@ u32 cpu_pos_mask(void)
 
 u32 cpu_mask(void)
 {
-	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
+	struct ccsr_gur __iomem *gur = (void *)(CFG_SYS_FSL_GUTS_ADDR);
 	int i = 0, count = 0;
 	u32 cluster, type, mask = 0;
 
@@ -921,7 +930,7 @@ int cpu_numcores(void)
 int fsl_qoriq_core_to_cluster(unsigned int core)
 {
 	struct ccsr_gur __iomem *gur =
-		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
+		(void __iomem *)(CFG_SYS_FSL_GUTS_ADDR);
 	int i = 0, count = 0;
 	u32 cluster;
 
@@ -945,7 +954,7 @@ int fsl_qoriq_core_to_cluster(unsigned int core)
 u32 fsl_qoriq_core_to_type(unsigned int core)
 {
 	struct ccsr_gur __iomem *gur =
-		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
+		(void __iomem *)(CFG_SYS_FSL_GUTS_ADDR);
 	int i = 0, count = 0;
 	u32 cluster, type;
 
@@ -970,7 +979,7 @@ u32 fsl_qoriq_core_to_type(unsigned int core)
 #ifndef CONFIG_FSL_LSCH3
 uint get_svr(void)
 {
-	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
+	struct ccsr_gur __iomem *gur = (void *)(CFG_SYS_FSL_GUTS_ADDR);
 
 	return gur_in32(&gur->svr);
 }
@@ -979,7 +988,7 @@ uint get_svr(void)
 #ifdef CONFIG_DISPLAY_CPUINFO
 int print_cpuinfo(void)
 {
-	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
+	struct ccsr_gur __iomem *gur = (void *)(CFG_SYS_FSL_GUTS_ADDR);
 	struct sys_info sysinfo;
 	char buf[32];
 	unsigned int i, core;
@@ -1036,13 +1045,13 @@ int print_cpuinfo(void)
 #endif
 
 #ifdef CONFIG_FSL_ESDHC
-int cpu_mmc_init(bd_t *bis)
+int cpu_mmc_init(struct bd_info *bis)
 {
 	return fsl_esdhc_mmc_init(bis);
 }
 #endif
 
-int cpu_eth_init(bd_t *bis)
+int cpu_eth_init(struct bd_info *bis)
 {
 	int error = 0;
 
@@ -1055,7 +1064,7 @@ int cpu_eth_init(bd_t *bis)
 	return error;
 }
 
-static inline int check_psci(void)
+int check_psci(void)
 {
 	unsigned int psci_ver;
 
@@ -1071,9 +1080,9 @@ static void config_core_prefetch(void)
 	char *buf = NULL;
 	char buffer[HWCONFIG_BUFFER_SIZE];
 	const char *prefetch_arg = NULL;
+	struct arm_smccc_res res;
 	size_t arglen;
 	unsigned int mask;
-	struct pt_regs regs;
 
 	if (env_get_f("hwconfig", buffer, sizeof(buffer)) > 0)
 		buf = buffer;
@@ -1091,11 +1100,10 @@ static void config_core_prefetch(void)
 	}
 
 #define SIP_PREFETCH_DISABLE_64 0xC200FF13
-	regs.regs[0] = SIP_PREFETCH_DISABLE_64;
-	regs.regs[1] = mask;
-	smc_call(&regs);
+	arm_smccc_smc(SIP_PREFETCH_DISABLE_64, mask, 0, 0, 0, 0, 0, 0,
+		      &res);
 
-	if (regs.regs[0])
+	if (res.a0)
 		printf("Prefetch disable config failed for mask ");
 	else
 		printf("Prefetch disable config passed for mask ");
@@ -1139,19 +1147,19 @@ int arch_early_init_r(void)
 #endif
 #ifdef CONFIG_SYS_FSL_HAS_RGMII
 	/* some dpmacs in armv8a based freescale layerscape SOCs can be
-	 * configured via both serdes(sgmii, xfi, xlaui etc) bits and via
+	 * configured via both serdes(sgmii, 10gbase-r, xlaui etc) bits and via
 	 * EC*_PMUX(rgmii) bits in RCW.
 	 * e.g. dpmac 17 and 18 in LX2160A can be configured as SGMII from
 	 * serdes bits and as RGMII via EC1_PMUX/EC2_PMUX bits
-	 * Now if a dpmac is enabled by serdes bits then it takes precedence
-	 * over EC*_PMUX bits. i.e. in LX2160A if we select serdes protocol
-	 * that configures dpmac17 as SGMII and set the EC1_PMUX as RGMII,
-	 * then the dpmac is SGMII and not RGMII.
+	 * Now if a dpmac is enabled as RGMII through ECx_PMUX then it takes
+	 * precedence over SerDes protocol. i.e. in LX2160A if we select serdes
+	 * protocol that configures dpmac17 as SGMII and set the EC1_PMUX as
+	 * RGMII, then the dpmac is RGMII and not SGMII.
 	 *
-	 * Therefore, move the fsl_rgmii_init after fsl_serdes_init. in
-	 * fsl_rgmii_init function of SOC, we will check if the dpmac is enabled
-	 * or not? if it is (fsl_serdes_init has already enabled the dpmac),
-	 * then don't enable it.
+	 * Therefore, even thought fsl_rgmii_init is after fsl_serdes_init
+	 * function of SOC, the dpmac will be enabled as RGMII even if it was
+	 * also enabled before as SGMII. If ECx_PMUX is not configured for
+	 * RGMII, DPMAC will remain configured as SGMII from fsl_serdes_init().
 	 */
 	fsl_rgmii_init();
 #endif
@@ -1171,9 +1179,9 @@ int arch_early_init_r(void)
 
 int timer_init(void)
 {
-	u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
+	u32 __iomem *cntcr = (u32 *)CFG_SYS_FSL_TIMER_ADDR;
 #ifdef CONFIG_FSL_LSCH3
-	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
+	u32 __iomem *cltbenr = (u32 *)CFG_SYS_FSL_PMU_CLTBENR;
 #endif
 #if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A) || \
 	defined(CONFIG_ARCH_LS1028A)
@@ -1221,23 +1229,27 @@ int timer_init(void)
 	return 0;
 }
 
-__efi_runtime_data u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;
+#if !CONFIG_IS_ENABLED(SYSRESET)
+__efi_runtime_data u32 __iomem *rstcr = (u32 *)CFG_SYS_FSL_RST_ADDR;
 
-void __efi_runtime reset_cpu(ulong addr)
+void __efi_runtime reset_cpu(void)
 {
-	u32 val;
+#if defined(CONFIG_ARCH_LX2160A) || defined(CONFIG_ARCH_LX2162A)
+	/* clear the RST_REQ_MSK and SW_RST_REQ */
+	out_le32(rstcr, 0x0);
 
-#ifdef CONFIG_ARCH_LX2160A
-	val = in_le32(rstcr);
-	val |= 0x01;
-	out_le32(rstcr, val);
+	/* initiate the sw reset request */
+	out_le32(rstcr, 0x1);
 #else
+	u32 val;
+
 	/* Raise RESET_REQ_B */
 	val = scfg_in32(rstcr);
 	val |= 0x02;
 	scfg_out32(rstcr, val);
 #endif
 }
+#endif
 
 #if defined(CONFIG_EFI_LOADER) && !defined(CONFIG_PSCI_RESET)
 
@@ -1250,7 +1262,7 @@ void __efi_runtime EFIAPI efi_reset_system(
 	case EFI_RESET_COLD:
 	case EFI_RESET_WARM:
 	case EFI_RESET_PLATFORM_SPECIFIC:
-		reset_cpu(0);
+		reset_cpu();
 		break;
 	case EFI_RESET_SHUTDOWN:
 		/* Nothing we can do */
@@ -1333,25 +1345,20 @@ phys_size_t get_effective_memsize(void)
 #ifdef CONFIG_TFABOOT
 phys_size_t tfa_get_dram_size(void)
 {
-	struct pt_regs regs;
-	phys_size_t dram_size = 0;
+	struct arm_smccc_res res;
 
-	regs.regs[0] = SMC_DRAM_BANK_INFO;
-	regs.regs[1] = -1;
-
-	smc_call(&regs);
-	if (regs.regs[0])
+	arm_smccc_smc(SMC_DRAM_BANK_INFO, -1, 0, 0, 0, 0, 0, 0, &res);
+	if (res.a0)
 		return 0;
 
-	dram_size = regs.regs[1];
-	return dram_size;
+	return res.a1;
 }
 
 static int tfa_dram_init_banksize(void)
 {
 	int i = 0, ret = 0;
-	struct pt_regs regs;
 	phys_size_t dram_size = tfa_get_dram_size();
+	struct arm_smccc_res res;
 
 	debug("dram_size %llx\n", dram_size);
 
@@ -1359,19 +1366,15 @@ static int tfa_dram_init_banksize(void)
 		return -EINVAL;
 
 	do {
-		regs.regs[0] = SMC_DRAM_BANK_INFO;
-		regs.regs[1] = i;
-
-		smc_call(&regs);
-		if (regs.regs[0]) {
+		arm_smccc_smc(SMC_DRAM_BANK_INFO, i, 0, 0, 0, 0, 0, 0, &res);
+		if (res.a0) {
 			ret = -EINVAL;
 			break;
 		}
 
-		debug("bank[%d]: start %lx, size %lx\n", i, regs.regs[1],
-		      regs.regs[2]);
-		gd->bd->bi_dram[i].start = regs.regs[1];
-		gd->bd->bi_dram[i].size = regs.regs[2];
+		debug("bank[%d]: start %lx, size %lx\n", i, res.a1, res.a2);
+		gd->bd->bi_dram[i].start = res.a1;
+		gd->bd->bi_dram[i].size = res.a2;
 
 		dram_size -= gd->bd->bi_dram[i].size;
 
@@ -1529,9 +1532,8 @@ int dram_init_banksize(void)
 void efi_add_known_memory(void)
 {
 	int i;
-	phys_addr_t ram_start, start;
+	phys_addr_t ram_start;
 	phys_size_t ram_size;
-	u64 pages;
 
 	/* Add RAM */
 	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
@@ -1549,11 +1551,8 @@ void efi_add_known_memory(void)
 		    gd->arch.resv_ram < ram_start + ram_size)
 			ram_size = gd->arch.resv_ram - ram_start;
 #endif
-		start = (ram_start + EFI_PAGE_MASK) & ~EFI_PAGE_MASK;
-		pages = (ram_size + EFI_PAGE_MASK) >> EFI_PAGE_SHIFT;
-
-		efi_add_memory_map(start, pages, EFI_CONVENTIONAL_MEMORY,
-				   false);
+		efi_add_memory_map(ram_start, ram_size,
+				   EFI_CONVENTIONAL_MEMORY);
 	}
 }
 #endif
@@ -1625,12 +1624,14 @@ void update_early_mmu_table(void)
 
 __weak int dram_init(void)
 {
+#ifdef CONFIG_SYS_FSL_DDR
 	fsl_initdram();
 #if (!defined(CONFIG_SPL) && !defined(CONFIG_TFABOOT)) || \
 	defined(CONFIG_SPL_BUILD)
 	/* This will break-before-make MMU for DDR */
 	update_early_mmu_table();
 #endif
+#endif
 
 	return 0;
 }
@@ -1643,6 +1644,14 @@ __weak int serdes_misc_init(void)
 
 int arch_misc_init(void)
 {
+	if (IS_ENABLED(CONFIG_FSL_CAAM)) {
+		struct udevice *dev;
+		int ret;
+
+		ret = uclass_get_device_by_driver(UCLASS_MISC, DM_DRIVER_GET(caam_jr), &dev);
+		if (ret)
+			printf("Failed to initialize caam_jr: %d\n", ret);
+	}
 	serdes_misc_init();
 
 	return 0;