1 // SPDX-License-Identifier: GPL-2.0+
10 #include <dm/device-internal.h>
12 #include <dm/uclass.h>
15 #include <asm/arch/sci/sci.h>
16 #include <asm/arch/sys_proto.h>
17 #include <asm/arch-imx/cpu.h>
18 #include <asm/armv8/cpu.h>
19 #include <asm/armv8/mmu.h>
20 #include <asm/mach-imx/boot_mode.h>
22 DECLARE_GLOBAL_DATA_PTR;
24 #define BT_PASSOVER_TAG 0x504F
/*
 * Locate the ROM/SCU boot "pass over" info block at the fixed address
 * PASS_OVER_INFO_ADDR and sanity-check it.
 *
 * Validates the barker tag (BT_PASSOVER_TAG) and the recorded length
 * against sizeof(struct pass_over_info_t).
 * NOTE(review): the reject path and final return are not visible in this
 * chunk — presumably returns NULL on mismatch, else p; confirm in full file.
 */
25 struct pass_over_info_t *get_pass_over_info(void)
27 struct pass_over_info_t *p =
28 (struct pass_over_info_t *)PASS_OVER_INFO_ADDR;
/* Reject when the magic tag or the structure size does not match. */
30 if (p->barker != BT_PASSOVER_TAG ||
31 p->len != sizeof(struct pass_over_info_t))
/*
 * Early arch-level CPU init hook.
 *
 * In SPL on rev-A silicon only: read the boot pass-over info and, when
 * g_ap_mu == 0 (U-Boot came up from the first container image), report
 * boot success to the SCU via sc_misc_boot_status().
 * NOTE(review): return statement not visible in this chunk.
 */
37 int arch_cpu_init(void)
39 #ifdef CONFIG_SPL_BUILD
40 struct pass_over_info_t *pass_over;
/* Pass-over handling applies only to CHIP_REV_A parts. */
42 if (is_soc_rev(CHIP_REV_A)) {
43 pass_over = get_pass_over_info();
44 if (pass_over && pass_over->g_ap_mu == 0) {
46 * When ap_mu is 0, means the U-Boot booted
47 * from first container
/* Tell the SCFW (partition -1 = own partition) that boot succeeded. */
49 sc_misc_boot_status(-1, SC_MISC_BOOT_STATUS_SUCCESS);
/*
 * DM-phase arch init: probe the SCU (system controller) MU device.
 *
 * Finds the "fsl,imx8-mu" node in the control FDT and binds/probes it as
 * a UCLASS_MISC device so SCFW IPC is available; logs on failure.
 * NOTE(review): declarations of 'node'/'ret'/'devp' and the return are not
 * visible in this chunk.
 */
57 int arch_cpu_init_dm(void)
62 node = fdt_node_offset_by_compatible(gd->fdt_blob, -1, "fsl,imx8-mu");
64 ret = uclass_get_device_by_of_offset(UCLASS_MISC, node, &devp);
66 printf("could not get scu %d\n", ret);
/*
 * Print which device the SoC booted from.
 *
 * NOTE(review): the per-device switch/case body is missing from this
 * chunk; only the default "Unknown device" print is visible.
 */
73 int print_bootinfo(void)
75 enum boot_device bt_dev = get_boot_device();
110 printf("Unknown device %u\n", bt_dev);
/*
 * Query the SCFW for the boot resource and map it to a boot_device enum.
 *
 * Defaults to SD1_BOOT; sc_misc_get_boot_dev() fills dev_rsrc, which the
 * (partially missing) switch maps to MMC1/NAND/FLEXSPI/SATA/... boot.
 * NOTE(review): switch cases and the return are not visible in this chunk.
 */
117 enum boot_device get_boot_device(void)
119 enum boot_device boot_dev = SD1_BOOT;
123 sc_misc_get_boot_dev(-1, &dev_rsrc);
127 boot_dev = MMC1_BOOT;
136 boot_dev = NAND_BOOT;
139 boot_dev = FLEXSPI_BOOT;
142 boot_dev = SATA_BOOT;
156 #ifdef CONFIG_ENV_IS_IN_MMC
/*
 * Weak default mapping from boot MMC device number to the environment
 * MMC device; boards may override. Default ignores devno and returns
 * the configured CONFIG_SYS_MMC_ENV_DEV.
 */
157 __weak int board_mmc_get_env_dev(int devno)
159 return CONFIG_SYS_MMC_ENV_DEV;
/*
 * Pick the MMC device holding the environment.
 *
 * Asks the SCFW which resource we booted from; if it was not an SD/MMC
 * resource, falls back to CONFIG_SYS_MMC_ENV_DEV, otherwise hands the
 * derived device number to board_mmc_get_env_dev() for board fixup.
 * NOTE(review): the dev_rsrc->devno mapping lines are missing here.
 */
162 int mmc_get_env_dev(void)
167 sc_misc_get_boot_dev(-1, &dev_rsrc);
180 /* If not boot from sd/mmc, use default value */
181 return CONFIG_SYS_MMC_ENV_DEV;
184 return board_mmc_get_env_dev(devno);
188 #define MEMSTART_ALIGNMENT SZ_2M /* Align the memory start with 2MB */
/*
 * Fetch the [start, end] range of memory region 'mr' if it is owned by
 * this partition.
 *
 * Queries the SCFW resource manager: first ownership, then the region
 * bounds. Logs and (presumably) returns an error when the info query
 * fails. NOTE(review): the ownership check branch, assignment of
 * *addr_start/*addr_end and returns are not visible in this chunk.
 */
190 static int get_owned_memreg(sc_rm_mr_t mr, sc_faddr_t *addr_start,
191 sc_faddr_t *addr_end)
193 sc_faddr_t start, end;
197 owned = sc_rm_is_memreg_owned(-1, mr);
199 ret = sc_rm_get_memreg_info(-1, mr, &start, &end);
201 printf("Memreg get info failed, %d\n", ret);
204 debug("0x%llx -- 0x%llx\n", start, end);
/*
 * Compute the usable size of the first DRAM bank for relocation.
 *
 * Walks all 64 SCFW memory regions, finds the owned region that contains
 * the running U-Boot (CONFIG_SYS_TEXT_BASE) within PHYS_SDRAM_1, and
 * returns its extent from PHYS_SDRAM_1; capped at PHYS_SDRAM_1_SIZE.
 * Falls back to PHYS_SDRAM_1_SIZE when no region matches.
 */
214 phys_size_t get_effective_memsize(void)
217 sc_faddr_t start, end, end1;
/* end1 = exclusive end of the first DRAM bank. */
220 end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;
222 for (mr = 0; mr < 64; mr++) {
223 err = get_owned_memreg(mr, &start, &end);
/* Align the start up; regions too small after alignment are skipped. */
225 start = roundup(start, MEMSTART_ALIGNMENT);
226 /* Too small memory region, not use it */
230 /* Find the memory region runs the U-Boot */
231 if (start >= PHYS_SDRAM_1 && start <= end1 &&
232 (start <= CONFIG_SYS_TEXT_BASE &&
233 end >= CONFIG_SYS_TEXT_BASE)) {
/* Region fully inside bank 1: size is from bank start to region end. */
234 if ((end + 1) <= ((sc_faddr_t)PHYS_SDRAM_1 +
236 return (end - PHYS_SDRAM_1 + 1);
/* Region extends past bank 1: clamp to the full bank size. */
238 return PHYS_SDRAM_1_SIZE;
/* No owned region contained U-Boot: default to full bank-1 size. */
243 return PHYS_SDRAM_1_SIZE;
/*
 * NOTE(review): the enclosing function signature is not visible in this
 * chunk — from the body this is dram_init(): it sums all owned SCFW
 * memory regions that fall inside DRAM bank 1 or bank 2 into
 * gd->ram_size, clamping each at its bank end, and on error defaults to
 * the full PHYS_SDRAM_1_SIZE + PHYS_SDRAM_2_SIZE. Confirm in full file.
 */
249 sc_faddr_t start, end, end1, end2;
/* Exclusive ends of the two fixed DRAM banks. */
252 end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;
253 end2 = (sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE;
254 for (mr = 0; mr < 64; mr++) {
255 err = get_owned_memreg(mr, &start, &end);
257 start = roundup(start, MEMSTART_ALIGNMENT);
258 /* Too small memory region, not use it */
/* Accumulate the part of the region that lies within bank 1. */
262 if (start >= PHYS_SDRAM_1 && start <= end1) {
263 if ((end + 1) <= end1)
264 gd->ram_size += end - start + 1;
266 gd->ram_size += end1 - start;
/* Likewise for bank 2. */
267 } else if (start >= PHYS_SDRAM_2 && start <= end2) {
268 if ((end + 1) <= end2)
269 gd->ram_size += end - start + 1;
271 gd->ram_size += end2 - start;
276 /* If error, set to the default value */
278 gd->ram_size = PHYS_SDRAM_1_SIZE;
279 gd->ram_size += PHYS_SDRAM_2_SIZE;
/*
 * Insertion-style sort step: bubble the bank at index 'current_bank'
 * toward the front of gd->bd->bi_dram[] so banks stay ordered by
 * ascending start address. Swaps adjacent entries while the previous
 * bank starts higher than the current one.
 * NOTE(review): the loop-decrement / early-break lines are not visible
 * in this chunk.
 */
284 static void dram_bank_sort(int current_bank)
289 while (current_bank > 0) {
290 if (gd->bd->bi_dram[current_bank - 1].start >
291 gd->bd->bi_dram[current_bank].start) {
/* Swap the two adjacent bank descriptors (start and size). */
292 start = gd->bd->bi_dram[current_bank - 1].start;
293 size = gd->bd->bi_dram[current_bank - 1].size;
295 gd->bd->bi_dram[current_bank - 1].start =
296 gd->bd->bi_dram[current_bank].start;
297 gd->bd->bi_dram[current_bank - 1].size =
298 gd->bd->bi_dram[current_bank].size;
300 gd->bd->bi_dram[current_bank].start = start;
301 gd->bd->bi_dram[current_bank].size = size;
/*
 * Populate gd->bd->bi_dram[] from the owned SCFW memory regions.
 *
 * For each owned region inside DRAM bank 1 or bank 2, records its
 * (aligned) start and its size clamped to the bank end, up to
 * CONFIG_NR_DRAM_BANKS entries. On error falls back to the two fixed
 * full-size banks. NOTE(review): the size-expression continuation lines,
 * dram_bank_sort() call and return are not visible in this chunk.
 */
307 int dram_init_banksize(void)
310 sc_faddr_t start, end, end1, end2;
/* Exclusive ends of the two fixed DRAM banks. */
314 end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;
315 end2 = (sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE;
/* Stop early once all bi_dram slots are filled. */
317 for (mr = 0; mr < 64 && i < CONFIG_NR_DRAM_BANKS; mr++) {
318 err = get_owned_memreg(mr, &start, &end);
320 start = roundup(start, MEMSTART_ALIGNMENT);
321 if (start > end) /* Small memory region, no use it */
/* Region starts within bank 1: record it, clamped at end1. */
324 if (start >= PHYS_SDRAM_1 && start <= end1) {
325 gd->bd->bi_dram[i].start = start;
327 if ((end + 1) <= end1)
328 gd->bd->bi_dram[i].size =
331 gd->bd->bi_dram[i].size = end1 - start;
/* Region starts within bank 2: record it, clamped at end2. */
335 } else if (start >= PHYS_SDRAM_2 && start <= end2) {
336 gd->bd->bi_dram[i].start = start;
338 if ((end + 1) <= end2)
339 gd->bd->bi_dram[i].size =
342 gd->bd->bi_dram[i].size = end2 - start;
350 /* If error, set to the default value */
352 gd->bd->bi_dram[0].start = PHYS_SDRAM_1;
353 gd->bd->bi_dram[0].size = PHYS_SDRAM_1_SIZE;
354 gd->bd->bi_dram[1].start = PHYS_SDRAM_2;
355 gd->bd->bi_dram[1].size = PHYS_SDRAM_2_SIZE;
/*
 * Choose MMU block attributes for a memory region by its start address.
 *
 * DRAM (within either fixed SDRAM bank) gets normal cacheable
 * outer-shareable memory; everything else gets strongly-ordered device
 * memory (nGnRnE, non-shareable, execute-never).
 */
361 static u64 get_block_attrs(sc_faddr_t addr_start)
/* Default: device memory, no execution from EL0/EL1. */
363 u64 attr = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE |
364 PTE_BLOCK_PXN | PTE_BLOCK_UXN;
366 if ((addr_start >= PHYS_SDRAM_1 &&
367 addr_start <= ((sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE)) ||
368 (addr_start >= PHYS_SDRAM_2 &&
369 addr_start <= ((sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE)))
370 return (PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_OUTER_SHARE);
/*
 * Size of the mappable block [addr_start, addr_end], clamped so a region
 * that starts inside one of the two DRAM banks never extends past that
 * bank's end; otherwise the plain inclusive length.
 */
375 static u64 get_block_size(sc_faddr_t addr_start, sc_faddr_t addr_end)
377 sc_faddr_t end1, end2;
379 end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;
380 end2 = (sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE;
/* Clamp a bank-1 region at the bank-1 boundary. */
382 if (addr_start >= PHYS_SDRAM_1 && addr_start <= end1) {
383 if ((addr_end + 1) > end1)
384 return end1 - addr_start;
/* Clamp a bank-2 region at the bank-2 boundary. */
385 } else if (addr_start >= PHYS_SDRAM_2 && addr_start <= end2) {
386 if ((addr_end + 1) > end2)
387 return end2 - addr_start;
/* Inclusive range length in all other cases. */
390 return (addr_end - addr_start + 1);
393 #define MAX_PTE_ENTRIES 512
394 #define MAX_MEM_MAP_REGIONS 16
/* MMU region table filled in enable_caches(); exported via the generic
 * 'mem_map' pointer consumed by the ARMv8 MMU setup code. */
396 static struct mm_region imx8_mem_map[MAX_MEM_MAP_REGIONS];
397 struct mm_region *mem_map = imx8_mem_map;
/*
 * Build imx8_mem_map from the owned SCFW memory regions and enable the
 * D-cache/MMU.
 *
 * Entry 0 is a fixed device-memory window for peripheral registers
 * (0x1c000000, 0x64000000 bytes); subsequent entries are the owned
 * memory regions with size/attrs from get_block_size()/get_block_attrs().
 * NOTE(review): loop-index increments, the list terminator's virt/phys
 * clear, and the final cache-enable call are not visible in this chunk.
 */
399 void enable_caches(void)
402 sc_faddr_t start, end;
405 /* Create map for registers access from 0x1c000000 to 0x80000000*/
406 imx8_mem_map[0].virt = 0x1c000000UL;
407 imx8_mem_map[0].phys = 0x1c000000UL;
408 imx8_mem_map[0].size = 0x64000000UL;
409 imx8_mem_map[0].attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
410 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN;
/* One identity-mapped (virt == phys) entry per owned memory region. */
413 for (mr = 0; mr < 64 && i < MAX_MEM_MAP_REGIONS; mr++) {
414 err = get_owned_memreg(mr, &start, &end);
416 imx8_mem_map[i].virt = start;
417 imx8_mem_map[i].phys = start;
418 imx8_mem_map[i].size = get_block_size(start, end);
419 imx8_mem_map[i].attrs = get_block_attrs(start);
/* Zero-size entry terminates the mem_map list; warn if table is full. */
424 if (i < MAX_MEM_MAP_REGIONS) {
425 imx8_mem_map[i].size = 0;
426 imx8_mem_map[i].attrs = 0;
428 puts("Error, need more MEM MAP REGIONS reserved\n");
433 for (i = 0; i < MAX_MEM_MAP_REGIONS; i++) {
434 debug("[%d] vir = 0x%llx phys = 0x%llx size = 0x%llx attrs = 0x%llx\n",
435 i, imx8_mem_map[i].virt, imx8_mem_map[i].phys,
436 imx8_mem_map[i].size, imx8_mem_map[i].attrs);
443 #if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
/*
 * Worst-case page-table memory budget for the dynamic mem_map.
 *
 * Per region: 2 L3 + 2 L2 + 1 L1 tables, plus one extra table; then
 * (per the comments) doubled for an emergency copy and padded with spare
 * tables for later dcache-driven splits. NOTE(review): the lines doing
 * the doubling/padding and the return are not visible in this chunk.
 */
444 u64 get_page_table_size(void)
446 u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
450 * For each memory region, the max table size:
451 * 2 level 3 tables + 2 level 2 tables + 1 level 1 table
453 size = (2 + 2 + 1) * one_pt * MAX_MEM_MAP_REGIONS + one_pt;
456 * We need to duplicate our page table once to have an emergency pt to
457 * resort to when splitting page tables later on
462 * We may need to split page tables later on if dcache settings change,
463 * so reserve up to 4 (random pick) page tables for that.
471 #define FUSE_MAC0_WORD0 708
472 #define FUSE_MAC0_WORD1 709
473 #define FUSE_MAC1_WORD0 710
474 #define FUSE_MAC1_WORD1 711
/*
 * Read the MAC address for ethernet interface 'dev_id' (0 or 1) from the
 * SCFW OTP fuses into mac[6].
 *
 * Each MAC occupies two fuse words (MAC0: 708/709, MAC1: 710/711);
 * word0 holds bytes 0-3, word1 bytes 4-5. Errors from the fuse read are
 * printed. NOTE(review): the dev_id branch, mac[0]/mac[4] assignments
 * and the error goto/return structure are not visible in this chunk.
 */
476 void imx_get_mac_from_fuse(int dev_id, unsigned char *mac)
478 u32 word[2], val[2] = {};
/* Select the fuse-word pair for the requested interface. */
482 word[0] = FUSE_MAC0_WORD0;
483 word[1] = FUSE_MAC0_WORD1;
485 word[0] = FUSE_MAC1_WORD0;
486 word[1] = FUSE_MAC1_WORD1;
489 for (i = 0; i < 2; i++) {
490 ret = sc_misc_otp_fuse_read(-1, word[i], &val[i]);
/* Unpack little-endian fuse words into the 6 MAC bytes. */
496 mac[1] = val[0] >> 8;
497 mac[2] = val[0] >> 16;
498 mac[3] = val[0] >> 24;
500 mac[5] = val[1] >> 8;
502 debug("%s: MAC%d: %02x.%02x.%02x.%02x.%02x.%02x\n",
503 __func__, dev_id, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
506 printf("%s: fuse %d, err: %d\n", __func__, word[i], ret);
/*
 * Build the packed CPU identifier: SCFW SC_C_ID control word is split
 * into a 4-bit revision (bits 8:5) and a 5-bit chip id (bits 4:0, offset
 * by MXC_SOC_IMX8); returned as (id << 12) | rev.
 * NOTE(review): error handling for the sc_misc_get_control() call is not
 * visible in this chunk.
 */
509 u32 get_cpu_rev(void)
514 ret = sc_misc_get_control(-1, SC_R_SYSTEM, SC_C_ID, &id);
518 rev = (id >> 5) & 0xf;
519 id = (id & 0x1f) + MXC_SOC_IMX8; /* Dummy ID for chip */
521 return (id << 12) | rev;
524 #if CONFIG_IS_ENABLED(CPU)
525 struct cpu_imx_platdata {
/*
 * Map a chip-id (from get_cpu_rev()) to its marketing-name string;
 * QXP A0 and later revs share one name.
 * NOTE(review): the returned string literals are not visible in this
 * chunk.
 */
533 const char *get_imx8_type(u32 imxtype)
536 case MXC_CPU_IMX8QXP:
537 case MXC_CPU_IMX8QXP_A0:
546 const char *get_imx8_rev(u32 rev)
/*
 * Return the name of the core we are running on, distinguished at
 * runtime (A35 vs A53 vs A72 checks visible here).
 * NOTE(review): returned strings and the fallback are not visible in
 * this chunk.
 */
558 const char *get_core_name(void)
562 else if (is_cortex_a53())
564 else if (is_cortex_a72())
570 #if IS_ENABLED(CONFIG_IMX_SCU_THERMAL)
/*
 * Read the CPU temperature via the "cpu-thermal0" UCLASS_THERMAL device
 * when CONFIG_IMX_SCU_THERMAL is enabled; a stub variant follows for the
 * disabled case. NOTE(review): error handling and both return paths are
 * not visible in this chunk.
 */
571 static int cpu_imx_get_temp(void)
573 struct udevice *thermal_dev;
576 ret = uclass_get_device_by_name(UCLASS_THERMAL, "cpu-thermal0",
580 ret = thermal_get_temp(thermal_dev, &cpu_tmp);
/* Stub used when thermal support is compiled out. */
590 static int cpu_imx_get_temp(void)
/*
 * cpu_ops.get_desc: format a human-readable CPU description into buf,
 * e.g. "NXP i.MX8QXP RevB A35 at 1200 MHz", appending the temperature
 * when CONFIG_IMX_SCU_THERMAL is enabled, then a trailing newline.
 * NOTE(review): snprintf return-value checks are not visible in this
 * chunk.
 */
596 int cpu_imx_get_desc(struct udevice *dev, char *buf, int size)
598 struct cpu_imx_platdata *plat = dev_get_platdata(dev);
604 ret = snprintf(buf, size, "NXP i.MX8%s Rev%s %s at %u MHz",
605 plat->type, plat->rev, plat->name, plat->freq_mhz);
607 if (IS_ENABLED(CONFIG_IMX_SCU_THERMAL)) {
/* Append " at <T>C"; 'ret' tracks how much of buf is consumed. */
610 ret = snprintf(buf, size, " at %dC", cpu_imx_get_temp());
613 snprintf(buf + ret, size - ret, "\n");
/*
 * cpu_ops.get_info: report the core clock (MHz -> kHz per the cpu_info
 * contract) and the L1-cache/MMU feature bits.
 * NOTE(review): the return statement is not visible in this chunk.
 */
618 static int cpu_imx_get_info(struct udevice *dev, struct cpu_info *info)
620 struct cpu_imx_platdata *plat = dev_get_platdata(dev);
622 info->cpu_freq = plat->freq_mhz * 1000;
623 info->features = BIT(CPU_FEAT_L1_CACHE) | BIT(CPU_FEAT_MMU);
627 static int cpu_imx_get_count(struct udevice *dev)
/* cpu_ops.get_vendor: fixed vendor string "NXP". */
632 static int cpu_imx_get_vendor(struct udevice *dev, char *buf, int size)
634 snprintf(buf, size, "NXP");
/* UCLASS_CPU operations table wiring the handlers above. */
638 static const struct cpu_ops cpu_imx8_ops = {
639 .get_desc = cpu_imx_get_desc,
640 .get_info = cpu_imx_get_info,
641 .get_count = cpu_imx_get_count,
642 .get_vendor = cpu_imx_get_vendor,
/* DT compatibles this CPU driver binds against (A35 on QXP, A53 on QM).
 * NOTE(review): the empty-terminator entry is not visible in this chunk. */
645 static const struct udevice_id cpu_imx8_ids[] = {
646 { .compatible = "arm,cortex-a35" },
647 { .compatible = "arm,cortex-a53" },
/*
 * Query the SCFW power manager for the current CPU clock rate (Hz) of
 * the core cluster we are running on (A35 vs A53/...). Prints an error
 * when the query fails. NOTE(review): the 'rate' declaration and return
 * are not visible in this chunk.
 */
651 static ulong imx8_get_cpu_rate(void)
/* Pick the SC resource matching the running core type. */
655 int type = is_cortex_a35() ? SC_R_A35 : is_cortex_a53() ?
658 ret = sc_pm_get_clock_rate(-1, type, SC_PM_CLK_CPU,
659 (sc_pm_clock_rate_t *)&rate);
661 printf("Could not read CPU frequency: %d\n", ret);
/*
 * UCLASS_CPU probe: cache identification strings and the core frequency
 * in the device's platdata for later get_desc/get_info calls.
 *
 * cpurev packing (see get_cpu_rev()): bits 11:0 = revision,
 * bits 19:12 = chip id. NOTE(review): the return statement is not
 * visible in this chunk.
 */
668 static int imx8_cpu_probe(struct udevice *dev)
670 struct cpu_imx_platdata *plat = dev_get_platdata(dev);
673 cpurev = get_cpu_rev();
674 plat->cpurev = cpurev;
675 plat->name = get_core_name();
676 plat->rev = get_imx8_rev(cpurev & 0xFFF);
677 plat->type = get_imx8_type((cpurev & 0xFF000) >> 12);
/* Hz -> MHz for display. */
678 plat->freq_mhz = imx8_get_cpu_rate() / 1000000;
682 U_BOOT_DRIVER(cpu_imx8_drv) = {
685 .of_match = cpu_imx8_ids,
686 .ops = &cpu_imx8_ops,
687 .probe = imx8_cpu_probe,
688 .platdata_auto_alloc_size = sizeof(struct cpu_imx_platdata),
689 .flags = DM_FLAG_PRE_RELOC,