1 // SPDX-License-Identifier: GPL-2.0+
9 #include <dm/device-internal.h>
11 #include <dm/uclass.h>
13 #include <asm/arch/sci/sci.h>
14 #include <asm/arch/sys_proto.h>
15 #include <asm/arch-imx/cpu.h>
16 #include <asm/armv8/cpu.h>
17 #include <asm/armv8/mmu.h>
18 #include <asm/mach-imx/boot_mode.h>
20 DECLARE_GLOBAL_DATA_PTR;
/*
 * get_cpu_rev() fragment: read the SoC id/revision word from the SCU
 * firmware and pack it as (soc_id << 12) | revision.
 * NOTE(review): the function signature and the error check on `ret`
 * are outside this excerpt.
 */
27 ret = sc_misc_get_control(-1, SC_R_SYSTEM, SC_C_ID, &id);
/* Silicon revision is carried in bits [8:5] of the SCFW id word */
31 rev = (id >> 5) & 0xf;
/* Chip id is bits [4:0]; bias into the MXC soc-id numbering space */
32 id = (id & 0x1f) + MXC_SOC_IMX8; /* Dummy ID for chip */
/* Callers decode this as type = (val & 0xFF000) >> 12, rev = val & 0xFFF */
34 return (id << 12) | rev;
37 #ifdef CONFIG_DISPLAY_CPUINFO
/*
 * Map the SoC type field extracted from get_cpu_rev() to a printable
 * name (body not visible in this excerpt).
 */
38 const char *get_imx8_type(u32 imxtype)
/*
 * Map the revision field of get_cpu_rev() to a printable string
 * (body not visible in this excerpt).
 */
48 const char *get_imx8_rev(u32 rev)
/*
 * Name of the core U-Boot is running on — presumably derived from the
 * current-CPU helpers in asm/armv8/cpu.h; body not visible, confirm
 * against the full source.
 */
60 const char *get_core_name(void)
/*
 * Print SoC type, revision, core name and CPU frequency, using the
 * driver-model CPU device and its first clock.
 */
68 int print_cpuinfo(void)
/* First UCLASS_CPU device is used as the boot core */
74 ret = uclass_get_device(UCLASS_CPU, 0, &dev);
78 ret = clk_get_by_index(dev, 0, &cpu_clk);
/* Clock lookup failure log; surrounding error handling not visible here */
80 dev_err(dev, "failed to clk\n");
86 cpurev = get_cpu_rev();
/* Decode the word packed by get_cpu_rev(): type in [19:12], rev in [11:0] */
88 printf("CPU: Freescale i.MX%s rev%s %s at %ld MHz\n",
89 get_imx8_type((cpurev & 0xFF000) >> 12),
90 get_imx8_rev((cpurev & 0xFFF)),
92 clk_get_rate(&cpu_clk) / 1000000);
/* Barker tag identifying a valid boot pass-over record */
98 #define BT_PASSOVER_TAG 0x504F
/*
 * Return the boot pass-over info left at PASS_OVER_INFO_ADDR by the
 * earlier boot stage, after validating its barker tag and length.
 * NOTE(review): the return statements are outside this excerpt —
 * presumably NULL on mismatch, `p` otherwise; confirm in full source.
 */
99 struct pass_over_info_t *get_pass_over_info(void)
101 struct pass_over_info_t *p =
102 (struct pass_over_info_t *)PASS_OVER_INFO_ADDR;
/* Reject anything that does not look like a pass-over record */
104 if (p->barker != BT_PASSOVER_TAG ||
105 p->len != sizeof(struct pass_over_info_t))
/*
 * Early architecture init: when the pass-over info reports g_ap_mu == 0
 * (U-Boot booted from the first container), report boot success to the
 * SCU firmware.
 */
111 int arch_cpu_init(void)
113 struct pass_over_info_t *pass_over = get_pass_over_info();
/* get_pass_over_info() may return NULL; only act on a validated record */
115 if (pass_over && pass_over->g_ap_mu == 0) {
117 * When ap_mu is 0, means the U-Boot booted
118 * from first container
120 sc_misc_boot_status(-1, SC_MISC_BOOT_STATUS_SUCCESS);
/*
 * Bind and probe the SCU (system controller MU) device from the device
 * tree so SCFW RPC services become available to driver model.
 */
126 int arch_cpu_init_dm(void)
128 struct udevice *devp;
/* Locate the messaging-unit node used to talk to the SCU firmware */
131 node = fdt_node_offset_by_compatible(gd->fdt_blob, -1, "fsl,imx8-mu");
/* Bind the imx8_scu driver to that node under the DM root */
132 ret = device_bind_driver_to_node(gd->dm_root, "imx8_scu", "imx8_scu",
133 offset_to_ofnode(node), &devp);
136 printf("could not find scu %d\n", ret);
140 ret = device_probe(devp);
142 printf("scu probe failed %d\n", ret);
/*
 * Print the detected boot source.
 * NOTE(review): the per-device cases are not visible in this excerpt;
 * only the unknown-device fallback is shown.
 */
149 int print_bootinfo(void)
151 enum boot_device bt_dev = get_boot_device();
186 printf("Unknown device %u\n", bt_dev);
/*
 * Translate the SCFW-reported boot resource into U-Boot's
 * enum boot_device. Defaults to SD1_BOOT when the resource is not
 * recognized (or the dispatch below does not match).
 */
193 enum boot_device get_boot_device(void)
195 enum boot_device boot_dev = SD1_BOOT;
199 sc_misc_get_boot_dev(-1, &dev_rsrc);
/* Dispatch on dev_rsrc; the case labels are not visible in this excerpt */
203 boot_dev = MMC1_BOOT;
212 boot_dev = NAND_BOOT;
215 boot_dev = FLEXSPI_BOOT;
218 boot_dev = SATA_BOOT;
232 #ifdef CONFIG_ENV_IS_IN_MMC
/* Board hook for the environment MMC device number; weak so boards can override */
233 __weak int board_mmc_get_env_dev(int devno)
235 return CONFIG_SYS_MMC_ENV_DEV;
/*
 * Select the MMC device holding the environment based on the actual
 * boot device, falling back to CONFIG_SYS_MMC_ENV_DEV when the boot
 * source is not SD/MMC.
 */
238 int mmc_get_env_dev(void)
243 sc_misc_get_boot_dev(-1, &dev_rsrc);
256 /* If not boot from sd/mmc, use default value */
257 return CONFIG_SYS_MMC_ENV_DEV;
/* devno is derived from dev_rsrc on lines not visible in this excerpt */
260 return board_mmc_get_env_dev(devno);
264 #define MEMSTART_ALIGNMENT SZ_2M /* Align the memory start with 2MB */
/*
 * If SCFW memory region @mr is owned by this partition, return its
 * physical [start, end] range through @addr_start/@addr_end.
 * NOTE(review): the return paths (ownership check result, success
 * value) are outside this excerpt; callers treat !err as success.
 */
266 static int get_owned_memreg(sc_rm_mr_t mr, sc_faddr_t *addr_start,
267 sc_faddr_t *addr_end)
269 sc_faddr_t start, end;
/* Ask the resource manager whether this partition owns the region */
273 owned = sc_rm_is_memreg_owned(-1, mr);
275 ret = sc_rm_get_memreg_info(-1, mr, &start, &end);
277 printf("Memreg get info failed, %d\n", ret);
280 debug("0x%llx -- 0x%llx\n", start, end);
/*
 * Effective memory size for relocation: find the owned region that
 * contains the running U-Boot (CONFIG_SYS_TEXT_BASE) and report how far
 * it extends from PHYS_SDRAM_1, clamped to the bank size.
 * Falls back to PHYS_SDRAM_1_SIZE when no such region is found.
 */
290 phys_size_t get_effective_memsize(void)
293 sc_faddr_t start, end, end1;
296 end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;
/* Walk all 64 possible SCFW memory regions */
298 for (mr = 0; mr < 64; mr++) {
299 err = get_owned_memreg(mr, &start, &end);
/* Start must be 2MB-aligned for the MMU block mapping */
301 start = roundup(start, MEMSTART_ALIGNMENT);
302 /* Too small memory region, not use it */
306 /* Find the memory region runs the U-Boot */
307 if (start >= PHYS_SDRAM_1 && start <= end1 &&
308 (start <= CONFIG_SYS_TEXT_BASE &&
309 end >= CONFIG_SYS_TEXT_BASE)) {
/* Clamp to bank 1 if the owned region extends past the bank end */
310 if ((end + 1) <= ((sc_faddr_t)PHYS_SDRAM_1 +
312 return (end - PHYS_SDRAM_1 + 1);
314 return PHYS_SDRAM_1_SIZE;
/* No owned region contains U-Boot: assume the full first bank */
319 return PHYS_SDRAM_1_SIZE;
/*
 * dram_init() fragment (the signature is not visible in this excerpt):
 * accumulate gd->ram_size from every owned region falling inside one of
 * the two fixed DDR banks, clamping each contribution at the bank end.
 */
325 sc_faddr_t start, end, end1, end2;
328 end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;
329 end2 = (sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE;
330 for (mr = 0; mr < 64; mr++) {
331 err = get_owned_memreg(mr, &start, &end);
/* Align the usable start to the 2MB MMU mapping granule */
333 start = roundup(start, MEMSTART_ALIGNMENT);
334 /* Too small memory region, not use it */
/* Region starts in bank 1: count at most up to the bank end */
338 if (start >= PHYS_SDRAM_1 && start <= end1) {
339 if ((end + 1) <= end1)
340 gd->ram_size += end - start + 1;
342 gd->ram_size += end1 - start;
/* Region starts in bank 2: same clamping against end2 */
343 } else if (start >= PHYS_SDRAM_2 && start <= end2) {
344 if ((end + 1) <= end2)
345 gd->ram_size += end - start + 1;
347 gd->ram_size += end2 - start;
352 /* If error, set to the default value */
354 gd->ram_size = PHYS_SDRAM_1_SIZE;
355 gd->ram_size += PHYS_SDRAM_2_SIZE;
/*
 * Bubble the bank at index @current_bank toward the front of
 * gd->bd->bi_dram[] so the bank table stays sorted by start address.
 * NOTE(review): the loop-variable decrement / termination lines are
 * outside this excerpt.
 */
360 static void dram_bank_sort(int current_bank)
365 while (current_bank > 0) {
/* Swap with the predecessor while it starts above the current bank */
366 if (gd->bd->bi_dram[current_bank - 1].start >
367 gd->bd->bi_dram[current_bank].start) {
/* Stash the predecessor before overwriting it */
368 start = gd->bd->bi_dram[current_bank - 1].start;
369 size = gd->bd->bi_dram[current_bank - 1].size;
371 gd->bd->bi_dram[current_bank - 1].start =
372 gd->bd->bi_dram[current_bank].start;
373 gd->bd->bi_dram[current_bank - 1].size =
374 gd->bd->bi_dram[current_bank].size;
376 gd->bd->bi_dram[current_bank].start = start;
377 gd->bd->bi_dram[current_bank].size = size;
/*
 * Fill gd->bd->bi_dram[] with up to CONFIG_NR_DRAM_BANKS owned memory
 * regions, each clamped to the DDR bank that contains it, falling back
 * to the two fixed banks on error.
 */
383 int dram_init_banksize(void)
386 sc_faddr_t start, end, end1, end2;
390 end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;
391 end2 = (sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE;
/* Stop once the bank table is full, even if more regions remain */
393 for (mr = 0; mr < 64 && i < CONFIG_NR_DRAM_BANKS; mr++) {
394 err = get_owned_memreg(mr, &start, &end);
/* Alignment can push start past end for tiny regions */
396 start = roundup(start, MEMSTART_ALIGNMENT);
397 if (start > end) /* Small memory region, no use it */
/* Region in bank 1: size clamped at end1 */
400 if (start >= PHYS_SDRAM_1 && start <= end1) {
401 gd->bd->bi_dram[i].start = start;
403 if ((end + 1) <= end1)
404 gd->bd->bi_dram[i].size =
407 gd->bd->bi_dram[i].size = end1 - start;
/* Region in bank 2: size clamped at end2 */
411 } else if (start >= PHYS_SDRAM_2 && start <= end2) {
412 gd->bd->bi_dram[i].start = start;
414 if ((end + 1) <= end2)
415 gd->bd->bi_dram[i].size =
418 gd->bd->bi_dram[i].size = end2 - start;
426 /* If error, set to the default value */
428 gd->bd->bi_dram[0].start = PHYS_SDRAM_1;
429 gd->bd->bi_dram[0].size = PHYS_SDRAM_1_SIZE;
430 gd->bd->bi_dram[1].start = PHYS_SDRAM_2;
431 gd->bd->bi_dram[1].size = PHYS_SDRAM_2_SIZE;
/*
 * MMU attributes for a block starting at @addr_start: normal cacheable
 * memory when the address lies inside either DDR bank, non-executable
 * device memory otherwise.
 */
437 static u64 get_block_attrs(sc_faddr_t addr_start)
/* Default: strongly-ordered device memory, never executable */
439 u64 attr = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE |
440 PTE_BLOCK_PXN | PTE_BLOCK_UXN;
442 if ((addr_start >= PHYS_SDRAM_1 &&
443 addr_start <= ((sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE)) ||
444 (addr_start >= PHYS_SDRAM_2 &&
445 addr_start <= ((sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE)))
446 return (PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_OUTER_SHARE);
/*
 * Size of the mappable block [addr_start, addr_end], clamped so the
 * block never extends past the end of the DDR bank containing its
 * start address.
 */
451 static u64 get_block_size(sc_faddr_t addr_start, sc_faddr_t addr_end)
453 sc_faddr_t end1, end2;
455 end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;
456 end2 = (sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE;
/* Clamp a bank-1 block that would cross the bank-1 boundary */
458 if (addr_start >= PHYS_SDRAM_1 && addr_start <= end1) {
459 if ((addr_end + 1) > end1)
460 return end1 - addr_start;
461 } else if (addr_start >= PHYS_SDRAM_2 && addr_start <= end2) {
462 if ((addr_end + 1) > end2)
463 return end2 - addr_start;
/* Fully inside one bank (or outside both): use the raw extent */
466 return (addr_end - addr_start + 1);
469 #define MAX_PTE_ENTRIES 512
470 #define MAX_MEM_MAP_REGIONS 16
/* MMU region table consumed by the generic armv8 cache/MMU code via
 * the global `mem_map` pointer; populated at runtime in enable_caches(). */
472 static struct mm_region imx8_mem_map[MAX_MEM_MAP_REGIONS];
473 struct mm_region *mem_map = imx8_mem_map;
/*
 * Build imx8_mem_map at runtime: a fixed device-register window plus
 * one identity-mapped entry per SCFW memory region owned by this
 * partition. NOTE(review): the actual cache/MMU enable calls follow on
 * lines not visible in this excerpt.
 */
475 void enable_caches(void)
478 sc_faddr_t start, end;
481 /* Create map for registers access from 0x1c000000 to 0x80000000*/
482 imx8_mem_map[0].virt = 0x1c000000UL;
483 imx8_mem_map[0].phys = 0x1c000000UL;
484 imx8_mem_map[0].size = 0x64000000UL;
485 imx8_mem_map[0].attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
486 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN;
/* One identity-mapped (virt == phys) entry per owned region */
489 for (mr = 0; mr < 64 && i < MAX_MEM_MAP_REGIONS; mr++) {
490 err = get_owned_memreg(mr, &start, &end);
492 imx8_mem_map[i].virt = start;
493 imx8_mem_map[i].phys = start;
494 imx8_mem_map[i].size = get_block_size(start, end);
495 imx8_mem_map[i].attrs = get_block_attrs(start);
/* A zero-size entry terminates the map for the armv8 MMU walker */
500 if (i < MAX_MEM_MAP_REGIONS) {
501 imx8_mem_map[i].size = 0;
502 imx8_mem_map[i].attrs = 0;
504 puts("Error, need more MEM MAP REGIONS reserved\n");
/* Dump the final map for debugging */
509 for (i = 0; i < MAX_MEM_MAP_REGIONS; i++) {
510 debug("[%d] vir = 0x%llx phys = 0x%llx size = 0x%llx attrs = 0x%llx\n",
511 i, imx8_mem_map[i].virt, imx8_mem_map[i].phys,
512 imx8_mem_map[i].size, imx8_mem_map[i].attrs);
519 #ifndef CONFIG_SYS_DCACHE_OFF
520 u64 get_page_table_size(void)
522 u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
526 * For each memory region, the max table size:
527 * 2 level 3 tables + 2 level 2 tables + 1 level 1 table
529 size = (2 + 2 + 1) * one_pt * MAX_MEM_MAP_REGIONS + one_pt;
532 * We need to duplicate our page table once to have an emergency pt to
533 * resort to when splitting page tables later on
538 * We may need to split page tables later on if dcache settings change,
539 * so reserve up to 4 (random pick) page tables for that.