1 // SPDX-License-Identifier: GPL-2.0+
13 #include <asm/cache.h>
14 #include <dm/device-internal.h>
16 #include <dm/uclass.h>
20 #include <asm/arch/sci/sci.h>
21 #include <asm/arch/sys_proto.h>
22 #include <asm/arch-imx/cpu.h>
23 #include <asm/armv8/cpu.h>
24 #include <asm/armv8/mmu.h>
25 #include <asm/setup.h>
26 #include <asm/mach-imx/boot_mode.h>
29 DECLARE_GLOBAL_DATA_PTR;
/* Magic "barker" marking a valid pass-over info block (0x504F = ASCII "PO"). */
31 #define BT_PASSOVER_TAG 0x504F
/*
 * get_pass_over_info() - locate the boot-ROM/SPL pass-over info block.
 *
 * Reads the structure at the fixed address PASS_OVER_INFO_ADDR and
 * validates it by checking the 'barker' magic against BT_PASSOVER_TAG
 * and the stored length against sizeof(struct pass_over_info_t).
 *
 * NOTE(review): return statements are elided in this view; presumably
 * returns NULL when validation fails and p otherwise -- confirm against
 * the full source.
 */
32 struct pass_over_info_t *get_pass_over_info(void)
34 struct pass_over_info_t *p =
35 (struct pass_over_info_t *)PASS_OVER_INFO_ADDR;
37 if (p->barker != BT_PASSOVER_TAG ||
38 p->len != sizeof(struct pass_over_info_t))
/*
 * arch_cpu_init() - early per-CPU architecture init hook.
 *
 * In SPL builds: optionally restores the SPL data section
 * (CONFIG_SPL_RECOVER_DATA_SECTION), then on chip revision A checks the
 * ROM pass-over info; g_ap_mu == 0 indicates U-Boot was booted from the
 * first container, in which case boot success is reported to the SCFW
 * via sc_misc_boot_status().
 */
44 int arch_cpu_init(void)
46 #if defined(CONFIG_SPL_BUILD) && defined(CONFIG_SPL_RECOVER_DATA_SECTION)
47 spl_save_restore_data();
50 #ifdef CONFIG_SPL_BUILD
51 struct pass_over_info_t *pass_over;
53 if (is_soc_rev(CHIP_REV_A)) {
54 pass_over = get_pass_over_info();
55 if (pass_over && pass_over->g_ap_mu == 0) {
57 * When ap_mu is 0, means the U-Boot booted
58 * from first container
60 sc_misc_boot_status(-1, SC_MISC_BOOT_STATUS_SUCCESS);
/*
 * arch_cpu_init_dm() - driver-model-time CPU init.
 *
 * Looks up the "fsl,imx8-mu" node in the control FDT and probes the
 * corresponding UCLASS_MISC device (the SCU/SCFW message unit), then
 * requests power for the SMMU resource via the SCFW power API.
 * NOTE(review): error-handling/return lines are elided in this view.
 */
68 int arch_cpu_init_dm(void)
73 node = fdt_node_offset_by_compatible(gd->fdt_blob, -1, "fsl,imx8-mu");
75 ret = uclass_get_device_by_of_offset(UCLASS_MISC, node, &devp);
77 printf("could not get scu %d\n", ret);
82 ret = sc_pm_set_resource_power_mode(-1, SC_R_SMMU,
/*
 * print_bootinfo() - print the detected boot device to the console.
 * Falls through to an "Unknown device" message for unrecognized values
 * (the per-device switch cases are elided in this view).
 */
91 int print_bootinfo(void)
93 enum boot_device bt_dev = get_boot_device();
128 printf("Unknown device %u\n", bt_dev);
/*
 * get_boot_device() - query the SCFW for the boot source.
 *
 * Asks the SCFW which resource the SoC booted from and maps that
 * resource to a boot_device enum value (MMC/SD, NAND, FlexSPI, SATA...).
 * Defaults to SD1_BOOT when the resource is not matched.
 */
135 enum boot_device get_boot_device(void)
137 enum boot_device boot_dev = SD1_BOOT;
141 sc_misc_get_boot_dev(-1, &dev_rsrc);
145 boot_dev = MMC1_BOOT;
154 boot_dev = NAND_BOOT;
157 boot_dev = FLEXSPI_BOOT;
160 boot_dev = SATA_BOOT;
174 #ifdef CONFIG_SERIAL_TAG
175 #define FUSE_UNIQUE_ID_WORD0 16
176 #define FUSE_UNIQUE_ID_WORD1 17
/*
 * get_board_serial() - build the board serial number from OTP fuses.
 *
 * Reads the two unique-ID fuse words through the SCFW OTP API and
 * stores them in serialnr->low / serialnr->high. On a read error the
 * value stays at its 0 initializer and an error is printed.
 */
177 void get_board_serial(struct tag_serialnr *serialnr)
180 u32 val1 = 0, val2 = 0;
186 word1 = FUSE_UNIQUE_ID_WORD0;
187 word2 = FUSE_UNIQUE_ID_WORD1;
189 err = sc_misc_otp_fuse_read(-1, word1, &val1);
190 if (err != SC_ERR_NONE) {
191 printf("%s fuse %d read error: %d\n", __func__, word1, err);
195 err = sc_misc_otp_fuse_read(-1, word2, &val2);
196 if (err != SC_ERR_NONE) {
197 printf("%s fuse %d read error: %d\n", __func__, word2, err);
200 serialnr->low = val1;
201 serialnr->high = val2;
203 #endif /*CONFIG_SERIAL_TAG*/
205 #ifdef CONFIG_ENV_IS_IN_MMC
/*
 * board_mmc_get_env_dev() - weak default mapping boot devno to the MMC
 * environment device; boards may override. Default ignores devno and
 * returns CONFIG_SYS_MMC_ENV_DEV.
 */
206 __weak int board_mmc_get_env_dev(int devno)
208 return CONFIG_SYS_MMC_ENV_DEV;
/*
 * mmc_get_env_dev() - select which MMC device holds the environment.
 *
 * Queries the SCFW for the boot resource; if the SoC did not boot from
 * SD/MMC, falls back to CONFIG_SYS_MMC_ENV_DEV, otherwise maps the boot
 * resource to a device number (mapping lines elided in this view) and
 * lets the board hook translate it.
 */
211 int mmc_get_env_dev(void)
216 sc_misc_get_boot_dev(-1, &dev_rsrc);
229 /* If not boot from sd/mmc, use default value */
230 return CONFIG_SYS_MMC_ENV_DEV;
233 return board_mmc_get_env_dev(devno);
237 #define MEMSTART_ALIGNMENT SZ_2M /* Align the memory start with 2MB */
/*
 * get_owned_memreg() - fetch the bounds of a memory region if this
 * partition owns it.
 *
 * @mr:         SCFW memory-region index to query
 * @addr_start: out, region start address
 * @addr_end:   out, region end address
 *
 * Checks ownership via sc_rm_is_memreg_owned(); only then queries the
 * region bounds. Prints an error if the info query fails.
 * NOTE(review): return-value lines are elided here -- callers appear to
 * treat 0 as "owned region returned"; confirm against the full source.
 */
239 static int get_owned_memreg(sc_rm_mr_t mr, sc_faddr_t *addr_start,
240 sc_faddr_t *addr_end)
242 sc_faddr_t start, end;
246 owned = sc_rm_is_memreg_owned(-1, mr);
248 ret = sc_rm_get_memreg_info(-1, mr, &start, &end);
250 printf("Memreg get info failed, %d\n", ret);
253 debug("0x%llx -- 0x%llx\n", start, end);
/*
 * board_mem_get_layout() - weak default SDRAM layout provider.
 *
 * Reports the base address and size of the two physical SDRAM banks
 * from the PHYS_SDRAM_* config macros; boards with a different layout
 * override this.
 */
263 __weak void board_mem_get_layout(u64 *phys_sdram_1_start,
264 u64 *phys_sdram_1_size,
265 u64 *phys_sdram_2_start,
266 u64 *phys_sdram_2_size)
268 *phys_sdram_1_start = PHYS_SDRAM_1;
269 *phys_sdram_1_size = PHYS_SDRAM_1_SIZE;
270 *phys_sdram_2_start = PHYS_SDRAM_2;
271 *phys_sdram_2_size = PHYS_SDRAM_2_SIZE;
/*
 * get_effective_memsize() - size of the memory bank U-Boot runs in.
 *
 * Walks all 64 SCFW memory regions looking for the owned region inside
 * SDRAM bank 1 that contains CONFIG_SYS_TEXT_BASE (i.e. the region
 * U-Boot itself executes from). Regions whose 2MB-aligned start exceeds
 * their end are too small and skipped. Returns the usable span of the
 * matching region, falling back to the full bank-1 size otherwise.
 */
274 phys_size_t get_effective_memsize(void)
277 sc_faddr_t start, end, end1, start_aligned;
278 u64 phys_sdram_1_start, phys_sdram_1_size;
279 u64 phys_sdram_2_start, phys_sdram_2_size;
282 board_mem_get_layout(&phys_sdram_1_start, &phys_sdram_1_size,
283 &phys_sdram_2_start, &phys_sdram_2_size);
286 end1 = (sc_faddr_t)phys_sdram_1_start + phys_sdram_1_size;
287 for (mr = 0; mr < 64; mr++) {
288 err = get_owned_memreg(mr, &start, &end);
290 start_aligned = roundup(start, MEMSTART_ALIGNMENT);
291 /* Too small memory region, not use it */
292 if (start_aligned > end)
295 /* Find the memory region runs the U-Boot */
296 if (start >= phys_sdram_1_start && start <= end1 &&
297 (start <= CONFIG_SYS_TEXT_BASE &&
298 end >= CONFIG_SYS_TEXT_BASE)) {
300 ((sc_faddr_t)phys_sdram_1_start +
302 return (end - phys_sdram_1_start + 1);
304 return phys_sdram_1_size;
309 return phys_sdram_1_size;
/*
 * NOTE(review): the enclosing function signature is elided from this
 * view; by its use of gd->ram_size this is presumably dram_init().
 * It sums the sizes of all owned SCFW memory regions that fall inside
 * either SDRAM bank, clamping each contribution at the bank end, and
 * falls back to the full bank sizes on error.
 */
315 sc_faddr_t start, end, end1, end2;
316 u64 phys_sdram_1_start, phys_sdram_1_size;
317 u64 phys_sdram_2_start, phys_sdram_2_size;
320 board_mem_get_layout(&phys_sdram_1_start, &phys_sdram_1_size,
321 &phys_sdram_2_start, &phys_sdram_2_size);
323 end1 = (sc_faddr_t)phys_sdram_1_start + phys_sdram_1_size;
324 end2 = (sc_faddr_t)phys_sdram_2_start + phys_sdram_2_size;
325 for (mr = 0; mr < 64; mr++) {
326 err = get_owned_memreg(mr, &start, &end);
328 start = roundup(start, MEMSTART_ALIGNMENT);
329 /* Too small memory region, not use it */
333 if (start >= phys_sdram_1_start && start <= end1) {
334 if ((end + 1) <= end1)
335 gd->ram_size += end - start + 1;
337 gd->ram_size += end1 - start;
338 } else if (start >= phys_sdram_2_start &&
340 if ((end + 1) <= end2)
341 gd->ram_size += end - start + 1;
343 gd->ram_size += end2 - start;
348 /* If error, set to the default value */
350 gd->ram_size = phys_sdram_1_size;
351 gd->ram_size += phys_sdram_2_size;
/*
 * dram_bank_sort() - bubble a newly added bi_dram entry into place.
 *
 * Walks backwards from @current_bank, swapping adjacent gd->bd->bi_dram
 * entries whenever the earlier one has a higher start address, so the
 * bank table stays sorted by start address after each insertion.
 */
356 static void dram_bank_sort(int current_bank)
361 while (current_bank > 0) {
362 if (gd->bd->bi_dram[current_bank - 1].start >
363 gd->bd->bi_dram[current_bank].start) {
364 start = gd->bd->bi_dram[current_bank - 1].start;
365 size = gd->bd->bi_dram[current_bank - 1].size;
367 gd->bd->bi_dram[current_bank - 1].start =
368 gd->bd->bi_dram[current_bank].start;
369 gd->bd->bi_dram[current_bank - 1].size =
370 gd->bd->bi_dram[current_bank].size;
372 gd->bd->bi_dram[current_bank].start = start;
373 gd->bd->bi_dram[current_bank].size = size;
/*
 * dram_init_banksize() - populate gd->bd->bi_dram[] from owned regions.
 *
 * Scans the 64 SCFW memory regions (up to CONFIG_NR_DRAM_BANKS banks),
 * recording each owned region that lies inside one of the two SDRAM
 * banks. The 2MB-aligned start is used, sizes are clamped at the bank
 * end, and dram_bank_sort() is presumably called per insertion (call
 * lines elided). On error the table defaults to the two full banks.
 */
379 int dram_init_banksize(void)
382 sc_faddr_t start, end, end1, end2;
384 u64 phys_sdram_1_start, phys_sdram_1_size;
385 u64 phys_sdram_2_start, phys_sdram_2_size;
388 board_mem_get_layout(&phys_sdram_1_start, &phys_sdram_1_size,
389 &phys_sdram_2_start, &phys_sdram_2_size);
391 end1 = (sc_faddr_t)phys_sdram_1_start + phys_sdram_1_size;
392 end2 = (sc_faddr_t)phys_sdram_2_start + phys_sdram_2_size;
393 for (mr = 0; mr < 64 && i < CONFIG_NR_DRAM_BANKS; mr++) {
394 err = get_owned_memreg(mr, &start, &end);
396 start = roundup(start, MEMSTART_ALIGNMENT);
397 if (start > end) /* Small memory region, no use it */
400 if (start >= phys_sdram_1_start && start <= end1) {
401 gd->bd->bi_dram[i].start = start;
403 if ((end + 1) <= end1)
404 gd->bd->bi_dram[i].size =
407 gd->bd->bi_dram[i].size = end1 - start;
411 } else if (start >= phys_sdram_2_start && start <= end2) {
412 gd->bd->bi_dram[i].start = start;
414 if ((end + 1) <= end2)
415 gd->bd->bi_dram[i].size =
418 gd->bd->bi_dram[i].size = end2 - start;
426 /* If error, set to the default value */
428 gd->bd->bi_dram[0].start = phys_sdram_1_start;
429 gd->bd->bi_dram[0].size = phys_sdram_1_size;
430 gd->bd->bi_dram[1].start = phys_sdram_2_start;
431 gd->bd->bi_dram[1].size = phys_sdram_2_size;
/*
 * get_block_attrs() - MMU page-table attributes for an address.
 *
 * Addresses inside either SDRAM bank map as normal, outer-shareable
 * memory; everything else defaults to strongly-ordered device memory
 * (nGnRnE, non-shareable, execute-never).
 */
437 static u64 get_block_attrs(sc_faddr_t addr_start)
439 u64 attr = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE |
440 PTE_BLOCK_PXN | PTE_BLOCK_UXN;
441 u64 phys_sdram_1_start, phys_sdram_1_size;
442 u64 phys_sdram_2_start, phys_sdram_2_size;
444 board_mem_get_layout(&phys_sdram_1_start, &phys_sdram_1_size,
445 &phys_sdram_2_start, &phys_sdram_2_size);
447 if ((addr_start >= phys_sdram_1_start &&
448 addr_start <= ((sc_faddr_t)phys_sdram_1_start + phys_sdram_1_size)) ||
449 (addr_start >= phys_sdram_2_start &&
450 addr_start <= ((sc_faddr_t)phys_sdram_2_start + phys_sdram_2_size)))
451 return (PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_OUTER_SHARE);
/*
 * get_block_size() - size of an MMU mapping block, clamped to its bank.
 *
 * If [addr_start, addr_end] starts inside an SDRAM bank but extends past
 * the bank end, the returned size stops at the bank boundary; otherwise
 * it is the inclusive span addr_end - addr_start + 1.
 */
456 static u64 get_block_size(sc_faddr_t addr_start, sc_faddr_t addr_end)
458 sc_faddr_t end1, end2;
459 u64 phys_sdram_1_start, phys_sdram_1_size;
460 u64 phys_sdram_2_start, phys_sdram_2_size;
462 board_mem_get_layout(&phys_sdram_1_start, &phys_sdram_1_size,
463 &phys_sdram_2_start, &phys_sdram_2_size);
466 end1 = (sc_faddr_t)phys_sdram_1_start + phys_sdram_1_size;
467 end2 = (sc_faddr_t)phys_sdram_2_start + phys_sdram_2_size;
469 if (addr_start >= phys_sdram_1_start && addr_start <= end1) {
470 if ((addr_end + 1) > end1)
471 return end1 - addr_start;
472 } else if (addr_start >= phys_sdram_2_start && addr_start <= end2) {
473 if ((addr_end + 1) > end2)
474 return end2 - addr_start;
477 return (addr_end - addr_start + 1);
480 #define MAX_PTE_ENTRIES 512
481 #define MAX_MEM_MAP_REGIONS 16
/* MMU region table built at runtime by enable_caches(); mem_map is the
 * pointer the generic ARMv8 MMU code consumes.
 */
483 static struct mm_region imx8_mem_map[MAX_MEM_MAP_REGIONS];
484 struct mm_region *mem_map = imx8_mem_map;
/*
 * enable_caches() - build the runtime MMU memory map, then enable caches.
 *
 * Entry 0 is a fixed device-memory window (0x1c000000, 0x64000000 bytes)
 * covering peripheral registers. Each owned SCFW memory region is then
 * added as a normal/device entry via get_block_size()/get_block_attrs().
 * The table is zero-terminated if space remains; otherwise an error is
 * printed (MAX_MEM_MAP_REGIONS exhausted). The actual cache-enable call
 * is elided from this view.
 */
486 void enable_caches(void)
489 sc_faddr_t start, end;
492 /* Create map for registers access from 0x1c000000 to 0x80000000*/
493 imx8_mem_map[0].virt = 0x1c000000UL;
494 imx8_mem_map[0].phys = 0x1c000000UL;
495 imx8_mem_map[0].size = 0x64000000UL;
496 imx8_mem_map[0].attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
497 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN;
500 for (mr = 0; mr < 64 && i < MAX_MEM_MAP_REGIONS; mr++) {
501 err = get_owned_memreg(mr, &start, &end);
503 imx8_mem_map[i].virt = start;
504 imx8_mem_map[i].phys = start;
505 imx8_mem_map[i].size = get_block_size(start, end);
506 imx8_mem_map[i].attrs = get_block_attrs(start);
511 if (i < MAX_MEM_MAP_REGIONS) {
512 imx8_mem_map[i].size = 0;
513 imx8_mem_map[i].attrs = 0;
515 puts("Error, need more MEM MAP REGIONS reserved\n");
520 for (i = 0; i < MAX_MEM_MAP_REGIONS; i++) {
521 debug("[%d] vir = 0x%llx phys = 0x%llx size = 0x%llx attrs = 0x%llx\n",
522 i, imx8_mem_map[i].virt, imx8_mem_map[i].phys,
523 imx8_mem_map[i].size, imx8_mem_map[i].attrs);
530 #if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
/*
 * get_page_table_size() - memory to reserve for ARMv8 page tables.
 *
 * Budgets a worst case of 5 tables (2x L3 + 2x L2 + 1x L1) per map
 * region plus one L0 table, then adds headroom for an emergency
 * duplicate and later dcache-toggle splits (per the inline comments).
 */
531 u64 get_page_table_size(void)
533 u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
537 * For each memory region, the max table size:
538 * 2 level 3 tables + 2 level 2 tables + 1 level 1 table
540 size = (2 + 2 + 1) * one_pt * MAX_MEM_MAP_REGIONS + one_pt;
543 * We need to duplicate our page table once to have an emergency pt to
544 * resort to when splitting page tables later on
549 * We may need to split page tables later on if dcache settings change,
550 * so reserve up to 4 (random pick) page tables for that.
558 #if defined(CONFIG_IMX8QM)
559 #define FUSE_MAC0_WORD0 452
560 #define FUSE_MAC0_WORD1 453
561 #define FUSE_MAC1_WORD0 454
562 #define FUSE_MAC1_WORD1 455
563 #elif defined(CONFIG_IMX8QXP)
564 #define FUSE_MAC0_WORD0 708
565 #define FUSE_MAC0_WORD1 709
566 #define FUSE_MAC1_WORD0 710
567 #define FUSE_MAC1_WORD1 711
/*
 * imx_get_mac_from_fuse() - read an Ethernet MAC address from OTP fuses.
 *
 * @dev_id: ethernet controller index; selects the MAC0 or MAC1 fuse
 *          word pair (SoC-specific word numbers defined above)
 * @mac:    out, 6-byte MAC address
 *
 * Reads the two fuse words via the SCFW OTP API and unpacks them
 * little-endian into mac[0..5]; prints an error on a failed fuse read.
 */
570 void imx_get_mac_from_fuse(int dev_id, unsigned char *mac)
572 u32 word[2], val[2] = {};
576 word[0] = FUSE_MAC0_WORD0;
577 word[1] = FUSE_MAC0_WORD1;
579 word[0] = FUSE_MAC1_WORD0;
580 word[1] = FUSE_MAC1_WORD1;
583 for (i = 0; i < 2; i++) {
584 ret = sc_misc_otp_fuse_read(-1, word[i], &val[i]);
590 mac[1] = val[0] >> 8;
591 mac[2] = val[0] >> 16;
592 mac[3] = val[0] >> 24;
594 mac[5] = val[1] >> 8;
596 debug("%s: MAC%d: %02x.%02x.%02x.%02x.%02x.%02x\n",
597 __func__, dev_id, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
600 printf("%s: fuse %d, err: %d\n", __func__, word[i], ret);
/*
 * get_cpu_rev() - encode SoC ID and silicon revision into one word.
 *
 * Queries SC_C_ID from the SCFW: bits [8:5] hold the revision and bits
 * [4:0] the chip ID, which is offset by MXC_SOC_IMX8 ("Dummy ID for
 * chip"). Result format: (id << 12) | rev.
 */
603 u32 get_cpu_rev(void)
608 ret = sc_misc_get_control(-1, SC_R_SYSTEM, SC_C_ID, &id);
612 rev = (id >> 5) & 0xf;
613 id = (id & 0x1f) + MXC_SOC_IMX8; /* Dummy ID for chip */
615 return (id << 12) | rev;
/*
 * board_boot_order() - SPL hook choosing the boot-device search order.
 *
 * Uses the detected SPL boot device; if that is FlexSPI (BOOT_DEVICE_SPI)
 * but this partition does not own the FlexSPI0 resource, falls back to
 * parallel NOR so SPL does not touch a resource it cannot access.
 */
618 void board_boot_order(u32 *spl_boot_list)
620 spl_boot_list[0] = spl_boot_device();
622 if (spl_boot_list[0] == BOOT_DEVICE_SPI) {
623 /* Check whether we own the flexspi0, if not, use NOR boot */
624 if (!sc_rm_is_resource_owned(-1, SC_R_FSPI_0))
625 spl_boot_list[0] = BOOT_DEVICE_NOR;
629 bool m4_parts_booted(void)
631 sc_rm_pt_t m4_parts[2];
634 err = sc_rm_get_resource_owner(-1, SC_R_M4_0_PID0, &m4_parts[0]);
636 printf("%s get resource [%d] owner error: %d\n", __func__,
637 SC_R_M4_0_PID0, err);
641 if (sc_pm_is_partition_started(-1, m4_parts[0]))
645 err = sc_rm_get_resource_owner(-1, SC_R_M4_1_PID0, &m4_parts[1]);
647 printf("%s get resource [%d] owner error: %d\n",
648 __func__, SC_R_M4_1_PID0, err);
652 if (sc_pm_is_partition_started(-1, m4_parts[1]))