1 // SPDX-License-Identifier: GPL-2.0+
3 * K3: Common Architecture initialization
5 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
6 * Lokesh Vutla <lokeshvutla@ti.com>
15 #include <asm/global_data.h>
18 #include <remoteproc.h>
19 #include <asm/cache.h>
20 #include <linux/soc/ti/ti_sci_protocol.h>
21 #include <fdt_support.h>
22 #include <asm/arch/sys_proto.h>
23 #include <asm/hardware.h>
25 #include <fs_loader.h>
#if IS_ENABLED(CONFIG_SYS_K3_SPL_ATF)
#if CONFIG_IS_ENABLED(FIT_IMAGE_POST_PROCESS)
/*
 * FIT image "os" property values used to recognize images SPL must track.
 * Indexed by the IMAGE_ID_* values used elsewhere in this file.
 * NOTE(review): only the ATF entry is visible here; the remaining
 * IMAGE_AMT - 1 initializers and the closing brace appear to be missing
 * from this view of the file.
 */
static const char *image_os_match[IMAGE_AMT] = {
	"arm-trusted-firmware",

/* Load address/size of each recognized FIT image, recorded by
 * board_fit_image_post_process() and consumed when starting ATF. */
static struct image_info fit_image_info[IMAGE_AMT];
52 struct ti_sci_handle *get_ti_sci_handle(void)
57 ret = uclass_get_device_by_driver(UCLASS_FIRMWARE,
58 DM_DRIVER_GET(ti_sci), &dev);
60 panic("Failed to get SYSFW (%d)\n", ret);
62 return (struct ti_sci_handle *)ti_sci_get_handle_from_sysfw(dev);
65 void k3_sysfw_print_ver(void)
67 struct ti_sci_handle *ti_sci = get_ti_sci_handle();
68 char fw_desc[sizeof(ti_sci->version.firmware_description) + 1];
71 * Output System Firmware version info. Note that since the
72 * 'firmware_description' field is not guaranteed to be zero-
73 * terminated we manually add a \0 terminator if needed. Further
74 * note that we intentionally no longer rely on the extended
75 * printf() formatter '%.*s' to not having to require a more
76 * full-featured printf() implementation.
78 strncpy(fw_desc, ti_sci->version.firmware_description,
79 sizeof(ti_sci->version.firmware_description));
80 fw_desc[sizeof(fw_desc) - 1] = '\0';
82 printf("SYSFW ABI: %d.%d (firmware rev 0x%04x '%s')\n",
83 ti_sci->version.abi_major, ti_sci->version.abi_minor,
84 ti_sci->version.firmware_revision, fw_desc);
87 void mmr_unlock(phys_addr_t base, u32 partition)
89 /* Translate the base address */
90 phys_addr_t part_base = base + partition * CTRL_MMR0_PARTITION_SIZE;
92 /* Unlock the requested partition if locked using two-step sequence */
93 writel(CTRLMMR_LOCK_KICK0_UNLOCK_VAL, part_base + CTRLMMR_LOCK_KICK0);
94 writel(CTRLMMR_LOCK_KICK1_UNLOCK_VAL, part_base + CTRLMMR_LOCK_KICK1);
97 bool is_rom_loaded_sysfw(struct rom_extended_boot_data *data)
99 if (strncmp(data->header, K3_ROM_BOOT_HEADER_MAGIC, 7))
102 return data->num_components > 1;
/* Give this file access to U-Boot's global data pointer (gd) */
DECLARE_GLOBAL_DATA_PTR;
107 #ifdef CONFIG_K3_EARLY_CONS
108 int early_console_init(void)
113 gd->baudrate = CONFIG_BAUDRATE;
115 ret = uclass_get_device_by_seq(UCLASS_SERIAL, CONFIG_K3_EARLY_CONS_IDX,
118 printf("Error getting serial dev for early console! (%d)\n",
123 gd->cur_serial_dev = dev;
124 gd->flags |= GD_FLG_SERIAL_READY;
125 gd->have_console = 1;
#if IS_ENABLED(CONFIG_SYS_K3_SPL_ATF)
/*
 * NOTE(review): the lines below look like the interior of an
 * environment-setup helper — selecting where later firmware loads come
 * from based on the SPL boot device — but the enclosing function
 * definition, the 'part' declaration, the 'break' statements and the
 * 'default:' label are not visible in this view of the file.
 */
#ifdef CONFIG_SPL_ENV_SUPPORT
	switch (spl_boot_device()) {
	case BOOT_DEVICE_MMC2:
		/* Reuse the MMC boot partition for firmware lookup */
		part = env_get("bootpart");
		env_set("storage_interface", "mmc");
		env_set("fw_dev_part", part);
	case BOOT_DEVICE_SPI:
		/* Firmware lives in a UBI volume on SPI flash */
		env_set("storage_interface", "ubi");
		env_set("fw_ubi_mtdpart", "UBI");
		env_set("fw_ubi_volume", "UBI0");
		/* Any other boot device: no firmware source configured */
		printf("%s from device %u not supported!\n",
		       __func__, spl_boot_device());
#ifdef CONFIG_FS_LOADER
/**
 * load_firmware() - Load a firmware blob through the fs_loader framework
 * @name_fw:       env variable naming the firmware file to load
 * @name_loadaddr: env variable naming the hex load address
 * @loadaddr:      in/out load address for the firmware
 *
 * NOTE(review): several lines (the 'name'/'size' declarations, 'break'
 * and 'default:' in the switch, the closing of the #ifdef, and the final
 * 'return size;') are not visible in this view of the file.
 */
int load_firmware(char *name_fw, char *name_loadaddr, u32 *loadaddr)
	struct udevice *fsdev;
#ifdef CONFIG_SPL_ENV_SUPPORT
	/* Resolve file name and load address from the environment */
	switch (spl_boot_device()) {
	case BOOT_DEVICE_MMC2:
		name = env_get(name_fw);
		*loadaddr = env_get_hex(name_loadaddr, *loadaddr);
		printf("Loading rproc fw image from device %u not supported!\n",
	/* Hand the actual read off to the fs_loader uclass */
	if (!uclass_get_device(UCLASS_FS_FIRMWARE_LOADER, 0, &fsdev)) {
		size = request_firmware_into_buf(fsdev, name, (void *)*loadaddr,
/*
 * Stub variant used when CONFIG_FS_LOADER is disabled.
 * NOTE(review): its body (presumably 'return 0;') is not visible here.
 */
int load_firmware(char *name_fw, char *name_loadaddr, u32 *loadaddr)
196 __weak void release_resources_for_core_shutdown(void)
198 debug("%s not implemented...\n", __func__);
/**
 * jump_to_image_no_args() - Hand off from SPL (R5) to ATF on the ARM64 core
 * @spl_image: describes the image SPL just loaded
 *
 * Releases TI-SCI exclusive devices held by SPL, loads ATF (and, when
 * needed, the DM firmware ELF) onto the remote processors, starts ATF on
 * remoteproc 1, then releases resources before the R5 stops.
 *
 * NOTE(review): several lines are not visible in this view of the file:
 * the 'loadaddr' declaration, the rproc_init() call preceding the panic
 * below, the 'ret' checks guarding the panics, several closing braces,
 * the shut_cpu handling and the final image_entry()/shutdown path.
 */
void __noreturn jump_to_image_no_args(struct spl_image_info *spl_image)
	typedef void __noreturn (*image_entry_noargs_t)(void);
	struct ti_sci_handle *ti_sci = get_ti_sci_handle();
	int ret, size = 0, shut_cpu = 0;

	/* Release all the exclusive devices held by SPL before starting ATF */
	ti_sci->ops.dev_ops.release_exclusive_devices(ti_sci);
		panic("rproc failed to be initialized (%d)\n", ret);
	/* DM firmware not bundled in FIT: fetch it via load_firmware() */
	if (!fit_image_info[IMAGE_ID_DM_FW].image_start) {
		size = load_firmware("name_mcur5f0_0fw", "addr_mcur5f0_0load",
	/*
	 * It is assumed that remoteproc device 1 is the corresponding
	 * Cortex-A core which runs ATF. Make sure DT reflects the same.
	 */
	if (!fit_image_info[IMAGE_ID_ATF].image_start)
		fit_image_info[IMAGE_ID_ATF].image_start =
			spl_image->entry_point;

	ret = rproc_load(1, fit_image_info[IMAGE_ID_ATF].image_start, 0x200);
		panic("%s: ATF failed to load on rproc (%d)\n", __func__, ret);
	/* No usable DM firmware from either the FIT or load_firmware() */
	if (!fit_image_info[IMAGE_ID_DM_FW].image_len &&
	    !(size > 0 && valid_elf_image(loadaddr))) {
	if (!fit_image_info[IMAGE_ID_DM_FW].image_start) {
		loadaddr = load_elf_image_phdr(loadaddr);
		loadaddr = fit_image_info[IMAGE_ID_DM_FW].image_start;
		if (valid_elf_image(loadaddr))
			loadaddr = load_elf_image_phdr(loadaddr);

	debug("%s: jumping to address %x\n", __func__, loadaddr);

	/* Add an extra newline to differentiate the ATF logs from SPL */
	printf("Starting ATF on ARM64 core...\n\n");

	ret = rproc_start(1);
		panic("%s: ATF failed to start on rproc (%d)\n", __func__, ret);

	debug("Shutting down...\n");
	release_resources_for_core_shutdown();
	image_entry_noargs_t image_entry = (image_entry_noargs_t)loadaddr;
#if CONFIG_IS_ENABLED(FIT_IMAGE_POST_PROCESS)
/**
 * board_fit_image_post_process() - Record (and optionally authenticate)
 * images extracted from the FIT.
 *
 * Matches each image's "os" property against image_os_match[] and stores
 * its entry address and size in fit_image_info[] for later use when
 * starting ATF.
 *
 * NOTE(review): the remaining parameters of the signature, the
 * 'os'/'addr'/'len'/'i' declarations, loop/if closing braces and the
 * surrounding #endif lines are not visible in this view of the file.
 */
void board_fit_image_post_process(const void *fit, int node, void **p_image,
#if IS_ENABLED(CONFIG_SYS_K3_SPL_ATF)
	os = fdt_getprop(fit, node, "os", &len);
	/* Entry point from the FIT node; -1 when no "entry" property */
	addr = fdt_getprop_u32_default_node(fit, node, 0, "entry", -1);

	debug("%s: processing image: addr=%x, size=%d, os=%s\n", __func__,

	/* Remember start/size of every image SPL cares about */
	for (i = 0; i < IMAGE_AMT; i++) {
		if (!strcmp(os, image_os_match[i])) {
			fit_image_info[i].image_start = addr;
			fit_image_info[i].image_len = *p_size;
			debug("%s: matched image for ID %d\n", __func__, i);

#if IS_ENABLED(CONFIG_TI_SECURE_DEVICE)
	/* On HS devices, authenticate/decrypt the image in place */
	ti_secure_image_post_process(p_image, p_size);
#if defined(CONFIG_OF_LIBFDT)
/**
 * fdt_fixup_msmc_ram() - Describe the usable MSMC SRAM in the device tree
 * @blob:        FDT blob to fix up
 * @parent_path: path of the parent node under which to place the node
 * @node_name:   name of the mmio-sram node to create/update
 *
 * Queries SYSFW for the MSMC region available to the host, writes the
 * corresponding reg/ranges properties, and deletes subnodes that fall
 * outside the region or are reserved (sysfw, l3cache).
 *
 * NOTE(review): error checks after the fdt calls, the range[0]
 * assignment, the prev_node bookkeeping, the advance of sub_reg between
 * the two fdt_read_number() calls, closing braces and the final return
 * are not visible in this view of the file.
 */
int fdt_fixup_msmc_ram(void *blob, char *parent_path, char *node_name)
	u64 msmc_start = 0, msmc_end = 0, msmc_size, reg[2];
	struct ti_sci_handle *ti_sci = get_ti_sci_handle();
	int ret, node, subnode, len, prev_node;
	u32 range[4], addr, size;
	const fdt32_t *sub_reg;

	/* Ask System Firmware for the usable MSMC address window */
	ti_sci->ops.core_ops.query_msmc(ti_sci, &msmc_start, &msmc_end);
	msmc_size = msmc_end - msmc_start + 1;
	debug("%s: msmc_start = 0x%llx, msmc_size = 0x%llx\n", __func__,
	      msmc_start, msmc_size);

	/* find or create "msmc_sram" node */
	ret = fdt_path_offset(blob, parent_path);
	node = fdt_find_or_add_subnode(blob, ret, node_name);
	ret = fdt_setprop_string(blob, node, "compatible", "mmio-sram");

	/* 64-bit reg = <start size> as seen by the interconnect */
	reg[0] = cpu_to_fdt64(msmc_start);
	reg[1] = cpu_to_fdt64(msmc_size);
	ret = fdt_setprop(blob, node, "reg", reg, sizeof(reg));

	fdt_setprop_cell(blob, node, "#address-cells", 1);
	fdt_setprop_cell(blob, node, "#size-cells", 1);

	/* ranges maps the 1-cell child space onto the 64-bit parent space */
	range[1] = cpu_to_fdt32(msmc_start >> 32);
	range[2] = cpu_to_fdt32(msmc_start & 0xffffffff);
	range[3] = cpu_to_fdt32(msmc_size);
	ret = fdt_setprop(blob, node, "ranges", range, sizeof(range));

	subnode = fdt_first_subnode(blob, node);

	/* Look for invalid subnodes and delete them */
	while (subnode >= 0) {
		sub_reg = fdt_getprop(blob, subnode, "reg", &len);
		addr = fdt_read_number(sub_reg, 1);
		size = fdt_read_number(sub_reg, 1);
		debug("%s: subnode = %d, addr = 0x%x. size = 0x%x\n", __func__,
		      subnode, addr, size);
		/* Drop out-of-range regions and the reserved sysfw/l3cache */
		if (addr + size > msmc_size ||
		    !strncmp(fdt_get_name(blob, subnode, &len), "sysfw", 5) ||
		    !strncmp(fdt_get_name(blob, subnode, &len), "l3cache", 7)) {
			fdt_del_node(blob, subnode);
			debug("%s: deleting subnode %d\n", __func__, subnode);
			/* Re-scan from the start after a deletion */
			subnode = fdt_first_subnode(blob, node);
			subnode = fdt_next_subnode(blob, prev_node);
		subnode = fdt_next_subnode(blob, prev_node);
/**
 * fdt_disable_node() - Set a device tree node's status to "disabled"
 * @blob:      FDT blob to modify
 * @node_path: full path of the node to disable
 *
 * Return: 0 on success, negative libfdt error code if the node is not
 * found or the property cannot be written.
 */
int fdt_disable_node(void *blob, char *node_path)
{
	int offs;
	int ret;

	offs = fdt_path_offset(blob, node_path);
	if (offs < 0) {
		printf("Node %s not found.\n", node_path);
		return offs;
	}

	ret = fdt_setprop_string(blob, offs, "status", "disabled");
	if (ret < 0) {
		printf("Could not add status property to node %s: %s\n",
		       node_path, fdt_strerror(ret));
		return ret;
	}

	return 0;
}
#ifndef CONFIG_SYSRESET
/* NOTE(review): the definition this guard encloses (presumably a
 * reset_cpu() stub) is not visible in this view of the file. */

#if defined(CONFIG_DISPLAY_CPUINFO)
/**
 * print_cpuinfo() - Print the SoC family name and silicon revision.
 *
 * NOTE(review): the soc uclass device lookup, the 'soc'/'name'/'ret'
 * declarations, the error handling on each call and the print of the
 * family string are not visible in this view of the file.
 */
int print_cpuinfo(void)
	/* Query the family name (up to 64 bytes) into 'name' */
	ret = soc_get_family(soc, name, 64);
	/* Query the silicon revision into the same buffer */
	ret = soc_get_revision(soc, name, 64);
	printf("%s\n", name);
/**
 * soc_is_j721e() - Identify a J721E SoC from the JTAG ID register.
 *
 * NOTE(review): the declaration of 'soc' and the final comparison/return
 * against the J721E part number constant are not visible in this view.
 */
bool soc_is_j721e(void)
	/* Extract the part-number field from the wakeup-domain JTAG ID */
	soc = (readl(CTRLMMR_WKUP_JTAG_ID) &
	       JTAG_ID_PARTNO_MASK) >> JTAG_ID_PARTNO_SHIFT;
/**
 * soc_is_j7200() - Identify a J7200 SoC from the JTAG ID register.
 *
 * NOTE(review): the declaration of 'soc' and the final comparison/return
 * against the J7200 part number constant are not visible in this view.
 */
bool soc_is_j7200(void)
	/* Extract the part-number field from the wakeup-domain JTAG ID */
	soc = (readl(CTRLMMR_WKUP_JTAG_ID) &
	       JTAG_ID_PARTNO_MASK) >> JTAG_ID_PARTNO_SHIFT;
453 void board_prep_linux(bootm_headers_t *images)
455 debug("Linux kernel Image start = 0x%lx end = 0x%lx\n",
456 images->os.start, images->os.end);
457 __asm_flush_dcache_range(images->os.start,
458 ROUND(images->os.end,
459 CONFIG_SYS_CACHELINE_SIZE));
463 #ifdef CONFIG_CPU_V7R
464 void disable_linefill_optimization(void)
469 * On K3 devices there are 2 conditions where R5F can deadlock:
470 * 1.When software is performing series of store operations to
471 * cacheable write back/write allocate memory region and later
472 * on software execute barrier operation (DSB or DMB). R5F may
473 * hang at the barrier instruction.
474 * 2.When software is performing a mix of load and store operations
475 * within a tight loop and store operations are all writing to
476 * cacheable write back/write allocates memory regions, R5F may
477 * hang at one of the load instruction.
479 * To avoid the above two conditions disable linefill optimization
482 asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (actlr));
483 actlr |= (1 << 13); /* Set DLFO bit */
484 asm("mcr p15, 0, %0, c1, c0, 1" : : "r" (actlr));
488 void remove_fwl_configs(struct fwl_data *fwl_data, size_t fwl_data_size)
490 struct ti_sci_msg_fwl_region region;
491 struct ti_sci_fwl_ops *fwl_ops;
492 struct ti_sci_handle *ti_sci;
495 ti_sci = get_ti_sci_handle();
496 fwl_ops = &ti_sci->ops.fwl_ops;
497 for (i = 0; i < fwl_data_size; i++) {
498 for (j = 0; j < fwl_data[i].regions; j++) {
499 region.fwl_id = fwl_data[i].fwl_id;
501 region.n_permission_regs = 3;
503 fwl_ops->get_fwl_region(ti_sci, ®ion);
505 if (region.control != 0) {
506 pr_debug("Attempting to disable firewall %5d (%25s)\n",
507 region.fwl_id, fwl_data[i].name);
510 if (fwl_ops->set_fwl_region(ti_sci, ®ion))
511 pr_err("Could not disable firewall %5d (%25s)\n",
512 region.fwl_id, fwl_data[i].name);
/**
 * spl_enable_dcache() - Set up page tables and enable the D-cache in SPL
 *
 * Places the MMU translation table just below the top of (32-bit
 * addressable) DRAM before enabling the cache.
 *
 * NOTE(review): braces, any reservation of the table against relocation,
 * the actual dcache_enable() call and the closing #endif are not visible
 * in this view of the file.
 */
void spl_enable_dcache(void)
#if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF))
	phys_addr_t ram_top = CONFIG_SYS_SDRAM_BASE;

	dram_init_banksize();

	/* reserve TLB table */
	gd->arch.tlb_size = PGTABLE_SIZE;

	ram_top += get_effective_memsize();
	/* keep ram_top in the 32-bit address space */
	if (ram_top >= 0x100000000)
		ram_top = (phys_addr_t) 0x100000000;

	/* Page table sits at the very top of usable RAM */
	gd->arch.tlb_addr = ram_top - gd->arch.tlb_size;
	debug("TLB table from %08lx to %08lx\n", gd->arch.tlb_addr,
	      gd->arch.tlb_addr + gd->arch.tlb_size);
#if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF))
/* NOTE(review): the bodies of the two hooks below are not visible in
 * this view of the file — presumably they disable the D-cache before
 * handing control to the next stage; confirm against the full source. */
void spl_board_prepare_for_boot(void)

void spl_board_prepare_for_linux(void)