// SPDX-License-Identifier: GPL-2.0+
/*
 * Texas Instruments' K3 R5 Remoteproc driver
 *
 * Copyright (C) 2018-2020 Texas Instruments Incorporated - https://www.ti.com/
 *	Lokesh Vutla <lokeshvutla@ti.com>
 *	Suman Anna <s-anna@ti.com>
 */
14 #include <remoteproc.h>
19 #include <dm/device_compat.h>
20 #include <linux/err.h>
21 #include <linux/kernel.h>
22 #include <linux/soc/ti/ti_sci_protocol.h>
23 #include "ti_sci_proc.h"
/*
 * R5F's view of this address can either be for ATCM or BTCM with the other
 * at address 0x0 based on loczrama signal.
 */
29 #define K3_R5_TCM_DEV_ADDR 0x41010000
31 /* R5 TI-SCI Processor Configuration Flags */
32 #define PROC_BOOT_CFG_FLAG_R5_DBG_EN 0x00000001
33 #define PROC_BOOT_CFG_FLAG_R5_DBG_NIDEN 0x00000002
34 #define PROC_BOOT_CFG_FLAG_R5_LOCKSTEP 0x00000100
35 #define PROC_BOOT_CFG_FLAG_R5_TEINIT 0x00000200
36 #define PROC_BOOT_CFG_FLAG_R5_NMFI_EN 0x00000400
37 #define PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE 0x00000800
38 #define PROC_BOOT_CFG_FLAG_R5_BTCM_EN 0x00001000
39 #define PROC_BOOT_CFG_FLAG_R5_ATCM_EN 0x00002000
40 #define PROC_BOOT_CFG_FLAG_GEN_IGN_BOOTVECTOR 0x10000000
41 /* Available from J7200 SoCs onwards */
42 #define PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS 0x00004000
44 /* R5 TI-SCI Processor Control Flags */
45 #define PROC_BOOT_CTRL_FLAG_R5_CORE_HALT 0x00000001
47 /* R5 TI-SCI Processor Status Flags */
48 #define PROC_BOOT_STATUS_FLAG_R5_WFE 0x00000001
49 #define PROC_BOOT_STATUS_FLAG_R5_WFI 0x00000002
50 #define PROC_BOOT_STATUS_FLAG_R5_CLK_GATED 0x00000004
51 #define PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED 0x00000100
/* Operational modes for the R5F cluster */
enum cluster_mode {
	CLUSTER_MODE_SPLIT = 0,
	CLUSTER_MODE_LOCKSTEP,
};
61 * struct k3_r5f_ip_data - internal data structure used for IP variations
62 * @tcm_is_double: flag to denote the larger unified TCMs in certain modes
63 * @tcm_ecc_autoinit: flag to denote the auto-initialization of TCMs for ECC
65 struct k3_r5f_ip_data {
67 bool tcm_ecc_autoinit;
71 * struct k3_r5_mem - internal memory structure
72 * @cpu_addr: MPU virtual address of the memory region
73 * @bus_addr: Bus address used to access the memory region
74 * @dev_addr: Device address from remoteproc view
75 * @size: Size of the memory region
78 void __iomem *cpu_addr;
85 * struct k3_r5f_core - K3 R5 core structure
86 * @dev: cached device pointer
87 * @cluster: pointer to the parent cluster.
88 * @reset: reset control handle
89 * @tsp: TI-SCI processor control handle
90 * @ipdata: cached pointer to R5F IP specific feature data
91 * @mem: Array of available internal memories
92 * @num_mem: Number of available memories
93 * @atcm_enable: flag to control ATCM enablement
94 * @btcm_enable: flag to control BTCM enablement
95 * @loczrama: flag to dictate which TCM is at device address 0x0
96 * @in_use: flag to tell if the core is already in use.
100 struct k3_r5f_cluster *cluster;
101 struct reset_ctl reset;
102 struct ti_sci_proc tsp;
103 struct k3_r5f_ip_data *ipdata;
104 struct k3_r5f_mem *mem;
113 * struct k3_r5f_cluster - K3 R5F Cluster structure
114 * @mode: Mode to configure the Cluster - Split or LockStep
115 * @cores: Array of pointers to R5 cores within the cluster
117 struct k3_r5f_cluster {
118 enum cluster_mode mode;
119 struct k3_r5f_core *cores[NR_CORES];
122 static bool is_primary_core(struct k3_r5f_core *core)
124 return core == core->cluster->cores[0];
127 static int k3_r5f_proc_request(struct k3_r5f_core *core)
129 struct k3_r5f_cluster *cluster = core->cluster;
132 if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
133 for (i = 0; i < NR_CORES; i++) {
134 ret = ti_sci_proc_request(&cluster->cores[i]->tsp);
139 ret = ti_sci_proc_request(&core->tsp);
146 ti_sci_proc_release(&cluster->cores[i]->tsp);
152 static void k3_r5f_proc_release(struct k3_r5f_core *core)
154 struct k3_r5f_cluster *cluster = core->cluster;
157 if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
158 for (i = 0; i < NR_CORES; i++)
159 ti_sci_proc_release(&cluster->cores[i]->tsp);
161 ti_sci_proc_release(&core->tsp);
164 static int k3_r5f_lockstep_release(struct k3_r5f_cluster *cluster)
168 dev_dbg(dev, "%s\n", __func__);
170 for (c = NR_CORES - 1; c >= 0; c--) {
171 ret = ti_sci_proc_power_domain_on(&cluster->cores[c]->tsp);
173 goto unroll_module_reset;
176 /* deassert local reset on all applicable cores */
177 for (c = NR_CORES - 1; c >= 0; c--) {
178 ret = reset_deassert(&cluster->cores[c]->reset);
180 goto unroll_local_reset;
186 while (c < NR_CORES) {
187 reset_assert(&cluster->cores[c]->reset);
192 while (c < NR_CORES) {
193 ti_sci_proc_power_domain_off(&cluster->cores[c]->tsp);
200 static int k3_r5f_split_release(struct k3_r5f_core *core)
204 dev_dbg(dev, "%s\n", __func__);
206 ret = ti_sci_proc_power_domain_on(&core->tsp);
208 dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
213 ret = reset_deassert(&core->reset);
215 dev_err(core->dev, "local-reset deassert failed, ret = %d\n",
217 if (ti_sci_proc_power_domain_off(&core->tsp))
218 dev_warn(core->dev, "module-reset assert back failed\n");
224 static int k3_r5f_prepare(struct udevice *dev)
226 struct k3_r5f_core *core = dev_get_priv(dev);
227 struct k3_r5f_cluster *cluster = core->cluster;
230 dev_dbg(dev, "%s\n", __func__);
232 if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
233 ret = k3_r5f_lockstep_release(cluster);
235 ret = k3_r5f_split_release(core);
238 dev_err(dev, "Unable to enable cores for TCM loading %d\n",
244 static int k3_r5f_core_sanity_check(struct k3_r5f_core *core)
246 struct k3_r5f_cluster *cluster = core->cluster;
249 dev_err(dev, "Invalid op: Trying to load/start on already running core %d\n",
254 if (cluster->mode == CLUSTER_MODE_LOCKSTEP && !cluster->cores[1]) {
255 printf("Secondary core is not probed in this cluster\n");
259 if (cluster->mode == CLUSTER_MODE_LOCKSTEP && !is_primary_core(core)) {
260 dev_err(dev, "Invalid op: Trying to start secondary core %d in lockstep mode\n",
265 if (cluster->mode == CLUSTER_MODE_SPLIT && !is_primary_core(core)) {
266 if (!core->cluster->cores[0]->in_use) {
267 dev_err(dev, "Invalid seq: Enable primary core before loading secondary core\n");
275 /* Zero out TCMs so that ECC can be effective on all TCM addresses */
276 void k3_r5f_init_tcm_memories(struct k3_r5f_core *core, bool auto_inited)
278 if (core->ipdata->tcm_ecc_autoinit && auto_inited)
281 if (core->atcm_enable)
282 memset(core->mem[0].cpu_addr, 0x00, core->mem[0].size);
283 if (core->btcm_enable)
284 memset(core->mem[1].cpu_addr, 0x00, core->mem[1].size);
288 * k3_r5f_load() - Load up the Remote processor image
289 * @dev: rproc device pointer
290 * @addr: Address at which image is available
291 * @size: size of the image
293 * Return: 0 if all goes good, else appropriate error message.
295 static int k3_r5f_load(struct udevice *dev, ulong addr, ulong size)
297 struct k3_r5f_core *core = dev_get_priv(dev);
299 u32 ctrl, sts, cfg = 0;
303 dev_dbg(dev, "%s addr = 0x%lx, size = 0x%lx\n", __func__, addr, size);
305 ret = k3_r5f_core_sanity_check(core);
309 ret = k3_r5f_proc_request(core);
313 ret = ti_sci_proc_get_status(&core->tsp, &boot_vector, &cfg, &ctrl,
317 mem_auto_init = !(cfg & PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS);
319 ret = k3_r5f_prepare(dev);
321 dev_err(dev, "R5f prepare failed for core %d\n",
326 k3_r5f_init_tcm_memories(core, mem_auto_init);
328 ret = rproc_elf_load_image(dev, addr, size);
330 dev_err(dev, "Loading elf failedi %d\n", ret);
334 boot_vector = rproc_elf_get_boot_addr(dev, addr);
336 dev_dbg(dev, "%s: Boot vector = 0x%llx\n", __func__, boot_vector);
338 ret = ti_sci_proc_set_config(&core->tsp, boot_vector, 0, 0);
341 k3_r5f_proc_release(core);
346 static int k3_r5f_core_halt(struct k3_r5f_core *core)
350 ret = ti_sci_proc_set_control(&core->tsp,
351 PROC_BOOT_CTRL_FLAG_R5_CORE_HALT, 0);
353 dev_err(core->dev, "Core %d failed to stop\n",
359 static int k3_r5f_core_run(struct k3_r5f_core *core)
363 ret = ti_sci_proc_set_control(&core->tsp,
364 0, PROC_BOOT_CTRL_FLAG_R5_CORE_HALT);
366 dev_err(core->dev, "Core %d failed to start\n",
375 * k3_r5f_start() - Start the remote processor
376 * @dev: rproc device pointer
378 * Return: 0 if all went ok, else return appropriate error
380 static int k3_r5f_start(struct udevice *dev)
382 struct k3_r5f_core *core = dev_get_priv(dev);
383 struct k3_r5f_cluster *cluster = core->cluster;
386 dev_dbg(dev, "%s\n", __func__);
388 ret = k3_r5f_core_sanity_check(core);
392 ret = k3_r5f_proc_request(core);
396 if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
397 if (is_primary_core(core)) {
398 for (c = NR_CORES - 1; c >= 0; c--) {
399 ret = k3_r5f_core_run(cluster->cores[c]);
401 goto unroll_core_run;
404 dev_err(dev, "Invalid op: Trying to start secondary core %d in lockstep mode\n",
410 ret = k3_r5f_core_run(core);
417 k3_r5f_proc_release(core);
421 while (c < NR_CORES) {
422 k3_r5f_core_halt(cluster->cores[c]);
426 k3_r5f_proc_release(core);
431 static int k3_r5f_split_reset(struct k3_r5f_core *core)
435 dev_dbg(dev, "%s\n", __func__);
437 if (reset_assert(&core->reset))
440 if (ti_sci_proc_power_domain_off(&core->tsp))
446 static int k3_r5f_lockstep_reset(struct k3_r5f_cluster *cluster)
450 dev_dbg(dev, "%s\n", __func__);
452 for (c = 0; c < NR_CORES; c++)
453 if (reset_assert(&cluster->cores[c]->reset))
456 /* disable PSC modules on all applicable cores */
457 for (c = 0; c < NR_CORES; c++)
458 if (ti_sci_proc_power_domain_off(&cluster->cores[c]->tsp))
464 static int k3_r5f_unprepare(struct udevice *dev)
466 struct k3_r5f_core *core = dev_get_priv(dev);
467 struct k3_r5f_cluster *cluster = core->cluster;
470 dev_dbg(dev, "%s\n", __func__);
472 if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
473 if (is_primary_core(core))
474 ret = k3_r5f_lockstep_reset(cluster);
476 ret = k3_r5f_split_reset(core);
480 dev_warn(dev, "Unable to enable cores for TCM loading %d\n",
486 static int k3_r5f_stop(struct udevice *dev)
488 struct k3_r5f_core *core = dev_get_priv(dev);
489 struct k3_r5f_cluster *cluster = core->cluster;
492 dev_dbg(dev, "%s\n", __func__);
494 ret = k3_r5f_proc_request(core);
498 core->in_use = false;
500 if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
501 if (is_primary_core(core)) {
502 for (c = 0; c < NR_CORES; c++)
503 k3_r5f_core_halt(cluster->cores[c]);
505 dev_err(dev, "Invalid op: Trying to stop secondary core in lockstep mode\n");
510 k3_r5f_core_halt(core);
513 ret = k3_r5f_unprepare(dev);
515 k3_r5f_proc_release(core);
519 static void *k3_r5f_da_to_va(struct udevice *dev, ulong da, ulong size)
521 struct k3_r5f_core *core = dev_get_priv(dev);
522 void __iomem *va = NULL;
523 phys_addr_t bus_addr;
524 u32 dev_addr, offset;
528 dev_dbg(dev, "%s\n", __func__);
533 for (i = 0; i < core->num_mems; i++) {
534 bus_addr = core->mem[i].bus_addr;
535 dev_addr = core->mem[i].dev_addr;
536 mem_size = core->mem[i].size;
538 if (da >= bus_addr && (da + size) <= (bus_addr + mem_size)) {
539 offset = da - bus_addr;
540 va = core->mem[i].cpu_addr + offset;
541 return (__force void *)va;
544 if (da >= dev_addr && (da + size) <= (dev_addr + mem_size)) {
545 offset = da - dev_addr;
546 va = core->mem[i].cpu_addr + offset;
547 return (__force void *)va;
551 /* Assume it is DDR region and return da */
552 return map_physmem(da, size, MAP_NOCACHE);
/* No-op init hook; all setup is done at probe time */
static int k3_r5f_init(struct udevice *dev)
{
	return 0;
}
/* No-op reset hook; resets are managed via start/stop paths */
static int k3_r5f_reset(struct udevice *dev)
{
	return 0;
}
565 static const struct dm_rproc_ops k3_r5f_rproc_ops = {
567 .reset = k3_r5f_reset,
568 .start = k3_r5f_start,
571 .device_to_virt = k3_r5f_da_to_va,
574 static int k3_r5f_rproc_configure(struct k3_r5f_core *core)
576 struct k3_r5f_cluster *cluster = core->cluster;
577 u32 set_cfg = 0, clr_cfg = 0, cfg, ctrl, sts;
578 bool lockstep_permitted;
582 dev_dbg(dev, "%s\n", __func__);
584 ret = ti_sci_proc_request(&core->tsp);
588 /* Do not touch boot vector now. Load will take care of it. */
589 clr_cfg |= PROC_BOOT_CFG_FLAG_GEN_IGN_BOOTVECTOR;
591 ret = ti_sci_proc_get_status(&core->tsp, &boot_vec, &cfg, &ctrl, &sts);
595 /* Sanity check for Lockstep mode */
596 lockstep_permitted = !!(sts &
597 PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED);
598 if (cluster->mode && is_primary_core(core) && !lockstep_permitted) {
599 dev_err(core->dev, "LockStep mode not permitted on this device\n");
604 /* Primary core only configuration */
605 if (is_primary_core(core)) {
606 /* always enable ARM mode */
607 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_TEINIT;
608 if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
609 set_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
610 else if (lockstep_permitted)
611 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
614 if (core->atcm_enable)
615 set_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
617 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
619 if (core->btcm_enable)
620 set_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
622 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
625 set_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
627 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
629 ret = k3_r5f_core_halt(core);
633 ret = ti_sci_proc_set_config(&core->tsp, boot_vec, set_cfg, clr_cfg);
635 ti_sci_proc_release(&core->tsp);
639 static int ti_sci_proc_of_to_priv(struct udevice *dev, struct ti_sci_proc *tsp)
644 dev_dbg(dev, "%s\n", __func__);
646 tsp->sci = ti_sci_get_by_phandle(dev, "ti,sci");
647 if (IS_ERR(tsp->sci)) {
648 dev_err(dev, "ti_sci get failed: %ld\n", PTR_ERR(tsp->sci));
649 return PTR_ERR(tsp->sci);
652 ret = dev_read_u32_array(dev, "ti,sci-proc-ids", ids, 2);
654 dev_err(dev, "Proc IDs not populated %d\n", ret);
658 tsp->ops = &tsp->sci->ops.proc_ops;
659 tsp->proc_id = ids[0];
660 tsp->host_id = ids[1];
661 tsp->dev_id = dev_read_u32_default(dev, "ti,sci-dev-id",
662 TI_SCI_RESOURCE_NULL);
663 if (tsp->dev_id == TI_SCI_RESOURCE_NULL) {
664 dev_err(dev, "Device ID not populated %d\n", ret);
671 static int k3_r5f_of_to_priv(struct k3_r5f_core *core)
675 dev_dbg(dev, "%s\n", __func__);
677 core->atcm_enable = dev_read_u32_default(core->dev, "atcm-enable", 0);
678 core->btcm_enable = dev_read_u32_default(core->dev, "btcm-enable", 1);
679 core->loczrama = dev_read_u32_default(core->dev, "loczrama", 1);
681 ret = ti_sci_proc_of_to_priv(core->dev, &core->tsp);
685 ret = reset_get_by_index(core->dev, 0, &core->reset);
687 dev_err(core->dev, "Reset lines not available: %d\n", ret);
691 core->ipdata = (struct k3_r5f_ip_data *)dev_get_driver_data(core->dev);
696 static int k3_r5f_core_of_get_memories(struct k3_r5f_core *core)
698 static const char * const mem_names[] = {"atcm", "btcm"};
699 struct udevice *dev = core->dev;
702 dev_dbg(dev, "%s\n", __func__);
704 core->num_mems = ARRAY_SIZE(mem_names);
705 core->mem = calloc(core->num_mems, sizeof(*core->mem));
709 for (i = 0; i < core->num_mems; i++) {
710 core->mem[i].bus_addr = dev_read_addr_size_name(dev,
712 (fdt_addr_t *)&core->mem[i].size);
713 if (core->mem[i].bus_addr == FDT_ADDR_T_NONE) {
714 dev_err(dev, "%s bus address not found\n",
718 core->mem[i].cpu_addr = map_physmem(core->mem[i].bus_addr,
721 if (!strcmp(mem_names[i], "atcm")) {
722 core->mem[i].dev_addr = core->loczrama ?
723 0 : K3_R5_TCM_DEV_ADDR;
725 core->mem[i].dev_addr = core->loczrama ?
726 K3_R5_TCM_DEV_ADDR : 0;
729 dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %p da 0x%x\n",
730 mem_names[i], &core->mem[i].bus_addr,
731 core->mem[i].size, core->mem[i].cpu_addr,
732 core->mem[i].dev_addr);
739 * Each R5F core within a typical R5FSS instance has a total of 64 KB of TCMs,
740 * split equally into two 32 KB banks between ATCM and BTCM. The TCMs from both
741 * cores are usable in Split-mode, but only the Core0 TCMs can be used in
742 * LockStep-mode. The newer revisions of the R5FSS IP maximizes these TCMs by
743 * leveraging the Core1 TCMs as well in certain modes where they would have
744 * otherwise been unusable (Eg: LockStep-mode on J7200 SoCs). This is done by
745 * making a Core1 TCM visible immediately after the corresponding Core0 TCM.
746 * The SoC memory map uses the larger 64 KB sizes for the Core0 TCMs, and the
747 * dts representation reflects this increased size on supported SoCs. The Core0
748 * TCM sizes therefore have to be adjusted to only half the original size in
751 static void k3_r5f_core_adjust_tcm_sizes(struct k3_r5f_core *core)
753 struct k3_r5f_cluster *cluster = core->cluster;
755 if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
758 if (!core->ipdata->tcm_is_double)
761 if (core == cluster->cores[0]) {
762 core->mem[0].size /= 2;
763 core->mem[1].size /= 2;
765 dev_dbg(core->dev, "adjusted TCM sizes, ATCM = 0x%zx BTCM = 0x%zx\n",
766 core->mem[0].size, core->mem[1].size);
771 * k3_r5f_probe() - Basic probe
772 * @dev: corresponding k3 remote processor device
774 * Return: 0 if all goes good, else appropriate error message.
776 static int k3_r5f_probe(struct udevice *dev)
778 struct k3_r5f_cluster *cluster = dev_get_priv(dev->parent);
779 struct k3_r5f_core *core = dev_get_priv(dev);
783 dev_dbg(dev, "%s\n", __func__);
786 ret = k3_r5f_of_to_priv(core);
790 core->cluster = cluster;
791 /* Assume Primary core gets probed first */
792 if (!cluster->cores[0])
793 cluster->cores[0] = core;
795 cluster->cores[1] = core;
797 ret = k3_r5f_core_of_get_memories(core);
799 dev_err(dev, "Rproc getting internal memories failed\n");
803 ret = core->tsp.sci->ops.dev_ops.is_on(core->tsp.sci, core->tsp.dev_id,
804 &r_state, &core->in_use);
809 dev_info(dev, "Core %d is already in use. No rproc commands work\n",
814 /* Make sure Local reset is asserted. Redundant? */
815 reset_assert(&core->reset);
817 ret = k3_r5f_rproc_configure(core);
819 dev_err(dev, "rproc configure failed %d\n", ret);
823 k3_r5f_core_adjust_tcm_sizes(core);
825 dev_dbg(dev, "Remoteproc successfully probed\n");
830 static int k3_r5f_remove(struct udevice *dev)
832 struct k3_r5f_core *core = dev_get_priv(dev);
836 ti_sci_proc_release(&core->tsp);
841 static const struct k3_r5f_ip_data k3_data = {
842 .tcm_is_double = false,
843 .tcm_ecc_autoinit = false,
846 static const struct k3_r5f_ip_data j7200_data = {
847 .tcm_is_double = true,
848 .tcm_ecc_autoinit = true,
851 static const struct udevice_id k3_r5f_rproc_ids[] = {
852 { .compatible = "ti,am654-r5f", .data = (ulong)&k3_data, },
853 { .compatible = "ti,j721e-r5f", .data = (ulong)&k3_data, },
854 { .compatible = "ti,j7200-r5f", .data = (ulong)&j7200_data, },
858 U_BOOT_DRIVER(k3_r5f_rproc) = {
859 .name = "k3_r5f_rproc",
860 .of_match = k3_r5f_rproc_ids,
861 .id = UCLASS_REMOTEPROC,
862 .ops = &k3_r5f_rproc_ops,
863 .probe = k3_r5f_probe,
864 .remove = k3_r5f_remove,
865 .priv_auto_alloc_size = sizeof(struct k3_r5f_core),
868 static int k3_r5f_cluster_probe(struct udevice *dev)
870 struct k3_r5f_cluster *cluster = dev_get_priv(dev);
872 dev_dbg(dev, "%s\n", __func__);
874 cluster->mode = dev_read_u32_default(dev, "lockstep-mode",
875 CLUSTER_MODE_LOCKSTEP);
877 if (device_get_child_count(dev) != 2) {
878 dev_err(dev, "Invalid number of R5 cores");
882 dev_dbg(dev, "%s: Cluster successfully probed in %s mode\n",
883 __func__, cluster->mode ? "lockstep" : "split");
888 static const struct udevice_id k3_r5fss_ids[] = {
889 { .compatible = "ti,am654-r5fss"},
890 { .compatible = "ti,j721e-r5fss"},
891 { .compatible = "ti,j7200-r5fss"},
895 U_BOOT_DRIVER(k3_r5fss) = {
897 .of_match = k3_r5fss_ids,
899 .probe = k3_r5f_cluster_probe,
900 .priv_auto_alloc_size = sizeof(struct k3_r5f_cluster),
901 .flags = DM_FLAG_DEFAULT_PD_CTRL_OFF,