1 // SPDX-License-Identifier: GPL-2.0+
3 * Texas Instruments' K3 R5 Remoteproc driver
5 * Copyright (C) 2018-2020 Texas Instruments Incorporated - https://www.ti.com/
6 * Lokesh Vutla <lokeshvutla@ti.com>
7 * Suman Anna <s-anna@ti.com>
14 #include <remoteproc.h>
19 #include <dm/device_compat.h>
20 #include <linux/err.h>
21 #include <linux/kernel.h>
22 #include <linux/soc/ti/ti_sci_protocol.h>
23 #include "ti_sci_proc.h"
/*
 * SoC-view base address of the R5F internal TCMs.
 * NOTE(review): this capture is truncated — comment delimiters and some
 * lines are missing; the leading digits on each line are a pasted-in
 * line-number gutter, not code.
 */
26 * R5F's view of this address can either be for ATCM or BTCM with the other
27 * at address 0x0 based on loczrama signal.
29 #define K3_R5_TCM_DEV_ADDR 0x41010000
31 /* R5 TI-SCI Processor Configuration Flags */
32 #define PROC_BOOT_CFG_FLAG_R5_DBG_EN 0x00000001
33 #define PROC_BOOT_CFG_FLAG_R5_DBG_NIDEN 0x00000002
34 #define PROC_BOOT_CFG_FLAG_R5_LOCKSTEP 0x00000100
/* TEINIT: exception handling state at reset (cleared => ARM mode) */
35 #define PROC_BOOT_CFG_FLAG_R5_TEINIT 0x00000200
36 #define PROC_BOOT_CFG_FLAG_R5_NMFI_EN 0x00000400
/* TCM_RSTBASE: select TCM reset base address behavior */
37 #define PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE 0x00000800
38 #define PROC_BOOT_CFG_FLAG_R5_BTCM_EN 0x00001000
39 #define PROC_BOOT_CFG_FLAG_R5_ATCM_EN 0x00002000
40 #define PROC_BOOT_CFG_FLAG_GEN_IGN_BOOTVECTOR 0x10000000
41 /* Available from J7200 SoCs onwards */
42 #define PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS 0x00004000
44 /* R5 TI-SCI Processor Control Flags */
45 #define PROC_BOOT_CTRL_FLAG_R5_CORE_HALT 0x00000001
47 /* R5 TI-SCI Processor Status Flags */
48 #define PROC_BOOT_STATUS_FLAG_R5_WFE 0x00000001
49 #define PROC_BOOT_STATUS_FLAG_R5_WFI 0x00000002
50 #define PROC_BOOT_STATUS_FLAG_R5_CLK_GATED 0x00000004
51 #define PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED 0x00000100
/*
 * Cluster operating modes. Split runs the two cores independently;
 * lockstep runs them in a fault-tolerant lock-step pair.
 * NOTE(review): enum/struct framing lines are missing in this capture.
 */
56 CLUSTER_MODE_SPLIT = 0,
57 CLUSTER_MODE_LOCKSTEP,
61 * struct k3_r5f_ip_data - internal data structure used for IP variations
62 * @tcm_is_double: flag to denote the larger unified TCMs in certain modes
63 * @tcm_ecc_autoinit: flag to denote the auto-initialization of TCMs for ECC
65 struct k3_r5f_ip_data {
67 bool tcm_ecc_autoinit;
71 * struct k3_r5_mem - internal memory structure
72 * @cpu_addr: MPU virtual address of the memory region
73 * @bus_addr: Bus address used to access the memory region
74 * @dev_addr: Device address from remoteproc view
75 * @size: Size of the memory region
78 void __iomem *cpu_addr;
85 * struct k3_r5f_core - K3 R5 core structure
86 * @dev: cached device pointer
87 * @cluster: pointer to the parent cluster.
88 * @reset: reset control handle
89 * @tsp: TI-SCI processor control handle
90 * @ipdata: cached pointer to R5F IP specific feature data
91 * @mem: Array of available internal memories
92 * @num_mem: Number of available memories
93 * @atcm_enable: flag to control ATCM enablement
94 * @btcm_enable: flag to control BTCM enablement
95 * @loczrama: flag to dictate which TCM is at device address 0x0
96 * @in_use: flag to tell if the core is already in use.
100 struct k3_r5f_cluster *cluster;
101 struct reset_ctl reset;
102 struct ti_sci_proc tsp;
103 struct k3_r5f_ip_data *ipdata;
104 struct k3_r5f_mem *mem;
113 * struct k3_r5f_cluster - K3 R5F Cluster structure
114 * @mode: Mode to configure the Cluster - Split or LockStep
115 * @cores: Array of pointers to R5 cores within the cluster
117 struct k3_r5f_cluster {
118 enum cluster_mode mode;
119 struct k3_r5f_core *cores[NR_CORES];
122 static bool is_primary_core(struct k3_r5f_core *core)
124 return core == core->cluster->cores[0];
/*
 * Claim the TI-SCI processor control handle(s) needed to operate @core:
 * both cores' handles in lockstep mode, only @core's handle otherwise.
 * NOTE(review): the error-check and unwind lines are missing from this
 * capture; line 146 appears to be part of a release-on-failure loop.
 */
127 static int k3_r5f_proc_request(struct k3_r5f_core *core)
129 struct k3_r5f_cluster *cluster = core->cluster;
132 if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
/* lockstep: request every core's TI-SCI proc handle */
133 for (i = 0; i < NR_CORES; i++) {
134 ret = ti_sci_proc_request(&cluster->cores[i]->tsp);
/* split: only this core's handle is needed */
139 ret = ti_sci_proc_request(&core->tsp);
/* unwind: release already-requested handles on failure */
146 ti_sci_proc_release(&cluster->cores[i]->tsp);
152 static void k3_r5f_proc_release(struct k3_r5f_core *core)
154 struct k3_r5f_cluster *cluster = core->cluster;
157 if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
158 for (i = 0; i < NR_CORES; i++)
159 ti_sci_proc_release(&cluster->cores[i]->tsp);
161 ti_sci_proc_release(&core->tsp);
/*
 * Power up the whole cluster for lockstep operation: module reset
 * (power domain) is released on all cores first, then the local resets,
 * each in descending core order. On failure, already-released resources
 * are unwound via the unroll labels below.
 * NOTE(review): capture is truncated — the declaration lines, error
 * checks and label lines are missing.
 */
164 static int k3_r5f_lockstep_release(struct k3_r5f_cluster *cluster)
168 debug("%s\n", __func__);
/* enable PSC module (deassert module reset) on all cores, core1 first */
170 for (c = NR_CORES - 1; c >= 0; c--) {
171 ret = ti_sci_proc_power_domain_on(&cluster->cores[c]->tsp);
173 goto unroll_module_reset;
176 /* deassert local reset on all applicable cores */
177 for (c = NR_CORES - 1; c >= 0; c--) {
178 ret = reset_deassert(&cluster->cores[c]->reset);
180 goto unroll_local_reset;
/* unroll_local_reset: re-assert local resets released so far */
186 while (c < NR_CORES) {
187 reset_assert(&cluster->cores[c]->reset);
/* unroll_module_reset: power the modules back off */
192 while (c < NR_CORES) {
193 ti_sci_proc_power_domain_off(&cluster->cores[c]->tsp);
200 static int k3_r5f_split_release(struct k3_r5f_core *core)
204 dev_dbg(core->dev, "%s\n", __func__);
206 ret = ti_sci_proc_power_domain_on(&core->tsp);
208 dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
213 ret = reset_deassert(&core->reset);
215 dev_err(core->dev, "local-reset deassert failed, ret = %d\n",
217 if (ti_sci_proc_power_domain_off(&core->tsp))
218 dev_warn(core->dev, "module-reset assert back failed\n");
224 static int k3_r5f_prepare(struct udevice *dev)
226 struct k3_r5f_core *core = dev_get_priv(dev);
227 struct k3_r5f_cluster *cluster = core->cluster;
230 dev_dbg(dev, "%s\n", __func__);
232 if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
233 ret = k3_r5f_lockstep_release(cluster);
235 ret = k3_r5f_split_release(core);
238 dev_err(dev, "Unable to enable cores for TCM loading %d\n",
/*
 * Validate that a load/start operation is legal for @core given the
 * cluster mode and current state: reject running cores, an unprobed
 * secondary core in lockstep mode, direct ops on the lockstep secondary,
 * and starting a split-mode secondary before the primary is in use.
 * NOTE(review): capture is truncated — the dev_err() call lines and
 * return statements around these message strings are missing.
 */
244 static int k3_r5f_core_sanity_check(struct k3_r5f_core *core)
246 struct k3_r5f_cluster *cluster = core->cluster;
250 "Invalid op: Trying to load/start on already running core %d\n",
255 if (cluster->mode == CLUSTER_MODE_LOCKSTEP && !cluster->cores[1]) {
257 "Secondary core is not probed in this cluster\n");
261 if (cluster->mode == CLUSTER_MODE_LOCKSTEP && !is_primary_core(core)) {
263 "Invalid op: Trying to start secondary core %d in lockstep mode\n",
268 if (cluster->mode == CLUSTER_MODE_SPLIT && !is_primary_core(core)) {
/* split-mode secondary requires the primary core to be enabled first */
269 if (!core->cluster->cores[0]->in_use) {
271 "Invalid seq: Enable primary core before loading secondary core\n");
279 /* Zero out TCMs so that ECC can be effective on all TCM addresses */
280 void k3_r5f_init_tcm_memories(struct k3_r5f_core *core, bool auto_inited)
/* skip when the IP auto-initializes TCMs and hardware reported it did */
282 if (core->ipdata->tcm_ecc_autoinit && auto_inited)
/* mem[0] is ATCM, mem[1] is BTCM (see k3_r5f_core_of_get_memories) */
285 if (core->atcm_enable)
286 memset(core->mem[0].cpu_addr, 0x00, core->mem[0].size);
287 if (core->btcm_enable)
288 memset(core->mem[1].cpu_addr, 0x00, core->mem[1].size);
292 * k3_r5f_load() - Load up the Remote processor image
293 * @dev: rproc device pointer
294 * @addr: Address at which image is available
295 * @size: size of the image
297 * Return: 0 if all goes good, else appropriate error message.
299 static int k3_r5f_load(struct udevice *dev, ulong addr, ulong size)
301 struct k3_r5f_core *core = dev_get_priv(dev);
303 u32 ctrl, sts, cfg = 0;
307 dev_dbg(dev, "%s addr = 0x%lx, size = 0x%lx\n", __func__, addr, size);
309 ret = k3_r5f_core_sanity_check(core);
313 ret = k3_r5f_proc_request(core);
317 ret = ti_sci_proc_get_status(&core->tsp, &boot_vector, &cfg, &ctrl,
321 mem_auto_init = !(cfg & PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS);
323 ret = k3_r5f_prepare(dev);
325 dev_err(dev, "R5f prepare failed for core %d\n",
330 k3_r5f_init_tcm_memories(core, mem_auto_init);
332 ret = rproc_elf_load_image(dev, addr, size);
334 dev_err(dev, "Loading elf failedi %d\n", ret);
338 boot_vector = rproc_elf_get_boot_addr(dev, addr);
340 dev_dbg(dev, "%s: Boot vector = 0x%llx\n", __func__, boot_vector);
342 ret = ti_sci_proc_set_config(&core->tsp, boot_vector, 0, 0);
345 k3_r5f_proc_release(core);
350 static int k3_r5f_core_halt(struct k3_r5f_core *core)
354 ret = ti_sci_proc_set_control(&core->tsp,
355 PROC_BOOT_CTRL_FLAG_R5_CORE_HALT, 0);
357 dev_err(core->dev, "Core %d failed to stop\n",
363 static int k3_r5f_core_run(struct k3_r5f_core *core)
367 ret = ti_sci_proc_set_control(&core->tsp,
368 0, PROC_BOOT_CTRL_FLAG_R5_CORE_HALT);
370 dev_err(core->dev, "Core %d failed to start\n",
/*
 * NOTE(review): capture is truncated — error checks, the
 * unroll_core_run label and closing braces are missing below.
 */
379 * k3_r5f_start() - Start the remote processor
380 * @dev: rproc device pointer
382 * Return: 0 if all went ok, else return appropriate error
384 static int k3_r5f_start(struct udevice *dev)
386 struct k3_r5f_core *core = dev_get_priv(dev);
387 struct k3_r5f_cluster *cluster = core->cluster;
390 dev_dbg(dev, "%s\n", __func__);
392 ret = k3_r5f_core_sanity_check(core);
396 ret = k3_r5f_proc_request(core);
400 if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
401 if (is_primary_core(core)) {
/* lockstep: un-halt both cores, core1 before core0 */
402 for (c = NR_CORES - 1; c >= 0; c--) {
403 ret = k3_r5f_core_run(cluster->cores[c]);
405 goto unroll_core_run;
408 dev_err(dev, "Invalid op: Trying to start secondary core %d in lockstep mode\n",
/* split mode: un-halt just this core */
414 ret = k3_r5f_core_run(core);
421 k3_r5f_proc_release(core);
/* unroll_core_run: halt any cores already started, then release */
425 while (c < NR_CORES) {
426 k3_r5f_core_halt(cluster->cores[c]);
430 k3_r5f_proc_release(core);
435 static int k3_r5f_split_reset(struct k3_r5f_core *core)
439 dev_dbg(core->dev, "%s\n", __func__);
441 if (reset_assert(&core->reset))
444 if (ti_sci_proc_power_domain_off(&core->tsp))
450 static int k3_r5f_lockstep_reset(struct k3_r5f_cluster *cluster)
454 debug("%s\n", __func__);
456 for (c = 0; c < NR_CORES; c++)
457 if (reset_assert(&cluster->cores[c]->reset))
460 /* disable PSC modules on all applicable cores */
461 for (c = 0; c < NR_CORES; c++)
462 if (ti_sci_proc_power_domain_off(&cluster->cores[c]->tsp))
/*
 * Counterpart of k3_r5f_prepare(): put the core(s) back into reset per
 * the cluster mode. In lockstep mode only the primary core triggers the
 * cluster-wide reset.
 * NOTE(review): the warn message below says "enable ... for TCM
 * loading" on what is the reset/disable path — looks copy-pasted from
 * k3_r5f_prepare(); verify against upstream before changing.
 */
468 static int k3_r5f_unprepare(struct udevice *dev)
470 struct k3_r5f_core *core = dev_get_priv(dev);
471 struct k3_r5f_cluster *cluster = core->cluster;
474 dev_dbg(dev, "%s\n", __func__);
476 if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
477 if (is_primary_core(core))
478 ret = k3_r5f_lockstep_reset(cluster);
480 ret = k3_r5f_split_reset(core);
484 dev_warn(dev, "Unable to enable cores for TCM loading %d\n",
/*
 * Stop the remote processor: halt the core(s) (both cores when the
 * primary of a lockstep pair is stopped), put them back into reset via
 * k3_r5f_unprepare(), and release the TI-SCI proc handle(s).
 * NOTE(review): capture is truncated — error checks and closing braces
 * are missing below.
 */
490 static int k3_r5f_stop(struct udevice *dev)
492 struct k3_r5f_core *core = dev_get_priv(dev);
493 struct k3_r5f_cluster *cluster = core->cluster;
496 dev_dbg(dev, "%s\n", __func__);
498 ret = k3_r5f_proc_request(core);
/* mark free before halting so sanity checks allow a future restart */
502 core->in_use = false;
504 if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
505 if (is_primary_core(core)) {
506 for (c = 0; c < NR_CORES; c++)
507 k3_r5f_core_halt(cluster->cores[c]);
509 dev_err(dev, "Invalid op: Trying to stop secondary core in lockstep mode\n");
514 k3_r5f_core_halt(core);
517 ret = k3_r5f_unprepare(dev);
519 k3_r5f_proc_release(core);
/*
 * Translate a remoteproc device address @da to an MPU-usable virtual
 * address. Each internal memory is matched against both its SoC bus
 * address and its R5-view device address; anything outside the internal
 * memories is assumed to be DDR and mapped 1:1 uncached.
 */
523 static void *k3_r5f_da_to_va(struct udevice *dev, ulong da, ulong size)
525 struct k3_r5f_core *core = dev_get_priv(dev);
526 void __iomem *va = NULL;
527 phys_addr_t bus_addr;
528 u32 dev_addr, offset;
532 dev_dbg(dev, "%s\n", __func__);
537 for (i = 0; i < core->num_mems; i++) {
538 bus_addr = core->mem[i].bus_addr;
539 dev_addr = core->mem[i].dev_addr;
540 mem_size = core->mem[i].size;
/* match against the SoC-view (bus) address range */
542 if (da >= bus_addr && (da + size) <= (bus_addr + mem_size)) {
543 offset = da - bus_addr;
544 va = core->mem[i].cpu_addr + offset;
545 return (__force void *)va;
/* match against the R5-view (device) address range */
548 if (da >= dev_addr && (da + size) <= (dev_addr + mem_size)) {
549 offset = da - dev_addr;
550 va = core->mem[i].cpu_addr + offset;
551 return (__force void *)va;
555 /* Assume it is DDR region and return da */
556 return map_physmem(da, size, MAP_NOCACHE);
/*
 * No-op init/reset hooks and the remoteproc ops table.
 * NOTE(review): capture is truncated — the stub bodies and some ops
 * members (presumably .init/.stop/.load) are missing here.
 */
559 static int k3_r5f_init(struct udevice *dev)
564 static int k3_r5f_reset(struct udevice *dev)
569 static const struct dm_rproc_ops k3_r5f_rproc_ops = {
571 .reset = k3_r5f_reset,
572 .start = k3_r5f_start,
575 .device_to_virt = k3_r5f_da_to_va,
/*
 * One-time boot configuration of @core over TI-SCI at probe time:
 * validates lockstep capability, selects lockstep/split and ARM mode
 * (primary core only), applies ATCM/BTCM enables and the TCM reset-base
 * per loczrama, halts the core, then writes the set/clear config masks.
 * NOTE(review): capture is truncated — error checks, else branches and
 * closing braces are missing below.
 */
578 static int k3_r5f_rproc_configure(struct k3_r5f_core *core)
580 struct k3_r5f_cluster *cluster = core->cluster;
581 u32 set_cfg = 0, clr_cfg = 0, cfg, ctrl, sts;
582 bool lockstep_permitted;
586 dev_dbg(core->dev, "%s\n", __func__);
588 ret = ti_sci_proc_request(&core->tsp);
592 /* Do not touch boot vector now. Load will take care of it. */
593 clr_cfg |= PROC_BOOT_CFG_FLAG_GEN_IGN_BOOTVECTOR;
595 ret = ti_sci_proc_get_status(&core->tsp, &boot_vec, &cfg, &ctrl, &sts);
599 /* Sanity check for Lockstep mode */
600 lockstep_permitted = !!(sts &
601 PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED);
602 if (cluster->mode && is_primary_core(core) && !lockstep_permitted) {
603 dev_err(core->dev, "LockStep mode not permitted on this device\n");
608 /* Primary core only configuration */
609 if (is_primary_core(core)) {
610 /* always enable ARM mode */
611 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_TEINIT;
612 if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
613 set_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
614 else if (lockstep_permitted)
615 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
618 if (core->atcm_enable)
619 set_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
621 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
623 if (core->btcm_enable)
624 set_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
626 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
/* TCM_RSTBASE set/cleared based on the loczrama flag (branch missing) */
629 set_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
631 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
/* core must be halted before reconfiguration takes effect */
633 ret = k3_r5f_core_halt(core);
637 ret = ti_sci_proc_set_config(&core->tsp, boot_vec, set_cfg, clr_cfg);
639 ti_sci_proc_release(&core->tsp);
/*
 * Parse the TI-SCI handle and "ti,sci-proc-ids"/"ti,sci-dev-id"
 * properties from the device tree into @tsp.
 * NOTE(review): the "Device ID not populated %d" message prints `ret`,
 * which at that point holds the (successful) result of the previous
 * dev_read_u32_array() call — the value printed is meaningless; the
 * actual returned error line is missing from this capture.
 */
643 static int ti_sci_proc_of_to_priv(struct udevice *dev, struct ti_sci_proc *tsp)
648 dev_dbg(dev, "%s\n", __func__);
650 tsp->sci = ti_sci_get_by_phandle(dev, "ti,sci");
651 if (IS_ERR(tsp->sci)) {
652 dev_err(dev, "ti_sci get failed: %ld\n", PTR_ERR(tsp->sci));
653 return PTR_ERR(tsp->sci);
656 ret = dev_read_u32_array(dev, "ti,sci-proc-ids", ids, 2);
658 dev_err(dev, "Proc IDs not populated %d\n", ret);
662 tsp->ops = &tsp->sci->ops.proc_ops;
663 tsp->proc_id = ids[0];
664 tsp->host_id = ids[1];
665 tsp->dev_id = dev_read_u32_default(dev, "ti,sci-dev-id",
666 TI_SCI_RESOURCE_NULL);
667 if (tsp->dev_id == TI_SCI_RESOURCE_NULL) {
668 dev_err(dev, "Device ID not populated %d\n", ret);
/*
 * Parse all per-core device tree data: TCM enables and loczrama flags
 * (with their hardware-default fallbacks), the TI-SCI processor handle,
 * the local reset line, and the per-SoC IP feature data.
 */
675 static int k3_r5f_of_to_priv(struct k3_r5f_core *core)
679 dev_dbg(core->dev, "%s\n", __func__);
/* defaults mirror hardware reset values: ATCM off, BTCM on, loczrama 1 */
681 core->atcm_enable = dev_read_u32_default(core->dev, "atcm-enable", 0);
682 core->btcm_enable = dev_read_u32_default(core->dev, "btcm-enable", 1);
683 core->loczrama = dev_read_u32_default(core->dev, "loczrama", 1);
685 ret = ti_sci_proc_of_to_priv(core->dev, &core->tsp);
689 ret = reset_get_by_index(core->dev, 0, &core->reset);
691 dev_err(core->dev, "Reset lines not available: %d\n", ret);
/* per-compatible feature data set in the of_match table below */
695 core->ipdata = (struct k3_r5f_ip_data *)dev_get_driver_data(core->dev);
/*
 * Discover the core's internal memories ("atcm" then "btcm") from the
 * device tree, map them, and compute each region's R5-view device
 * address from the loczrama flag (whichever TCM is not at 0x0 sits at
 * K3_R5_TCM_DEV_ADDR). mem[0] is always ATCM, mem[1] BTCM.
 * NOTE(review): capture is truncated — allocation check, error returns
 * and closing braces are missing.
 */
700 static int k3_r5f_core_of_get_memories(struct k3_r5f_core *core)
702 static const char * const mem_names[] = {"atcm", "btcm"};
703 struct udevice *dev = core->dev;
706 dev_dbg(dev, "%s\n", __func__);
708 core->num_mems = ARRAY_SIZE(mem_names);
709 core->mem = calloc(core->num_mems, sizeof(*core->mem));
713 for (i = 0; i < core->num_mems; i++) {
714 core->mem[i].bus_addr = dev_read_addr_size_name(dev,
716 (fdt_addr_t *)&core->mem[i].size);
717 if (core->mem[i].bus_addr == FDT_ADDR_T_NONE) {
718 dev_err(dev, "%s bus address not found\n",
722 core->mem[i].cpu_addr = map_physmem(core->mem[i].bus_addr,
/* loczrama selects which TCM appears at device address 0x0 */
725 if (!strcmp(mem_names[i], "atcm")) {
726 core->mem[i].dev_addr = core->loczrama ?
727 0 : K3_R5_TCM_DEV_ADDR;
729 core->mem[i].dev_addr = core->loczrama ?
730 K3_R5_TCM_DEV_ADDR : 0;
733 dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %p da 0x%x\n",
734 mem_names[i], &core->mem[i].bus_addr,
735 core->mem[i].size, core->mem[i].cpu_addr,
736 core->mem[i].dev_addr);
/*
 * NOTE(review): capture is truncated — the opening comment delimiter,
 * early-return statements and closing braces are missing below.
 */
743 * Each R5F core within a typical R5FSS instance has a total of 64 KB of TCMs,
744 * split equally into two 32 KB banks between ATCM and BTCM. The TCMs from both
745 * cores are usable in Split-mode, but only the Core0 TCMs can be used in
746 * LockStep-mode. The newer revisions of the R5FSS IP maximizes these TCMs by
747 * leveraging the Core1 TCMs as well in certain modes where they would have
748 * otherwise been unusable (Eg: LockStep-mode on J7200 SoCs). This is done by
749 * making a Core1 TCM visible immediately after the corresponding Core0 TCM.
750 * The SoC memory map uses the larger 64 KB sizes for the Core0 TCMs, and the
751 * dts representation reflects this increased size on supported SoCs. The Core0
752 * TCM sizes therefore have to be adjusted to only half the original size in
755 static void k3_r5f_core_adjust_tcm_sizes(struct k3_r5f_core *core)
757 struct k3_r5f_cluster *cluster = core->cluster;
/* no adjustment needed in lockstep mode or on single-size TCM IPs */
759 if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
762 if (!core->ipdata->tcm_is_double)
/* only Core0's dts-reported (doubled) TCM sizes need halving */
765 if (core == cluster->cores[0]) {
766 core->mem[0].size /= 2;
767 core->mem[1].size /= 2;
769 dev_dbg(core->dev, "adjusted TCM sizes, ATCM = 0x%zx BTCM = 0x%zx\n",
770 core->mem[0].size, core->mem[1].size);
/*
 * NOTE(review): capture is truncated — error checks, the else branch
 * of the core-slot assignment and closing braces are missing below.
 */
775 * k3_r5f_probe() - Basic probe
776 * @dev: corresponding k3 remote processor device
778 * Return: 0 if all goes good, else appropriate error message.
780 static int k3_r5f_probe(struct udevice *dev)
782 struct k3_r5f_cluster *cluster = dev_get_priv(dev->parent);
783 struct k3_r5f_core *core = dev_get_priv(dev);
787 dev_dbg(dev, "%s\n", __func__);
790 ret = k3_r5f_of_to_priv(core);
794 core->cluster = cluster;
795 /* Assume Primary core gets probed first */
796 if (!cluster->cores[0])
797 cluster->cores[0] = core;
799 cluster->cores[1] = core;
801 ret = k3_r5f_core_of_get_memories(core);
803 dev_err(dev, "Rproc getting internal memories failed\n");
/* query TI-SCI whether the core is already powered/claimed elsewhere */
807 ret = core->tsp.sci->ops.dev_ops.is_on(core->tsp.sci, core->tsp.dev_id,
808 &r_state, &core->in_use);
813 dev_info(dev, "Core %d is already in use. No rproc commands work\n",
818 /* Make sure Local reset is asserted. Redundant? */
819 reset_assert(&core->reset);
821 ret = k3_r5f_rproc_configure(core);
823 dev_err(dev, "rproc configure failed %d\n", ret);
827 k3_r5f_core_adjust_tcm_sizes(core);
829 dev_dbg(dev, "Remoteproc successfully probed\n");
/*
 * Driver remove: release the TI-SCI processor control handle.
 * NOTE(review): intermediate lines are missing from this capture
 * (presumably freeing core->mem); verify against upstream.
 */
834 static int k3_r5f_remove(struct udevice *dev)
836 struct k3_r5f_core *core = dev_get_priv(dev);
840 ti_sci_proc_release(&core->tsp);
/*
 * Per-SoC IP feature data and the core driver registration. J7200-class
 * IP has doubled Core0 TCMs and hardware ECC auto-init; older K3 SoCs
 * (AM654, J721E) have neither.
 * NOTE(review): closing "};" lines are missing in this capture.
 */
845 static const struct k3_r5f_ip_data k3_data = {
846 .tcm_is_double = false,
847 .tcm_ecc_autoinit = false,
850 static const struct k3_r5f_ip_data j7200_data = {
851 .tcm_is_double = true,
852 .tcm_ecc_autoinit = true,
855 static const struct udevice_id k3_r5f_rproc_ids[] = {
856 { .compatible = "ti,am654-r5f", .data = (ulong)&k3_data, },
857 { .compatible = "ti,j721e-r5f", .data = (ulong)&k3_data, },
858 { .compatible = "ti,j7200-r5f", .data = (ulong)&j7200_data, },
862 U_BOOT_DRIVER(k3_r5f_rproc) = {
863 .name = "k3_r5f_rproc",
864 .of_match = k3_r5f_rproc_ids,
865 .id = UCLASS_REMOTEPROC,
866 .ops = &k3_r5f_rproc_ops,
867 .probe = k3_r5f_probe,
868 .remove = k3_r5f_remove,
869 .priv_auto = sizeof(struct k3_r5f_core),
872 static int k3_r5f_cluster_probe(struct udevice *dev)
874 struct k3_r5f_cluster *cluster = dev_get_priv(dev);
876 dev_dbg(dev, "%s\n", __func__);
878 cluster->mode = dev_read_u32_default(dev, "lockstep-mode",
879 CLUSTER_MODE_LOCKSTEP);
881 if (device_get_child_count(dev) != 2) {
882 dev_err(dev, "Invalid number of R5 cores");
886 dev_dbg(dev, "%s: Cluster successfully probed in %s mode\n",
887 __func__, cluster->mode ? "lockstep" : "split");
/*
 * Cluster (R5FSS) match table and driver registration. The cluster
 * driver keeps its power domain off by default
 * (DM_FLAG_DEFAULT_PD_CTRL_OFF); the per-core driver powers things up.
 * NOTE(review): closing "};", the .name and .id members are missing in
 * this capture.
 */
892 static const struct udevice_id k3_r5fss_ids[] = {
893 { .compatible = "ti,am654-r5fss"},
894 { .compatible = "ti,j721e-r5fss"},
895 { .compatible = "ti,j7200-r5fss"},
899 U_BOOT_DRIVER(k3_r5fss) = {
901 .of_match = k3_r5fss_ids,
903 .probe = k3_r5f_cluster_probe,
904 .priv_auto = sizeof(struct k3_r5f_cluster),
905 .flags = DM_FLAG_DEFAULT_PD_CTRL_OFF,