1 // SPDX-License-Identifier: GPL-2.0+
4 * Texas Instruments Incorporated - http://www.ti.com/
7 #define LOG_CATEGORY UCLASS_REMOTEPROC
9 #define pr_fmt(fmt) "%s: " fmt, __func__
15 #include <virtio_ring.h>
16 #include <remoteproc.h>
18 #include <dm/device-internal.h>
20 #include <dm/uclass.h>
21 #include <dm/uclass-internal.h>
22 #include <linux/compat.h>
24 DECLARE_GLOBAL_DATA_PTR;
26 struct resource_table {
33 typedef int (*handle_resource_t) (struct udevice *, void *, int offset, int avail);
35 static struct resource_table *rsc_table;
38 * for_each_remoteproc_device() - iterate through the list of rproc devices
39 * @fn: check function to call per match, if this function returns fail,
40 * iteration is aborted with the resultant error value
41 * @skip_dev: Device to skip calling the callback about.
42 * @data: Data to pass to the callback function
44 * Return: 0 if none of the callback returned a non 0 result, else returns the
45 * result from the callback function
47 static int for_each_remoteproc_device(int (*fn) (struct udevice *dev,
48 struct dm_rproc_uclass_pdata *uc_pdata,
50 struct udevice *skip_dev,
54 struct dm_rproc_uclass_pdata *uc_pdata;
57 for (ret = uclass_find_first_device(UCLASS_REMOTEPROC, &dev); dev;
58 ret = uclass_find_next_device(&dev)) {
59 if (ret || dev == skip_dev)
61 uc_pdata = dev_get_uclass_plat(dev);
62 ret = fn(dev, uc_pdata, data);
71 * _rproc_name_is_unique() - iteration helper to check if rproc name is unique
72 * @dev: device that we are checking name for
73 * @uc_pdata: uclass platform data
74 * @data: compare data (this is the name we want to ensure is unique)
76 * Return: 0 is there is no match(is unique); if there is a match(we dont
77 * have a unique name), return -EINVAL.
79 static int _rproc_name_is_unique(struct udevice *dev,
80 struct dm_rproc_uclass_pdata *uc_pdata,
83 const char *check_name = data;
85 /* devices not yet populated with data - so skip them */
86 if (!uc_pdata->name || !check_name)
89 /* Return 0 to search further if we dont match */
90 if (strlen(uc_pdata->name) != strlen(check_name))
93 if (!strcmp(uc_pdata->name, check_name))
100 * rproc_name_is_unique() - Check if the rproc name is unique
101 * @check_dev: Device we are attempting to ensure is unique
102 * @check_name: Name we are trying to ensure is unique.
104 * Return: true if we have a unique name, false if name is not unique.
106 static bool rproc_name_is_unique(struct udevice *check_dev,
107 const char *check_name)
111 ret = for_each_remoteproc_device(_rproc_name_is_unique,
112 check_dev, check_name);
113 return ret ? false : true;
117 * rproc_pre_probe() - Pre probe accessor for the uclass
118 * @dev: device for which we are preprobing
120 * Parses and fills up the uclass pdata for use as needed by core and
121 * remote proc drivers.
123 * Return: 0 if all wernt ok, else appropriate error value.
125 static int rproc_pre_probe(struct udevice *dev)
127 struct dm_rproc_uclass_pdata *uc_pdata;
128 const struct dm_rproc_ops *ops;
130 uc_pdata = dev_get_uclass_plat(dev);
132 /* See if we need to populate via fdt */
134 if (!dev_get_plat(dev)) {
135 #if CONFIG_IS_ENABLED(OF_CONTROL)
137 debug("'%s': using fdt\n", dev->name);
138 uc_pdata->name = dev_read_string(dev, "remoteproc-name");
140 /* Default is internal memory mapped */
141 uc_pdata->mem_type = RPROC_INTERNAL_MEMORY_MAPPED;
142 tmp = dev_read_bool(dev, "remoteproc-internal-memory-mapped");
144 uc_pdata->mem_type = RPROC_INTERNAL_MEMORY_MAPPED;
146 /* Nothing much we can do about this, can we? */
151 struct dm_rproc_uclass_pdata *pdata = dev_get_plat(dev);
153 debug("'%s': using legacy data\n", dev->name);
155 uc_pdata->name = pdata->name;
156 uc_pdata->mem_type = pdata->mem_type;
157 uc_pdata->driver_plat_data = pdata->driver_plat_data;
160 /* Else try using device Name */
162 uc_pdata->name = dev->name;
163 if (!uc_pdata->name) {
164 debug("Unnamed device!");
168 if (!rproc_name_is_unique(dev, uc_pdata->name)) {
169 debug("%s duplicate name '%s'\n", dev->name, uc_pdata->name);
173 ops = rproc_get_ops(dev);
175 debug("%s driver has no ops?\n", dev->name);
179 if (!ops->load || !ops->start) {
180 debug("%s driver has missing mandatory ops?\n", dev->name);
188 * rproc_post_probe() - post probe accessor for the uclass
189 * @dev: deivce we finished probing
191 * initiate init function after the probe is completed. This allows
192 * the remote processor drivers to split up the initializations between
193 * probe and init as needed.
195 * Return: if the remote proc driver has a init routine, invokes it and
196 * hands over the return value. overall, 0 if all went well, else appropriate
199 static int rproc_post_probe(struct udevice *dev)
201 const struct dm_rproc_ops *ops;
203 ops = rproc_get_ops(dev);
205 debug("%s driver has no ops?\n", dev->name);
210 return ops->init(dev);
216 * rproc_add_res() - After parsing the resource table add the mappings
217 * @dev: device we finished probing
218 * @mapping: rproc_mem_entry for the resource
220 * Return: if the remote proc driver has a add_res routine, invokes it and
221 * hands over the return value. overall, 0 if all went well, else appropriate
224 static int rproc_add_res(struct udevice *dev, struct rproc_mem_entry *mapping)
226 const struct dm_rproc_ops *ops = rproc_get_ops(dev);
231 return ops->add_res(dev, mapping);
235 * rproc_alloc_mem() - After parsing the resource table allocat mem
236 * @dev: device we finished probing
237 * @len: rproc_mem_entry for the resource
238 * @align: alignment for the resource
240 * Return: if the remote proc driver has a add_res routine, invokes it and
241 * hands over the return value. overall, 0 if all went well, else appropriate
244 static void *rproc_alloc_mem(struct udevice *dev, unsigned long len,
247 const struct dm_rproc_ops *ops;
249 ops = rproc_get_ops(dev);
251 debug("%s driver has no ops?\n", dev->name);
256 return ops->alloc_mem(dev, len, align);
262 * rproc_config_pagetable() - Configure page table for remote processor
263 * @dev: device we finished probing
264 * @virt: Virtual address of the resource
265 * @phys: Physical address the resource
266 * @len: length the resource
268 * Return: if the remote proc driver has a add_res routine, invokes it and
269 * hands over the return value. overall, 0 if all went well, else appropriate
272 static int rproc_config_pagetable(struct udevice *dev, unsigned int virt,
273 unsigned int phys, unsigned int len)
275 const struct dm_rproc_ops *ops;
277 ops = rproc_get_ops(dev);
279 debug("%s driver has no ops?\n", dev->name);
283 if (ops->config_pagetable)
284 return ops->config_pagetable(dev, virt, phys, len);
289 UCLASS_DRIVER(rproc) = {
290 .id = UCLASS_REMOTEPROC,
291 .name = "remoteproc",
292 .flags = DM_UC_FLAG_SEQ_ALIAS,
293 .pre_probe = rproc_pre_probe,
294 .post_probe = rproc_post_probe,
295 .per_device_plat_auto = sizeof(struct dm_rproc_uclass_pdata),
298 /* Remoteproc subsystem access functions */
300 * _rproc_probe_dev() - iteration helper to probe a rproc device
301 * @dev: device to probe
302 * @uc_pdata: uclass data allocated for the device
305 * Return: 0 if all ok, else appropriate error value.
307 static int _rproc_probe_dev(struct udevice *dev,
308 struct dm_rproc_uclass_pdata *uc_pdata,
313 ret = device_probe(dev);
316 debug("%s: Failed to initialize - %d\n", dev->name, ret);
321 * _rproc_dev_is_probed() - check if the device has been probed
322 * @dev: device to check
326 * Return: -EAGAIN if not probed else return 0
328 static int _rproc_dev_is_probed(struct udevice *dev,
329 struct dm_rproc_uclass_pdata *uc_pdata,
332 if (dev_get_flags(dev) & DM_FLAG_ACTIVATED)
338 bool rproc_is_initialized(void)
340 int ret = for_each_remoteproc_device(_rproc_dev_is_probed, NULL, NULL);
341 return ret ? false : true;
348 if (rproc_is_initialized()) {
349 debug("Already initialized\n");
353 ret = for_each_remoteproc_device(_rproc_probe_dev, NULL, NULL);
357 int rproc_dev_init(int id)
359 struct udevice *dev = NULL;
362 ret = uclass_get_device_by_seq(UCLASS_REMOTEPROC, id, &dev);
364 debug("Unknown remote processor id '%d' requested(%d)\n",
369 ret = device_probe(dev);
371 debug("%s: Failed to initialize - %d\n", dev->name, ret);
376 int rproc_load(int id, ulong addr, ulong size)
378 struct udevice *dev = NULL;
379 struct dm_rproc_uclass_pdata *uc_pdata;
380 const struct dm_rproc_ops *ops;
383 ret = uclass_get_device_by_seq(UCLASS_REMOTEPROC, id, &dev);
385 debug("Unknown remote processor id '%d' requested(%d)\n",
390 uc_pdata = dev_get_uclass_plat(dev);
392 ops = rproc_get_ops(dev);
394 debug("%s driver has no ops?\n", dev->name);
398 debug("Loading to '%s' from address 0x%08lX size of %lu bytes\n",
399 uc_pdata->name, addr, size);
401 return ops->load(dev, addr, size);
403 debug("%s: data corruption?? mandatory function is missing!\n",
410 * Completely internal helper enums..
411 * Keeping this isolated helps this code evolve independent of other
423 * _rproc_ops_wrapper() - wrapper for invoking remote proc driver callback
424 * @id: id of the remote processor
425 * @op: one of rproc_ops that indicate what operation to invoke
427 * Most of the checks and verification for remoteproc operations are more
428 * or less same for almost all operations. This allows us to put a wrapper
429 * and use the common checks to allow the driver to function appropriately.
431 * Return: 0 if all ok, else appropriate error value.
433 static int _rproc_ops_wrapper(int id, enum rproc_ops op)
435 struct udevice *dev = NULL;
436 struct dm_rproc_uclass_pdata *uc_pdata;
437 const struct dm_rproc_ops *ops;
438 int (*fn)(struct udevice *dev);
439 bool mandatory = false;
443 ret = uclass_get_device_by_seq(UCLASS_REMOTEPROC, id, &dev);
445 debug("Unknown remote processor id '%d' requested(%d)\n",
450 uc_pdata = dev_get_uclass_plat(dev);
452 ops = rproc_get_ops(dev);
454 debug("%s driver has no ops?\n", dev->name);
469 op_str = "Resetting";
472 fn = ops->is_running;
473 op_str = "Checking if running:";
480 debug("what is '%d' operation??\n", op);
484 debug("%s %s...\n", op_str, uc_pdata->name);
489 debug("%s: data corruption?? mandatory function is missing!\n",
495 int rproc_start(int id)
497 return _rproc_ops_wrapper(id, RPROC_START);
500 int rproc_stop(int id)
502 return _rproc_ops_wrapper(id, RPROC_STOP);
505 int rproc_reset(int id)
507 return _rproc_ops_wrapper(id, RPROC_RESET);
510 int rproc_ping(int id)
512 return _rproc_ops_wrapper(id, RPROC_PING);
515 int rproc_is_running(int id)
517 return _rproc_ops_wrapper(id, RPROC_RUNNING);
521 static int handle_trace(struct udevice *dev, struct fw_rsc_trace *rsc,
522 int offset, int avail)
524 if (sizeof(*rsc) > avail) {
525 debug("trace rsc is truncated\n");
530 * make sure reserved bytes are zeroes
533 debug("trace rsc has non zero reserved bytes\n");
537 debug("trace rsc: da 0x%x, len 0x%x\n", rsc->da, rsc->len);
542 static int handle_devmem(struct udevice *dev, struct fw_rsc_devmem *rsc,
543 int offset, int avail)
545 struct rproc_mem_entry *mapping;
547 if (sizeof(*rsc) > avail) {
548 debug("devmem rsc is truncated\n");
553 * make sure reserved bytes are zeroes
556 debug("devmem rsc has non zero reserved bytes\n");
560 debug("devmem rsc: pa 0x%x, da 0x%x, len 0x%x\n",
561 rsc->pa, rsc->da, rsc->len);
563 rproc_config_pagetable(dev, rsc->da, rsc->pa, rsc->len);
565 mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
570 * We'll need this info later when we'll want to unmap everything
571 * (e.g. on shutdown).
573 * We can't trust the remote processor not to change the resource
574 * table, so we must maintain this info independently.
576 mapping->dma = rsc->pa;
577 mapping->da = rsc->da;
578 mapping->len = rsc->len;
579 rproc_add_res(dev, mapping);
581 debug("mapped devmem pa 0x%x, da 0x%x, len 0x%x\n",
582 rsc->pa, rsc->da, rsc->len);
587 static int handle_carveout(struct udevice *dev, struct fw_rsc_carveout *rsc,
588 int offset, int avail)
590 struct rproc_mem_entry *mapping;
592 if (sizeof(*rsc) > avail) {
593 debug("carveout rsc is truncated\n");
598 * make sure reserved bytes are zeroes
601 debug("carveout rsc has non zero reserved bytes\n");
605 debug("carveout rsc: da %x, pa %x, len %x, flags %x\n",
606 rsc->da, rsc->pa, rsc->len, rsc->flags);
608 rsc->pa = (uintptr_t)rproc_alloc_mem(dev, rsc->len, 8);
611 ("failed to allocate carveout rsc: da %x, pa %x, len %x, flags %x\n",
612 rsc->da, rsc->pa, rsc->len, rsc->flags);
615 rproc_config_pagetable(dev, rsc->da, rsc->pa, rsc->len);
618 * Ok, this is non-standard.
620 * Sometimes we can't rely on the generic iommu-based DMA API
621 * to dynamically allocate the device address and then set the IOMMU
622 * tables accordingly, because some remote processors might
623 * _require_ us to use hard coded device addresses that their
624 * firmware was compiled with.
626 * In this case, we must use the IOMMU API directly and map
627 * the memory to the device address as expected by the remote
630 * Obviously such remote processor devices should not be configured
631 * to use the iommu-based DMA API: we expect 'dma' to contain the
632 * physical address in this case.
634 mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
639 * We'll need this info later when we'll want to unmap
640 * everything (e.g. on shutdown).
642 * We can't trust the remote processor not to change the
643 * resource table, so we must maintain this info independently.
645 mapping->dma = rsc->pa;
646 mapping->da = rsc->da;
647 mapping->len = rsc->len;
648 rproc_add_res(dev, mapping);
650 debug("carveout mapped 0x%x to 0x%x\n", rsc->da, rsc->pa);
/* 4KiB pages assumed for vring sizing/alignment calculations below */
#define RPROC_PAGE_SHIFT 12
#define RPROC_PAGE_SIZE BIT(RPROC_PAGE_SHIFT)
/* Round x up to the next RPROC_PAGE_SIZE boundary */
#define RPROC_PAGE_ALIGN(x) (((x) + (RPROC_PAGE_SIZE - 1)) & ~(RPROC_PAGE_SIZE - 1))
659 static int alloc_vring(struct udevice *dev, struct fw_rsc_vdev *rsc, int i)
661 struct fw_rsc_vdev_vring *vring = &rsc->vring[i];
666 debug("vdev rsc: vring%d: da %x, qsz %d, align %d\n",
667 i, vring->da, vring->num, vring->align);
670 * verify queue size and vring alignment are sane
672 if (!vring->num || !vring->align) {
673 debug("invalid qsz (%d) or alignment (%d)\n", vring->num,
679 * actual size of vring (in bytes)
681 size = RPROC_PAGE_ALIGN(vring_size(vring->num, vring->align));
682 order = vring->align >> RPROC_PAGE_SHIFT;
684 pa = rproc_alloc_mem(dev, size, order);
686 debug("failed to allocate vring rsc\n");
689 debug("alloc_mem(%#x, %d): %p\n", size, order, pa);
690 vring->da = (uintptr_t)pa;
695 static int handle_vdev(struct udevice *dev, struct fw_rsc_vdev *rsc,
696 int offset, int avail)
702 * make sure resource isn't truncated
704 if (sizeof(*rsc) + rsc->num_of_vrings * sizeof(struct fw_rsc_vdev_vring)
705 + rsc->config_len > avail) {
706 debug("vdev rsc is truncated\n");
711 * make sure reserved bytes are zeroes
713 if (rsc->reserved[0] || rsc->reserved[1]) {
714 debug("vdev rsc has non zero reserved bytes\n");
718 debug("vdev rsc: id %d, dfeatures %x, cfg len %d, %d vrings\n",
719 rsc->id, rsc->dfeatures, rsc->config_len, rsc->num_of_vrings);
722 * we currently support only two vrings per rvdev
724 if (rsc->num_of_vrings > 2) {
725 debug("too many vrings: %d\n", rsc->num_of_vrings);
730 * allocate the vrings
732 for (i = 0; i < rsc->num_of_vrings; i++) {
733 ret = alloc_vring(dev, rsc, i);
738 pa = rproc_alloc_mem(dev, RPMSG_TOTAL_BUF_SPACE, 6);
740 debug("failed to allocate vdev rsc\n");
743 debug("vring buffer alloc_mem(%#x, 6): %p\n", RPMSG_TOTAL_BUF_SPACE,
753 * A lookup table for resource handlers. The indices are defined in
754 * enum fw_resource_type.
756 static handle_resource_t loading_handlers[RSC_LAST] = {
757 [RSC_CARVEOUT] = (handle_resource_t)handle_carveout,
758 [RSC_DEVMEM] = (handle_resource_t)handle_devmem,
759 [RSC_TRACE] = (handle_resource_t)handle_trace,
760 [RSC_VDEV] = (handle_resource_t)handle_vdev,
764 * handle firmware resource entries before booting the remote processor
766 static int handle_resources(struct udevice *dev, int len,
767 handle_resource_t handlers[RSC_LAST])
769 handle_resource_t handler;
772 for (i = 0; i < rsc_table->num; i++) {
773 int offset = rsc_table->offset[i];
774 struct fw_rsc_hdr *hdr = (void *)rsc_table + offset;
775 int avail = len - offset - sizeof(*hdr);
776 void *rsc = (void *)hdr + sizeof(*hdr);
779 * make sure table isn't truncated
782 debug("rsc table is truncated\n");
786 debug("rsc: type %d\n", hdr->type);
788 if (hdr->type >= RSC_LAST) {
789 debug("unsupported resource %d\n", hdr->type);
793 handler = handlers[hdr->type];
797 ret = handler(dev, rsc, offset + sizeof(*hdr), avail);
806 handle_intmem_to_l3_mapping(struct udevice *dev,
807 struct rproc_intmem_to_l3_mapping *l3_mapping)
811 for (i = 0; i < l3_mapping->num_entries; i++) {
812 struct l3_map *curr_map = &l3_mapping->mappings[i];
813 struct rproc_mem_entry *mapping;
815 mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
819 mapping->dma = curr_map->l3_addr;
820 mapping->da = curr_map->priv_addr;
821 mapping->len = curr_map->len;
822 rproc_add_res(dev, mapping);
828 static Elf32_Shdr *rproc_find_table(unsigned int addr)
830 Elf32_Ehdr *ehdr; /* Elf header structure pointer */
831 Elf32_Shdr *shdr; /* Section header structure pointer */
832 Elf32_Shdr sectionheader;
836 struct resource_table *ptable;
838 ehdr = (Elf32_Ehdr *)(uintptr_t)addr;
839 elf_data = (u8 *)ehdr;
840 shdr = (Elf32_Shdr *)(elf_data + ehdr->e_shoff);
841 memcpy(§ionheader, &shdr[ehdr->e_shstrndx], sizeof(sectionheader));
842 name_table = (char *)(elf_data + sectionheader.sh_offset);
844 for (i = 0; i < ehdr->e_shnum; i++, shdr++) {
845 memcpy(§ionheader, shdr, sizeof(sectionheader));
846 u32 size = sectionheader.sh_size;
847 u32 offset = sectionheader.sh_offset;
850 (name_table + sectionheader.sh_name, ".resource_table"))
853 ptable = (struct resource_table *)(elf_data + offset);
856 * make sure table has at least the header
858 if (sizeof(struct resource_table) > size) {
859 debug("header-less resource table\n");
864 * we don't support any version beyond the first
866 if (ptable->ver != 1) {
867 debug("unsupported fw ver: %d\n", ptable->ver);
872 * make sure reserved bytes are zeroes
874 if (ptable->reserved[0] || ptable->reserved[1]) {
875 debug("non zero reserved bytes\n");
880 * make sure the offsets array isn't truncated
882 if (ptable->num * sizeof(ptable->offset[0]) +
883 sizeof(struct resource_table) > size) {
884 debug("resource table incomplete\n");
894 struct resource_table *rproc_find_resource_table(struct udevice *dev,
899 Elf32_Shdr sectionheader;
900 struct resource_table *ptable;
901 u8 *elf_data = (u8 *)(uintptr_t)addr;
903 shdr = rproc_find_table(addr);
905 debug("%s: failed to get resource section header\n", __func__);
909 memcpy(§ionheader, shdr, sizeof(sectionheader));
910 ptable = (struct resource_table *)(elf_data + sectionheader.sh_offset);
912 *tablesz = sectionheader.sh_size;
917 unsigned long rproc_parse_resource_table(struct udevice *dev, struct rproc *cfg)
919 struct resource_table *ptable = NULL;
924 addr = cfg->load_addr;
926 ptable = rproc_find_resource_table(dev, addr, &tablesz);
928 debug("%s : failed to find resource table\n", __func__);
932 debug("%s : found resource table\n", __func__);
933 rsc_table = kzalloc(tablesz, GFP_KERNEL);
935 debug("resource table alloc failed!\n");
940 * Copy the resource table into a local buffer before handling the
943 memcpy(rsc_table, ptable, tablesz);
944 if (cfg->intmem_to_l3_mapping)
945 handle_intmem_to_l3_mapping(dev, cfg->intmem_to_l3_mapping);
946 ret = handle_resources(dev, tablesz, loading_handlers);
948 debug("handle_resources failed: %d\n", ret);
953 * Instead of trying to mimic the kernel flow of copying the
954 * processed resource table into its post ELF load location in DDR
955 * copying it into its original location.
957 memcpy(ptable, rsc_table, tablesz);