// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual I/O topology
 *
 * The Virtual I/O Translation Table (VIOT) describes the topology of
 * para-virtual IOMMUs and the endpoints they manage. The OS uses it to
 * initialize devices in the right order, preventing endpoints from issuing DMA
 * before their IOMMU is ready.
 *
 * When binding a driver to a device, before calling the device driver's probe()
 * method, the driver infrastructure calls dma_configure(). At that point the
 * VIOT driver looks for an IOMMU associated to the device in the VIOT table.
 * If an IOMMU exists and has been initialized, the VIOT driver initializes the
 * device's IOMMU fwspec, allowing the DMA infrastructure to invoke the IOMMU
 * ops when the device driver configures DMA mappings. If an IOMMU exists and
 * hasn't yet been initialized, VIOT returns -EPROBE_DEFER to postpone probing
 * the device until the IOMMU is available.
 */
#define pr_fmt(fmt) "ACPI: VIOT: " fmt

#include <linux/acpi_viot.h>
#include <linux/dma-iommu.h>
#include <linux/fwnode.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
struct viot_iommu {
	/* Node offset within the table */
	unsigned int offset;
	struct fwnode_handle *fwnode;
	struct list_head list;
};
struct viot_endpoint {
	union {
		/* PCI range */
		struct {
			u16 segment_start;
			u16 segment_end;
			u16 bdf_start;
			u16 bdf_end;
		};
		/* MMIO */
		u64 address;
	};
	u32 endpoint_id;
	struct viot_iommu *viommu;
	struct list_head list;
};
static struct acpi_table_viot *viot;
static LIST_HEAD(viot_iommus);
static LIST_HEAD(viot_pci_ranges);
static LIST_HEAD(viot_mmio_endpoints);

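/*
 * Check that a table node is sane: it must start after the fixed part of the
 * table, its header must end before the end of the table, and it must be at
 * least as long as its own header.
 */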
static int __init viot_check_bounds(const struct acpi_viot_header *hdr)
{
	struct acpi_viot_header *start, *end, *hdr_end;

	start = ACPI_ADD_PTR(struct acpi_viot_header, viot,
			     max_t(size_t, sizeof(*viot), viot->node_offset));
	end = ACPI_ADD_PTR(struct acpi_viot_header, viot, viot->header.length);
	hdr_end = ACPI_ADD_PTR(struct acpi_viot_header, hdr, sizeof(*hdr));

	if (hdr < start || hdr_end > end) {
		pr_err(FW_BUG "Node pointer overflows\n");
		return -EOVERFLOW;
	}
	if (hdr->length < sizeof(*hdr)) {
		pr_err(FW_BUG "Empty node\n");
		return -EINVAL;
	}
	return 0;
}

static int __init viot_get_pci_iommu_fwnode(struct viot_iommu *viommu,
					    u16 segment, u16 bdf)
{
	struct pci_dev *pdev;
	struct fwnode_handle *fwnode;

	pdev = pci_get_domain_bus_and_slot(segment, PCI_BUS_NUM(bdf),
					   bdf & 0xff);
	if (!pdev) {
		pr_err("Could not find PCI IOMMU\n");
		return -ENODEV;
	}

	fwnode = pdev->dev.fwnode;
	if (!fwnode) {
		/*
		 * PCI devices aren't necessarily described by ACPI. Create a
		 * fwnode so the IOMMU subsystem can identify this device.
		 */
		fwnode = acpi_alloc_fwnode_static();
		if (!fwnode) {
			pci_dev_put(pdev);
			return -ENOMEM;
		}
		set_primary_fwnode(&pdev->dev, fwnode);
	}
	viommu->fwnode = pdev->dev.fwnode;
	pci_dev_put(pdev);
	return 0;
}

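/*
 * For an MMIO-based virtio-iommu, find the ACPI device that consumes the
 * given base address in its _CRS resources and reuse its fwnode.
 */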
static int __init viot_get_mmio_iommu_fwnode(struct viot_iommu *viommu,
					     u64 address)
{
	struct acpi_device *adev;
	struct resource res = {
		.start	= address,
		.end	= address,
		.flags	= IORESOURCE_MEM,
	};

	adev = acpi_resource_consumer(&res);
	if (!adev) {
		pr_err("Could not find MMIO IOMMU\n");
		return -EINVAL;
	}
	viommu->fwnode = &adev->fwnode;
	return 0;
}

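/*
 * Return the viot_iommu describing the IOMMU node at @offset, creating and
 * caching it on first use. Returns NULL on any bounds, parse or allocation
 * error.
 */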
static struct viot_iommu * __init viot_get_iommu(unsigned int offset)
{
	int ret;
	struct viot_iommu *viommu;
	struct acpi_viot_header *hdr = ACPI_ADD_PTR(struct acpi_viot_header,
						    viot, offset);
	union {
		struct acpi_viot_virtio_iommu_pci pci;
		struct acpi_viot_virtio_iommu_mmio mmio;
	} *node = (void *)hdr;

	list_for_each_entry(viommu, &viot_iommus, list)
		if (viommu->offset == offset)
			return viommu;

	if (viot_check_bounds(hdr))
		return NULL;

	viommu = kzalloc(sizeof(*viommu), GFP_KERNEL);
	if (!viommu)
		return NULL;

	viommu->offset = offset;
	switch (hdr->type) {
	case ACPI_VIOT_NODE_VIRTIO_IOMMU_PCI:
		if (hdr->length < sizeof(node->pci))
			goto err_free;

		ret = viot_get_pci_iommu_fwnode(viommu, node->pci.segment,
						node->pci.bdf);
		break;
	case ACPI_VIOT_NODE_VIRTIO_IOMMU_MMIO:
		if (hdr->length < sizeof(node->mmio))
			goto err_free;

		ret = viot_get_mmio_iommu_fwnode(viommu,
						 node->mmio.base_address);
		break;
	default:
		ret = -EINVAL;
	}
	if (ret)
		goto err_free;

	list_add(&viommu->list, &viot_iommus);
	return viommu;

err_free:
	kfree(viommu);
	return NULL;
}

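/*
 * Parse one node of the table. IOMMU nodes are instantiated lazily by
 * viot_get_iommu(); this function only creates endpoint nodes (PCI range
 * and MMIO) and files them on the corresponding list.
 */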
static int __init viot_parse_node(const struct acpi_viot_header *hdr)
{
	int ret = -EINVAL;
	struct list_head *list;
	struct viot_endpoint *ep;
	union {
		struct acpi_viot_mmio mmio;
		struct acpi_viot_pci_range pci;
	} *node = (void *)hdr;

	if (viot_check_bounds(hdr))
		return -EINVAL;

	if (hdr->type == ACPI_VIOT_NODE_VIRTIO_IOMMU_PCI ||
	    hdr->type == ACPI_VIOT_NODE_VIRTIO_IOMMU_MMIO)
		return 0;

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	switch (hdr->type) {
	case ACPI_VIOT_NODE_PCI_RANGE:
		if (hdr->length < sizeof(node->pci)) {
			pr_err(FW_BUG "Invalid PCI node size\n");
			goto err_free;
		}

		ep->segment_start = node->pci.segment_start;
		ep->segment_end = node->pci.segment_end;
		ep->bdf_start = node->pci.bdf_start;
		ep->bdf_end = node->pci.bdf_end;
		ep->endpoint_id = node->pci.endpoint_start;
		ep->viommu = viot_get_iommu(node->pci.output_node);
		list = &viot_pci_ranges;
		break;
	case ACPI_VIOT_NODE_MMIO:
		if (hdr->length < sizeof(node->mmio)) {
			pr_err(FW_BUG "Invalid MMIO node size\n");
			goto err_free;
		}

		ep->address = node->mmio.base_address;
		ep->endpoint_id = node->mmio.endpoint;
		ep->viommu = viot_get_iommu(node->mmio.output_node);
		list = &viot_mmio_endpoints;
		break;
	default:
		pr_warn("Unsupported node %x\n", hdr->type);
		ret = 0;
		goto err_free;
	}

	if (!ep->viommu) {
		pr_warn("No IOMMU node found\n");
		/*
		 * A future version of the table may use the node for other
		 * purposes. Keep parsing.
		 */
		ret = 0;
		goto err_free;
	}

	list_add(&ep->list, list);
	return 0;

err_free:
	kfree(ep);
	return ret;
}

/**
 * acpi_viot_early_init - Test the presence of VIOT and enable ACS
 *
 * If the VIOT does exist, ACS must be enabled. This cannot be done in
 * acpi_viot_init(), which is called after the PCI bus scan.
 */
void __init acpi_viot_early_init(void)
{
#ifdef CONFIG_PCI
	acpi_status status;
	struct acpi_table_header *hdr;

	status = acpi_get_table(ACPI_SIG_VIOT, 0, &hdr);
	if (ACPI_FAILURE(status))
		return;
	pci_request_acs();
	acpi_put_table(hdr);
#endif
}

/**
 * acpi_viot_init - Parse the VIOT table
 *
 * Parse the VIOT table, prepare the list of endpoints to be used during DMA
 * setup of devices.
 */
void __init acpi_viot_init(void)
{
	int i;
	acpi_status status;
	struct acpi_table_header *hdr;
	struct acpi_viot_header *node;

	status = acpi_get_table(ACPI_SIG_VIOT, 0, &hdr);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			const char *msg = acpi_format_exception(status);

			pr_err("Failed to get table, %s\n", msg);
		}
		return;
	}

	viot = (void *)hdr;

	node = ACPI_ADD_PTR(struct acpi_viot_header, viot, viot->node_offset);
	for (i = 0; i < viot->node_count; i++) {
		if (viot_parse_node(node))
			return;

		node = ACPI_ADD_PTR(struct acpi_viot_header, node,
				    node->length);
	}

	acpi_put_table(hdr);
}

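/*
 * Bind one endpoint to its IOMMU. When the IOMMU driver hasn't registered
 * its ops yet, ask the driver core to retry later with -EPROBE_DEFER,
 * which is only useful if virtio-iommu is enabled in the kernel config.
 */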
static int viot_dev_iommu_init(struct device *dev, struct viot_iommu *viommu,
			       u32 epid)
{
	const struct iommu_ops *ops;

	if (!viommu)
		return -ENODEV;

	/* We're not translating ourselves */
	if (viommu->fwnode == dev->fwnode)
		return -EINVAL;

	ops = iommu_ops_from_fwnode(viommu->fwnode);
	if (!ops)
		return IS_ENABLED(CONFIG_VIRTIO_IOMMU) ?
			-EPROBE_DEFER : -ENODEV;

	return acpi_iommu_fwspec_init(dev, epid, viommu->fwnode, ops);
}

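/*
 * Compute the endpoint ID of a PCI device from the range node containing it:
 * one 16-bit BDF space per segment, offset by the node's first endpoint ID.
 * As a hypothetical example, with segment_start = 0, bdf_start = 0x08 and
 * endpoint_start = 0x10, device 0000:00:02.0 (dev_id 0x10) gets
 * epid = (0 << 16) + (0x10 - 0x08) + 0x10 = 0x18.
 */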
static int viot_pci_dev_iommu_init(struct pci_dev *pdev, u16 dev_id, void *data)
{
	u32 epid;
	struct viot_endpoint *ep;
	u32 domain_nr = pci_domain_nr(pdev->bus);

	list_for_each_entry(ep, &viot_pci_ranges, list) {
		if (domain_nr >= ep->segment_start &&
		    domain_nr <= ep->segment_end &&
		    dev_id >= ep->bdf_start &&
		    dev_id <= ep->bdf_end) {
			epid = ((domain_nr - ep->segment_start) << 16) +
				dev_id - ep->bdf_start + ep->endpoint_id;

			return viot_dev_iommu_init(&pdev->dev, ep->viommu,
						   epid);
		}
	}
	return -ENODEV;
}

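/*
 * An MMIO endpoint is matched against the table using the start address of
 * its first memory resource.
 */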
static int viot_mmio_dev_iommu_init(struct platform_device *pdev)
{
	struct resource *mem;
	struct viot_endpoint *ep;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -ENODEV;

	list_for_each_entry(ep, &viot_mmio_endpoints, list) {
		if (ep->address == mem->start)
			return viot_dev_iommu_init(&pdev->dev, ep->viommu,
						   ep->endpoint_id);
	}
	return -ENODEV;
}

/**
 * viot_iommu_configure - Setup IOMMU ops for an endpoint described by VIOT
 * @dev: the endpoint
 *
 * Return: 0 on success, <0 on failure
 */
int viot_iommu_configure(struct device *dev)
{
	if (dev_is_pci(dev))
		return pci_for_each_dma_alias(to_pci_dev(dev),
					      viot_pci_dev_iommu_init, NULL);
	else if (dev_is_platform(dev))
		return viot_mmio_dev_iommu_init(to_platform_device(dev));
	return -ENODEV;
}