// SPDX-License-Identifier: GPL-2.0
/*
 * Virtio driver for the paravirtualized IOMMU
 *
 * Copyright (C) 2019 Arm Limited
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/amba/bus.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-map-ops.h>
#include <linux/freezer.h>
#include <linux/interval_tree.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>
#include <linux/wait.h>

#include <uapi/linux/virtio_iommu.h>
#define MSI_IOVA_BASE 0x8000000
#define MSI_IOVA_LENGTH 0x100000

#define VIOMMU_REQUEST_VQ 0
#define VIOMMU_EVENT_VQ 1
#define VIOMMU_NR_VQS 2
struct viommu_dev {
	struct iommu_device iommu;
	struct device *dev;
	struct virtio_device *vdev;

	struct ida domain_ids;

	struct virtqueue *vqs[VIOMMU_NR_VQS];
	spinlock_t request_lock;
	struct list_head requests;
	void *evts;

	/* Device configuration */
	struct iommu_domain_geometry geometry;
	u64 pgsize_bitmap;
	u32 first_domain;
	u32 last_domain;
	/* Supported MAP flags */
	u32 map_flags;
	u32 probe_size;
};
struct viommu_mapping {
	phys_addr_t paddr;
	struct interval_tree_node iova;
	u32 flags;
};

struct viommu_domain {
	struct iommu_domain domain;
	struct viommu_dev *viommu;
	struct mutex mutex; /* protects viommu pointer */
	unsigned int id;
	u32 map_flags;

	spinlock_t mappings_lock;
	struct rb_root_cached mappings;

	unsigned long nr_endpoints;
};
struct viommu_endpoint {
	struct device *dev;
	struct viommu_dev *viommu;
	struct viommu_domain *vdomain;
	struct list_head resv_regions;
};
struct viommu_request {
	struct list_head list;
	void *writeback;
	unsigned int write_offset;
	size_t len;
	char buf[];
};

#define VIOMMU_FAULT_RESV_MASK 0xffffff00

struct viommu_event {
	union {
		u32 head;
		struct virtio_iommu_fault fault;
	};
};
#define to_viommu_domain(domain) \
	container_of(domain, struct viommu_domain, domain)
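
/* Translate the request status written by the device into an errno */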
static int viommu_get_req_errno(void *buf, size_t len)
{
	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);

	switch (tail->status) {
	case VIRTIO_IOMMU_S_OK:
		return 0;
	case VIRTIO_IOMMU_S_UNSUPP:
		return -ENOSYS;
	case VIRTIO_IOMMU_S_INVAL:
		return -EINVAL;
	case VIRTIO_IOMMU_S_RANGE:
		return -ERANGE;
	case VIRTIO_IOMMU_S_NOENT:
		return -ENOENT;
	case VIRTIO_IOMMU_S_FAULT:
		return -EFAULT;
	case VIRTIO_IOMMU_S_NOMEM:
		return -ENOMEM;
	case VIRTIO_IOMMU_S_IOERR:
	case VIRTIO_IOMMU_S_DEVERR:
	default:
		return -EIO;
	}
}
static void viommu_set_req_status(void *buf, size_t len, int status)
{
	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);

	tail->status = status;
}
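
/*
 * Each request ends with a tail that the device writes (at least the request
 * status). Return the offset of the first device-writable byte: everything
 * before it is device-readable, everything from it onwards is device-writable.
 * A PROBE request also reserves probe_size bytes of writable property space
 * before the tail.
 */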
static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu,
					  struct virtio_iommu_req_head *req,
					  size_t len)
{
	size_t tail_size = sizeof(struct virtio_iommu_req_tail);

	if (req->type == VIRTIO_IOMMU_T_PROBE)
		return len - viommu->probe_size - tail_size;

	return len - tail_size;
}
/*
 * __viommu_sync_req - Complete all in-flight requests
 *
 * Wait for all added requests to complete. When this function returns, all
 * requests that were in-flight at the time of the call have completed.
 */
static int __viommu_sync_req(struct viommu_dev *viommu)
{
	unsigned int len;
	size_t write_len;
	struct viommu_request *req;
	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

	assert_spin_locked(&viommu->request_lock);

	virtqueue_kick(vq);

	while (!list_empty(&viommu->requests)) {
		len = 0;
		req = virtqueue_get_buf(vq, &len);
		if (!req)
			continue;

		if (!len)
			viommu_set_req_status(req->buf, req->len,
					      VIRTIO_IOMMU_S_IOERR);

		write_len = req->len - req->write_offset;
		if (req->writeback && len == write_len)
			memcpy(req->writeback, req->buf + req->write_offset,
			       write_len);

		list_del(&req->list);
		kfree(req);
	}

	return 0;
}
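
/* Take the request lock, kick the queue and wait for in-flight requests */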
static int viommu_sync_req(struct viommu_dev *viommu)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);
	ret = __viommu_sync_req(viommu);
	if (ret)
		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
	spin_unlock_irqrestore(&viommu->request_lock, flags);

	return ret;
}
/*
 * __viommu_add_request - Add one request to the queue
 * @buf: pointer to the request buffer
 * @len: length of the request buffer
 * @writeback: copy data back to the buffer when the request completes.
 *
 * Add a request to the queue. Only synchronize the queue if it's already full.
 * Otherwise don't kick the queue nor wait for requests to complete.
 *
 * When @writeback is true, data written by the device, including the request
 * status, is copied into @buf after the request completes. This is unsafe if
 * the caller allocates @buf on stack and drops the lock between add_req() and
 * sync_req().
 *
 * Return 0 if the request was successfully added to the queue.
 */
static int __viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len,
			    bool writeback)
{
	int ret;
	off_t write_offset;
	struct viommu_request *req;
	struct scatterlist top_sg, bottom_sg;
	struct scatterlist *sg[2] = { &top_sg, &bottom_sg };
	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

	assert_spin_locked(&viommu->request_lock);

	write_offset = viommu_get_write_desc_offset(viommu, buf, len);
	if (write_offset <= 0)
		return -EINVAL;

	req = kzalloc(sizeof(*req) + len, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	req->len = len;
	if (writeback) {
		req->writeback = buf + write_offset;
		req->write_offset = write_offset;
	}
	memcpy(&req->buf, buf, write_offset);
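
	/*
	 * Split the request into a device-readable descriptor (everything up
	 * to write_offset) and a device-writable descriptor (the tail, plus
	 * any writable fields such as PROBE properties).
	 */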
	sg_init_one(&top_sg, req->buf, write_offset);
	sg_init_one(&bottom_sg, req->buf + write_offset, len - write_offset);

	ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		/* If the queue is full, sync and retry */
		if (!__viommu_sync_req(viommu))
			ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
	}
	if (ret)
		goto err_free;

	list_add_tail(&req->list, &viommu->requests);
	return 0;

err_free:
	kfree(req);
	return ret;
}
static int viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);
	ret = __viommu_add_req(viommu, buf, len, false);
	if (ret)
		dev_dbg(viommu->dev, "could not add request: %d\n", ret);
	spin_unlock_irqrestore(&viommu->request_lock, flags);

	return ret;
}
/*
 * Send a request and wait for it to complete. Return the request status (as an
 * errno)
 */
static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf,
				size_t len)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);

	ret = __viommu_add_req(viommu, buf, len, true);
	if (ret) {
		dev_dbg(viommu->dev, "could not add request (%d)\n", ret);
		goto out_unlock;
	}

	ret = __viommu_sync_req(viommu);
	if (ret) {
		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
		/* Fall-through (get the actual request status) */
	}

	ret = viommu_get_req_errno(buf, len);
out_unlock:
	spin_unlock_irqrestore(&viommu->request_lock, flags);

	return ret;
}
/*
 * viommu_add_mapping - add a mapping to the internal tree
 *
 * On success, return 0. Otherwise return -ENOMEM.
 */
static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova,
			      phys_addr_t paddr, size_t size, u32 flags)
{
	unsigned long irqflags;
	struct viommu_mapping *mapping;

	mapping = kzalloc(sizeof(*mapping), GFP_ATOMIC);
	if (!mapping)
		return -ENOMEM;

	mapping->paddr = paddr;
	mapping->iova.start = iova;
	mapping->iova.last = iova + size - 1;
	mapping->flags = flags;

	spin_lock_irqsave(&vdomain->mappings_lock, irqflags);
	interval_tree_insert(&mapping->iova, &vdomain->mappings);
	spin_unlock_irqrestore(&vdomain->mappings_lock, irqflags);

	return 0;
}
/*
 * viommu_del_mappings - remove mappings from the internal tree
 *
 * @vdomain: the domain
 * @iova: start of the range
 * @size: size of the range. A size of 0 corresponds to the entire address
 *	space.
 *
 * On success, returns the number of unmapped bytes (>= size)
 */
static size_t viommu_del_mappings(struct viommu_domain *vdomain,
				  unsigned long iova, size_t size)
{
	size_t unmapped = 0;
	unsigned long flags;
	unsigned long last = iova + size - 1;
	struct viommu_mapping *mapping = NULL;
	struct interval_tree_node *node, *next;

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	next = interval_tree_iter_first(&vdomain->mappings, iova, last);
	while (next) {
		node = next;
		mapping = container_of(node, struct viommu_mapping, iova);
		next = interval_tree_iter_next(node, iova, last);

		/* Trying to split a mapping? */
		if (mapping->iova.start < iova)
			break;

		/*
		 * Virtio-iommu doesn't allow UNMAP to split a mapping created
		 * with a single MAP request, so remove the full mapping.
		 */
		unmapped += mapping->iova.last - mapping->iova.start + 1;

		interval_tree_remove(node, &vdomain->mappings);
		kfree(mapping);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return unmapped;
}
/*
 * viommu_replay_mappings - re-send MAP requests
 *
 * When reattaching a domain that was previously detached from all endpoints,
 * mappings were deleted from the device. Re-create the mappings available in
 * the internal tree.
 */
static int viommu_replay_mappings(struct viommu_domain *vdomain)
{
	int ret = 0;
	unsigned long flags;
	struct viommu_mapping *mapping;
	struct interval_tree_node *node;
	struct virtio_iommu_req_map map;

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL);
	while (node) {
		mapping = container_of(node, struct viommu_mapping, iova);
		map = (struct virtio_iommu_req_map) {
			.head.type = VIRTIO_IOMMU_T_MAP,
			.domain = cpu_to_le32(vdomain->id),
			.virt_start = cpu_to_le64(mapping->iova.start),
			.virt_end = cpu_to_le64(mapping->iova.last),
			.phys_start = cpu_to_le64(mapping->paddr),
			.flags = cpu_to_le32(mapping->flags),
		};

		ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
		if (ret)
			break;

		node = interval_tree_iter_next(node, 0, -1UL);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return ret;
}
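
/*
 * Parse a RESV_MEM probe property and add the corresponding reserved region
 * to the endpoint's resv_regions list.
 */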
static int viommu_add_resv_mem(struct viommu_endpoint *vdev,
			       struct virtio_iommu_probe_resv_mem *mem,
			       size_t len)
{
	size_t size;
	u64 start64, end64;
	phys_addr_t start, end;
	struct iommu_resv_region *region = NULL;
	unsigned long prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	start = start64 = le64_to_cpu(mem->start);
	end = end64 = le64_to_cpu(mem->end);
	size = end64 - start64 + 1;

	/* Catch any overflow, including the unlikely end64 - start64 + 1 = 0 */
	if (start != start64 || end != end64 || size < end64 - start64)
		return -EOVERFLOW;

	if (len < sizeof(*mem))
		return -EINVAL;

	switch (mem->subtype) {
	default:
		dev_warn(vdev->dev, "unknown resv mem subtype 0x%x\n",
			 mem->subtype);
		fallthrough;
	case VIRTIO_IOMMU_RESV_MEM_T_RESERVED:
		region = iommu_alloc_resv_region(start, size, 0,
						 IOMMU_RESV_RESERVED);
		break;
	case VIRTIO_IOMMU_RESV_MEM_T_MSI:
		region = iommu_alloc_resv_region(start, size, prot,
						 IOMMU_RESV_MSI);
		break;
	}
	if (!region)
		return -ENOMEM;

	list_add(&region->list, &vdev->resv_regions);
	return 0;
}
static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev)
{
	int ret;
	u16 type, len;
	size_t cur = 0;
	size_t probe_len;
	struct virtio_iommu_req_probe *probe;
	struct virtio_iommu_probe_property *prop;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);

	if (!fwspec->num_ids)
		return -EINVAL;

	probe_len = sizeof(*probe) + viommu->probe_size +
		    sizeof(struct virtio_iommu_req_tail);
	probe = kzalloc(probe_len, GFP_KERNEL);
	if (!probe)
		return -ENOMEM;

	probe->head.type = VIRTIO_IOMMU_T_PROBE;
	/*
	 * For now, assume that properties of an endpoint that outputs multiple
	 * IDs are consistent. Only probe the first one.
	 */
	probe->endpoint = cpu_to_le32(fwspec->ids[0]);

	ret = viommu_send_req_sync(viommu, probe, probe_len);
	if (ret)
		goto out_free;

	prop = (void *)probe->properties;
	type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;

	while (type != VIRTIO_IOMMU_PROBE_T_NONE &&
	       cur < viommu->probe_size) {
		len = le16_to_cpu(prop->length) + sizeof(*prop);

		switch (type) {
		case VIRTIO_IOMMU_PROBE_T_RESV_MEM:
			ret = viommu_add_resv_mem(vdev, (void *)prop, len);
			break;
		default:
			dev_err(dev, "unknown viommu prop 0x%x\n", type);
		}

		if (ret)
			dev_err(dev, "failed to parse viommu prop 0x%x\n", type);

		cur += len;
		if (cur >= viommu->probe_size)
			break;

		prop = (void *)probe->properties + cur;
		type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;
	}

out_free:
	kfree(probe);
	return ret;
}
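
/* Fault reports arrive as events on the event virtqueue */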
static int viommu_fault_handler(struct viommu_dev *viommu,
				struct virtio_iommu_fault *fault)
{
	char *reason_str;

	u8 reason = fault->reason;
	u32 flags = le32_to_cpu(fault->flags);
	u32 endpoint = le32_to_cpu(fault->endpoint);
	u64 address = le64_to_cpu(fault->address);

	switch (reason) {
	case VIRTIO_IOMMU_FAULT_R_DOMAIN:
		reason_str = "domain";
		break;
	case VIRTIO_IOMMU_FAULT_R_MAPPING:
		reason_str = "page";
		break;
	case VIRTIO_IOMMU_FAULT_R_UNKNOWN:
	default:
		reason_str = "unknown";
		break;
	}

	/* TODO: find EP by ID and report_iommu_fault */
	if (flags & VIRTIO_IOMMU_FAULT_F_ADDRESS)
		dev_err_ratelimited(viommu->dev, "%s fault from EP %u at %#llx [%s%s%s]\n",
				    reason_str, endpoint, address,
				    flags & VIRTIO_IOMMU_FAULT_F_READ ? "R" : "",
				    flags & VIRTIO_IOMMU_FAULT_F_WRITE ? "W" : "",
				    flags & VIRTIO_IOMMU_FAULT_F_EXEC ? "X" : "");
	else
		dev_err_ratelimited(viommu->dev, "%s fault from EP %u\n",
				    reason_str, endpoint);

	return 0;
}
static void viommu_event_handler(struct virtqueue *vq)
{
	int ret;
	unsigned int len;
	struct scatterlist sg[1];
	struct viommu_event *evt;
	struct viommu_dev *viommu = vq->vdev->priv;

	while ((evt = virtqueue_get_buf(vq, &len)) != NULL) {
		if (len > sizeof(*evt)) {
			dev_err(viommu->dev,
				"invalid event buffer (len %u != %zu)\n",
				len, sizeof(*evt));
		} else if (!(evt->head & VIOMMU_FAULT_RESV_MASK)) {
			viommu_fault_handler(viommu, &evt->fault);
		}

		sg_init_one(sg, evt, sizeof(*evt));
		ret = virtqueue_add_inbuf(vq, sg, 1, evt, GFP_ATOMIC);
		if (ret)
			dev_err(viommu->dev, "could not add event buffer\n");
	}

	virtqueue_kick(vq);
}
/* IOMMU API */

static struct iommu_domain *viommu_domain_alloc(unsigned type)
{
	struct viommu_domain *vdomain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
	if (!vdomain)
		return NULL;

	mutex_init(&vdomain->mutex);
	spin_lock_init(&vdomain->mappings_lock);
	vdomain->mappings = RB_ROOT_CACHED;

	return &vdomain->domain;
}
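
/*
 * The domain is finalised (domain ID allocated, page size and geometry
 * inherited) on first attach, once the owning viommu instance is known.
 */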
static int viommu_domain_finalise(struct viommu_endpoint *vdev,
				  struct iommu_domain *domain)
{
	int ret;
	unsigned long viommu_page_size;
	struct viommu_dev *viommu = vdev->viommu;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap);
	if (viommu_page_size > PAGE_SIZE) {
		dev_err(vdev->dev,
			"granule 0x%lx larger than system page size 0x%lx\n",
			viommu_page_size, PAGE_SIZE);
		return -EINVAL;
	}

	ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
			      viommu->last_domain, GFP_KERNEL);
	if (ret < 0)
		return ret;

	vdomain->id = (unsigned int)ret;

	domain->pgsize_bitmap = viommu->pgsize_bitmap;
	domain->geometry = viommu->geometry;

	vdomain->map_flags = viommu->map_flags;
	vdomain->viommu = viommu;

	return 0;
}
static void viommu_domain_free(struct iommu_domain *domain)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	/* Free all remaining mappings (size 2^64) */
	viommu_del_mappings(vdomain, 0, 0);

	if (vdomain->viommu)
		ida_free(&vdomain->viommu->domain_ids, vdomain->id);

	kfree(vdomain);
}
static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int i;
	int ret = 0;
	struct virtio_iommu_req_attach req;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	mutex_lock(&vdomain->mutex);
	if (!vdomain->viommu) {
		/*
		 * Properly initialize the domain now that we know which viommu
		 * owns it.
		 */
		ret = viommu_domain_finalise(vdev, domain);
	} else if (vdomain->viommu != vdev->viommu) {
		dev_err(dev, "cannot attach to foreign vIOMMU\n");
		ret = -EXDEV;
	}
	mutex_unlock(&vdomain->mutex);

	if (ret)
		return ret;

	/*
	 * In the virtio-iommu device, when attaching the endpoint to a new
	 * domain, it is detached from the old one and, if as a result the
	 * old domain isn't attached to any endpoint, all mappings are removed
	 * from the old domain and it is freed.
	 *
	 * In the driver the old domain still exists, and its mappings will be
	 * recreated if it gets reattached to an endpoint. Otherwise it will be
	 * freed explicitly.
	 *
	 * vdev->vdomain is protected by group->mutex
	 */
	if (vdev->vdomain)
		vdev->vdomain->nr_endpoints--;

	req = (struct virtio_iommu_req_attach) {
		.head.type = VIRTIO_IOMMU_T_ATTACH,
		.domain = cpu_to_le32(vdomain->id),
	};

	for (i = 0; i < fwspec->num_ids; i++) {
		req.endpoint = cpu_to_le32(fwspec->ids[i]);

		ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req));
		if (ret)
			return ret;
	}

	if (!vdomain->nr_endpoints) {
		/*
		 * This endpoint is the first to be attached to the domain.
		 * Replay existing mappings (e.g. SW MSI).
		 */
		ret = viommu_replay_mappings(vdomain);
		if (ret)
			return ret;
	}

	vdomain->nr_endpoints++;
	vdev->vdomain = vdomain;

	return 0;
}
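
/*
 * Record the mapping in the internal tree first, then mirror it to the device
 * with a MAP request. If the domain isn't attached to any endpoint yet, the
 * request is deferred until attach time, when the mappings are replayed.
 */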
static int viommu_map(struct iommu_domain *domain, unsigned long iova,
		      phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	int ret;
	u32 flags;
	struct virtio_iommu_req_map map;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	flags = (prot & IOMMU_READ ? VIRTIO_IOMMU_MAP_F_READ : 0) |
		(prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) |
		(prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0);

	if (flags & ~vdomain->map_flags)
		return -EINVAL;

	ret = viommu_add_mapping(vdomain, iova, paddr, size, flags);
	if (ret)
		return ret;

	map = (struct virtio_iommu_req_map) {
		.head.type = VIRTIO_IOMMU_T_MAP,
		.domain = cpu_to_le32(vdomain->id),
		.virt_start = cpu_to_le64(iova),
		.phys_start = cpu_to_le64(paddr),
		.virt_end = cpu_to_le64(iova + size - 1),
		.flags = cpu_to_le32(flags),
	};

	if (!vdomain->nr_endpoints)
		return 0;

	ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
	if (ret)
		viommu_del_mappings(vdomain, iova, size);

	return ret;
}
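
/*
 * UNMAP requests are only queued here; the queue is kicked and drained later,
 * in viommu_iotlb_sync().
 */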
static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova,
			   size_t size, struct iommu_iotlb_gather *gather)
{
	int ret = 0;
	size_t unmapped;
	struct virtio_iommu_req_unmap unmap;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	unmapped = viommu_del_mappings(vdomain, iova, size);
	if (unmapped < size)
		return 0;

	/* Device already removed all mappings after detach. */
	if (!vdomain->nr_endpoints)
		return unmapped;

	unmap = (struct virtio_iommu_req_unmap) {
		.head.type = VIRTIO_IOMMU_T_UNMAP,
		.domain = cpu_to_le32(vdomain->id),
		.virt_start = cpu_to_le64(iova),
		.virt_end = cpu_to_le64(iova + unmapped - 1),
	};

	ret = viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap));
	return ret ? 0 : unmapped;
}
static phys_addr_t viommu_iova_to_phys(struct iommu_domain *domain,
				       dma_addr_t iova)
{
	u64 paddr = 0;
	unsigned long flags;
	struct viommu_mapping *mapping;
	struct interval_tree_node *node;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	node = interval_tree_iter_first(&vdomain->mappings, iova, iova);
	if (node) {
		mapping = container_of(node, struct viommu_mapping, iova);
		paddr = mapping->paddr + (iova - mapping->iova.start);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return paddr;
}
static void viommu_iotlb_sync(struct iommu_domain *domain,
			      struct iommu_iotlb_gather *gather)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	viommu_sync_req(vdomain->viommu);
}
static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
{
	struct iommu_resv_region *entry, *new_entry, *msi = NULL;
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	list_for_each_entry(entry, &vdev->resv_regions, list) {
		if (entry->type == IOMMU_RESV_MSI)
			msi = entry;

		new_entry = kmemdup(entry, sizeof(*entry), GFP_KERNEL);
		if (!new_entry)
			return;
		list_add_tail(&new_entry->list, head);
	}

	/*
	 * If the device didn't register any bypass MSI window, add a
	 * software-mapped region.
	 */
	if (!msi) {
		msi = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
					      prot, IOMMU_RESV_SW_MSI);
		if (!msi)
			return;

		list_add_tail(&msi->list, head);
	}

	iommu_dma_get_resv_regions(dev, head);
}
static struct iommu_ops viommu_ops;
static struct virtio_driver virtio_iommu_drv;
static int viommu_match_node(struct device *dev, const void *data)
{
	return dev->parent->fwnode == data;
}
static struct viommu_dev *viommu_get_by_fwnode(struct fwnode_handle *fwnode)
{
	struct device *dev = driver_find_device(&virtio_iommu_drv.driver, NULL,
						fwnode, viommu_match_node);
	put_device(dev);

	return dev ? dev_to_virtio(dev)->priv : NULL;
}
static struct iommu_device *viommu_probe_device(struct device *dev)
{
	int ret;
	struct viommu_endpoint *vdev;
	struct viommu_dev *viommu = NULL;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (!fwspec || fwspec->ops != &viommu_ops)
		return ERR_PTR(-ENODEV);

	viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode);
	if (!viommu)
		return ERR_PTR(-ENODEV);

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev)
		return ERR_PTR(-ENOMEM);

	vdev->dev = dev;
	vdev->viommu = viommu;
	INIT_LIST_HEAD(&vdev->resv_regions);
	dev_iommu_priv_set(dev, vdev);

	if (viommu->probe_size) {
		/* Get additional information for this endpoint */
		ret = viommu_probe_endpoint(viommu, dev);
		if (ret)
			goto err_free_dev;
	}

	return &viommu->iommu;

err_free_dev:
	generic_iommu_put_resv_regions(dev, &vdev->resv_regions);
	kfree(vdev);

	return ERR_PTR(ret);
}
static void viommu_probe_finalize(struct device *dev)
{
#ifndef CONFIG_ARCH_HAS_SETUP_DMA_OPS
	/* First clear the DMA ops in case we're switching from a DMA domain */
	set_dma_ops(dev, NULL);
	iommu_setup_dma_ops(dev, 0, U64_MAX);
#endif
}
static void viommu_release_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct viommu_endpoint *vdev;

	if (!fwspec || fwspec->ops != &viommu_ops)
		return;

	vdev = dev_iommu_priv_get(dev);

	generic_iommu_put_resv_regions(dev, &vdev->resv_regions);
	kfree(vdev);
}
static struct iommu_group *viommu_device_group(struct device *dev)
{
	if (dev_is_pci(dev))
		return pci_device_group(dev);
	else
		return generic_device_group(dev);
}
static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	return iommu_fwspec_add_ids(dev, args->args, 1);
}
static struct iommu_ops viommu_ops = {
	.domain_alloc = viommu_domain_alloc,
	.domain_free = viommu_domain_free,
	.attach_dev = viommu_attach_dev,
	.map = viommu_map,
	.unmap = viommu_unmap,
	.iova_to_phys = viommu_iova_to_phys,
	.iotlb_sync = viommu_iotlb_sync,
	.probe_device = viommu_probe_device,
	.probe_finalize = viommu_probe_finalize,
	.release_device = viommu_release_device,
	.device_group = viommu_device_group,
	.get_resv_regions = viommu_get_resv_regions,
	.put_resv_regions = generic_iommu_put_resv_regions,
	.of_xlate = viommu_of_xlate,
	.owner = THIS_MODULE,
};
static int viommu_init_vqs(struct viommu_dev *viommu)
{
	struct virtio_device *vdev = dev_to_virtio(viommu->dev);
	const char *names[] = { "request", "event" };
	vq_callback_t *callbacks[] = {
		NULL, /* No async requests */
		viommu_event_handler,
	};

	return virtio_find_vqs(vdev, VIOMMU_NR_VQS, viommu->vqs, callbacks,
			       names, NULL);
}
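
/* Fill the event queue with device-writable buffers so the device can report faults */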
static int viommu_fill_evtq(struct viommu_dev *viommu)
{
	int i, ret;
	struct scatterlist sg[1];
	struct viommu_event *evts;
	struct virtqueue *vq = viommu->vqs[VIOMMU_EVENT_VQ];
	size_t nr_evts = vq->num_free;

	viommu->evts = evts = devm_kmalloc_array(viommu->dev, nr_evts,
						 sizeof(*evts), GFP_KERNEL);
	if (!evts)
		return -ENOMEM;

	for (i = 0; i < nr_evts; i++) {
		sg_init_one(sg, &evts[i], sizeof(*evts));
		ret = virtqueue_add_inbuf(vq, sg, 1, &evts[i], GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}
static int viommu_probe(struct virtio_device *vdev)
{
	struct device *parent_dev = vdev->dev.parent;
	struct viommu_dev *viommu = NULL;
	struct device *dev = &vdev->dev;
	u64 input_start = 0;
	u64 input_end = -1UL;
	int ret;

	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
	    !virtio_has_feature(vdev, VIRTIO_IOMMU_F_MAP_UNMAP))
		return -ENODEV;

	viommu = devm_kzalloc(dev, sizeof(*viommu), GFP_KERNEL);
	if (!viommu)
		return -ENOMEM;

	spin_lock_init(&viommu->request_lock);
	ida_init(&viommu->domain_ids);
	viommu->dev = dev;
	viommu->vdev = vdev;
	INIT_LIST_HEAD(&viommu->requests);

	ret = viommu_init_vqs(viommu);
	if (ret)
		return ret;

	virtio_cread_le(vdev, struct virtio_iommu_config, page_size_mask,
			&viommu->pgsize_bitmap);

	if (!viommu->pgsize_bitmap) {
		ret = -EINVAL;
		goto err_free_vqs;
	}

	viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
	viommu->last_domain = ~0U;

	/* Optional features */
	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
				struct virtio_iommu_config, input_range.start,
				&input_start);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
				struct virtio_iommu_config, input_range.end,
				&input_end);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
				struct virtio_iommu_config, domain_range.start,
				&viommu->first_domain);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
				struct virtio_iommu_config, domain_range.end,
				&viommu->last_domain);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_PROBE,
				struct virtio_iommu_config, probe_size,
				&viommu->probe_size);

	viommu->geometry = (struct iommu_domain_geometry) {
		.aperture_start = input_start,
		.aperture_end = input_end,
		.force_aperture = true,
	};

	if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO))
		viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO;

	viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;

	virtio_device_ready(vdev);

	/* Populate the event queue with buffers */
	ret = viommu_fill_evtq(viommu);
	if (ret)
		goto err_free_vqs;

	ret = iommu_device_sysfs_add(&viommu->iommu, dev, NULL, "%s",
				     virtio_bus_name(vdev));
	if (ret)
		goto err_free_vqs;

	iommu_device_register(&viommu->iommu, &viommu_ops, parent_dev);

#ifdef CONFIG_PCI
	if (pci_bus_type.iommu_ops != &viommu_ops) {
		ret = bus_set_iommu(&pci_bus_type, &viommu_ops);
		if (ret)
			goto err_unregister;
	}
#endif
#ifdef CONFIG_ARM_AMBA
	if (amba_bustype.iommu_ops != &viommu_ops) {
		ret = bus_set_iommu(&amba_bustype, &viommu_ops);
		if (ret)
			goto err_unregister;
	}
#endif
	if (platform_bus_type.iommu_ops != &viommu_ops) {
		ret = bus_set_iommu(&platform_bus_type, &viommu_ops);
		if (ret)
			goto err_unregister;
	}

	vdev->priv = viommu;

	dev_info(dev, "input address: %u bits\n",
		 order_base_2(viommu->geometry.aperture_end));
	dev_info(dev, "page mask: %#llx\n", viommu->pgsize_bitmap);

	return 0;

err_unregister:
	iommu_device_sysfs_remove(&viommu->iommu);
	iommu_device_unregister(&viommu->iommu);
err_free_vqs:
	vdev->config->del_vqs(vdev);

	return ret;
}
static void viommu_remove(struct virtio_device *vdev)
{
	struct viommu_dev *viommu = vdev->priv;

	iommu_device_sysfs_remove(&viommu->iommu);
	iommu_device_unregister(&viommu->iommu);

	/* Stop all virtqueues */
	vdev->config->reset(vdev);
	vdev->config->del_vqs(vdev);

	dev_info(&vdev->dev, "device removed\n");
}
static void viommu_config_changed(struct virtio_device *vdev)
{
	dev_warn(&vdev->dev, "config changed\n");
}
static unsigned int features[] = {
	VIRTIO_IOMMU_F_MAP_UNMAP,
	VIRTIO_IOMMU_F_INPUT_RANGE,
	VIRTIO_IOMMU_F_DOMAIN_RANGE,
	VIRTIO_IOMMU_F_PROBE,
	VIRTIO_IOMMU_F_MMIO,
};
static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_IOMMU, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
MODULE_DEVICE_TABLE(virtio, id_table);
static struct virtio_driver virtio_iommu_drv = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.probe = viommu_probe,
	.remove = viommu_remove,
	.config_changed = viommu_config_changed,
};

module_virtio_driver(virtio_iommu_drv);
MODULE_DESCRIPTION("Virtio IOMMU driver");
MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>");
MODULE_LICENSE("GPL v2");