// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO core
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/vfio.h>
#include <linux/iommufd.h>
#include <linux/anon_inodes.h>
#include "vfio.h"

static struct vfio {
	struct class			*class;
	struct list_head		group_list;
	struct mutex			group_lock; /* locks group_list */
	struct ida			group_ida;
	dev_t				group_devt;
} vfio;

static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
						     char *buf)
{
	struct vfio_device *it, *device = ERR_PTR(-ENODEV);

	mutex_lock(&group->device_lock);
	list_for_each_entry(it, &group->device_list, group_next) {
		int ret;

		if (it->ops->match) {
			ret = it->ops->match(it, buf);
			if (ret < 0) {
				device = ERR_PTR(ret);
				break;
			}
		} else {
			ret = !strcmp(dev_name(it->dev), buf);
		}

		if (ret && vfio_device_try_get_registration(it)) {
			device = it;
			break;
		}
	}
	mutex_unlock(&group->device_lock);

	return device;
}

/*
 * VFIO Group fd, /dev/vfio/$GROUP
 */
static bool vfio_group_has_iommu(struct vfio_group *group)
{
	lockdep_assert_held(&group->group_lock);
	/*
	 * There can only be users if there is a container, and if there is a
	 * container there must be users.
	 */
	WARN_ON(!group->container != !group->container_users);

	return group->container || group->iommufd;
}

/*
 * VFIO_GROUP_UNSET_CONTAINER should fail if there are other users or
 * if there was no container to unset.  Since the ioctl is called on
 * the group, we know the group still exists, therefore the only valid
 * transition here is 1->0.
 */
static int vfio_group_ioctl_unset_container(struct vfio_group *group)
{
	int ret = 0;

	mutex_lock(&group->group_lock);
	if (!vfio_group_has_iommu(group)) {
		ret = -EINVAL;
		goto out_unlock;
	}
	if (group->container) {
		if (group->container_users != 1) {
			ret = -EBUSY;
			goto out_unlock;
		}
		vfio_group_detach_container(group);
	}
	if (group->iommufd) {
		iommufd_ctx_put(group->iommufd);
		group->iommufd = NULL;
	}

out_unlock:
	mutex_unlock(&group->group_lock);
	return ret;
}

static int vfio_group_ioctl_set_container(struct vfio_group *group,
					  int __user *arg)
{
	struct vfio_container *container;
	struct iommufd_ctx *iommufd;
	struct fd f;
	int ret;
	int fd;

	if (get_user(fd, arg))
		return -EFAULT;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	mutex_lock(&group->group_lock);
	if (vfio_group_has_iommu(group)) {
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!group->iommu_group) {
		ret = -ENODEV;
		goto out_unlock;
	}

	container = vfio_container_from_file(f.file);
	if (container) {
		ret = vfio_container_attach_group(container, group);
		goto out_unlock;
	}

	iommufd = iommufd_ctx_from_file(f.file);
	if (!IS_ERR(iommufd)) {
		u32 ioas_id;

		ret = iommufd_vfio_compat_ioas_id(iommufd, &ioas_id);
		if (ret) {
			iommufd_ctx_put(iommufd);
			goto out_unlock;
		}

		group->iommufd = iommufd;
		goto out_unlock;
	}

	/* The FD passed is not recognized. */
	ret = -EBADFD;

out_unlock:
	mutex_unlock(&group->group_lock);
	fdput(f);
	return ret;
}

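/*
 * A minimal userspace sketch of the attach/detach flow this ioctl pair
 * implements (fds, group number and ordering are illustrative, see
 * Documentation/driver-api/vfio.rst):
 *
 *	int container = open("/dev/vfio/vfio", O_RDWR);
 *	int group = open("/dev/vfio/26", O_RDWR);
 *
 *	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
 *	...
 *	ioctl(group, VFIO_GROUP_UNSET_CONTAINER);
 */
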
static void vfio_device_group_get_kvm_safe(struct vfio_device *device)
{
	spin_lock(&device->group->kvm_ref_lock);
	if (!device->group->kvm)
		goto unlock;

	_vfio_device_get_kvm_safe(device, device->group->kvm);

unlock:
	spin_unlock(&device->group->kvm_ref_lock);
}

static int vfio_device_group_open(struct vfio_device *device)
{
	int ret;

	mutex_lock(&device->group->group_lock);
	if (!vfio_group_has_iommu(device->group)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	mutex_lock(&device->dev_set->lock);

	/*
	 * Before the first device open, get the KVM pointer currently
	 * associated with the group (if there is one) and obtain a reference
	 * now that will be held until the open_count reaches 0 again.  Save
	 * the pointer in the device for use by drivers.
	 */
	if (device->open_count == 0)
		vfio_device_group_get_kvm_safe(device);

	ret = vfio_device_open(device, device->group->iommufd);

	if (device->open_count == 0)
		vfio_device_put_kvm(device);

	mutex_unlock(&device->dev_set->lock);

out_unlock:
	mutex_unlock(&device->group->group_lock);
	return ret;
}

void vfio_device_group_close(struct vfio_device *device)
{
	mutex_lock(&device->group->group_lock);
	mutex_lock(&device->dev_set->lock);

	vfio_device_close(device, device->group->iommufd);

	if (device->open_count == 0)
		vfio_device_put_kvm(device);

	mutex_unlock(&device->dev_set->lock);
	mutex_unlock(&device->group->group_lock);
}

static struct file *vfio_device_open_file(struct vfio_device *device)
{
	struct file *filep;
	int ret;

	ret = vfio_device_group_open(device);
	if (ret)
		goto err_out;

	/*
	 * We can't use anon_inode_getfd() because we need to modify
	 * the f_mode flags directly to allow more than just ioctls
	 */
	filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
				   device, O_RDWR);
	if (IS_ERR(filep)) {
		ret = PTR_ERR(filep);
		goto err_close_device;
	}

	/*
	 * TODO: add an anon_inode interface to do this.
	 * Appears to be missing by lack of need rather than
	 * explicitly prevented.  Now there's need.
	 */
	filep->f_mode |= (FMODE_PREAD | FMODE_PWRITE);

	if (device->group->type == VFIO_NO_IOMMU)
		dev_warn(device->dev, "vfio-noiommu device opened by user "
			 "(%s:%d)\n", current->comm, task_pid_nr(current));
	/*
	 * On success the ref of device is moved to the file and
	 * put in vfio_device_fops_release()
	 */
	return filep;

err_close_device:
	vfio_device_group_close(device);
err_out:
	return ERR_PTR(ret);
}

static int vfio_group_ioctl_get_device_fd(struct vfio_group *group,
					  char __user *arg)
{
	struct vfio_device *device;
	struct file *filep;
	char *buf;
	int fdno;
	int ret;

	buf = strndup_user(arg, PAGE_SIZE);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	device = vfio_device_get_from_name(group, buf);
	kfree(buf);
	if (IS_ERR(device))
		return PTR_ERR(device);

	fdno = get_unused_fd_flags(O_CLOEXEC);
	if (fdno < 0) {
		ret = fdno;
		goto err_put_device;
	}

	filep = vfio_device_open_file(device);
	if (IS_ERR(filep)) {
		ret = PTR_ERR(filep);
		goto err_put_fdno;
	}

	fd_install(fdno, filep);
	return fdno;

err_put_fdno:
	put_unused_fd(fdno);
err_put_device:
	vfio_device_put_registration(device);
	return ret;
}

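/*
 * From userspace this looks like (the device name is illustrative):
 *
 *	int device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");
 *
 * On success the ioctl return value itself is the new device fd.
 */
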
static int vfio_group_ioctl_get_status(struct vfio_group *group,
				       struct vfio_group_status __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_group_status, flags);
	struct vfio_group_status status;

	if (copy_from_user(&status, arg, minsz))
		return -EFAULT;

	if (status.argsz < minsz)
		return -EINVAL;

	status.flags = 0;

	mutex_lock(&group->group_lock);
	if (!group->iommu_group) {
		mutex_unlock(&group->group_lock);
		return -ENODEV;
	}

	/*
	 * With the container FD the iommu_group_claim_dma_owner() is done
	 * during SET_CONTAINER but for iommufd this is done during
	 * VFIO_GROUP_GET_DEVICE_FD. Meaning that with iommufd
	 * VFIO_GROUP_FLAGS_VIABLE could be set but GET_DEVICE_FD will fail due
	 * to viability.
	 */
	if (vfio_group_has_iommu(group))
		status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET |
				VFIO_GROUP_FLAGS_VIABLE;
	else if (!iommu_group_dma_owner_claimed(group->iommu_group))
		status.flags |= VFIO_GROUP_FLAGS_VIABLE;
	mutex_unlock(&group->group_lock);

	if (copy_to_user(arg, &status, minsz))
		return -EFAULT;
	return 0;
}

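/*
 * Callers follow the usual vfio argsz pattern, e.g. (sketch):
 *
 *	struct vfio_group_status status = { .argsz = sizeof(status) };
 *
 *	ioctl(group, VFIO_GROUP_GET_STATUS, &status);
 *	if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE))
 *		... not all devices in the group are bound to vfio ...
 */
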
static long vfio_group_fops_unl_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vfio_group *group = filep->private_data;
	void __user *uarg = (void __user *)arg;

	switch (cmd) {
	case VFIO_GROUP_GET_DEVICE_FD:
		return vfio_group_ioctl_get_device_fd(group, uarg);
	case VFIO_GROUP_GET_STATUS:
		return vfio_group_ioctl_get_status(group, uarg);
	case VFIO_GROUP_SET_CONTAINER:
		return vfio_group_ioctl_set_container(group, uarg);
	case VFIO_GROUP_UNSET_CONTAINER:
		return vfio_group_ioctl_unset_container(group);
	default:
		return -ENOTTY;
	}
}

static int vfio_group_fops_open(struct inode *inode, struct file *filep)
{
	struct vfio_group *group =
		container_of(inode->i_cdev, struct vfio_group, cdev);
	int ret;

	mutex_lock(&group->group_lock);

	/*
	 * drivers can be zero if this races with vfio_device_remove_group();
	 * it will be stable at 0 under the group_lock.
	 */
	if (refcount_read(&group->drivers) == 0) {
		ret = -ENODEV;
		goto out_unlock;
	}

	if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO)) {
		ret = -EPERM;
		goto out_unlock;
	}

	/*
	 * Do we need multiple instances of the group open?  Seems not.
	 */
	if (group->opened_file) {
		ret = -EBUSY;
		goto out_unlock;
	}
	group->opened_file = filep;
	filep->private_data = group;
	ret = 0;

out_unlock:
	mutex_unlock(&group->group_lock);
	return ret;
}

static int vfio_group_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_group *group = filep->private_data;

	filep->private_data = NULL;

	mutex_lock(&group->group_lock);
	/*
	 * Device FDs hold a group file reference, therefore the group release
	 * is only called when there are no open devices.
	 */
	WARN_ON(group->notifier.head);
	if (group->container)
		vfio_group_detach_container(group);
	if (group->iommufd) {
		iommufd_ctx_put(group->iommufd);
		group->iommufd = NULL;
	}
	group->opened_file = NULL;
	mutex_unlock(&group->group_lock);
	return 0;
}

static const struct file_operations vfio_group_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= vfio_group_fops_unl_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.open		= vfio_group_fops_open,
	.release	= vfio_group_fops_release,
};

/*
 * Group objects - create, release, get, put, search
 */
static struct vfio_group *
vfio_group_find_from_iommu(struct iommu_group *iommu_group)
{
	struct vfio_group *group;

	lockdep_assert_held(&vfio.group_lock);

	/*
	 * group->iommu_group from the vfio.group_list cannot be NULL
	 * under the vfio.group_lock.
	 */
	list_for_each_entry(group, &vfio.group_list, vfio_next) {
		if (group->iommu_group == iommu_group)
			return group;
	}
	return NULL;
}

static void vfio_group_release(struct device *dev)
{
	struct vfio_group *group = container_of(dev, struct vfio_group, dev);

	mutex_destroy(&group->device_lock);
	mutex_destroy(&group->group_lock);
	WARN_ON(group->iommu_group);
	ida_free(&vfio.group_ida, MINOR(group->dev.devt));
	kfree(group);
}

static struct vfio_group *vfio_group_alloc(struct iommu_group *iommu_group,
					   enum vfio_group_type type)
{
	struct vfio_group *group;
	int minor;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	minor = ida_alloc_max(&vfio.group_ida, MINORMASK, GFP_KERNEL);
	if (minor < 0) {
		kfree(group);
		return ERR_PTR(minor);
	}

	device_initialize(&group->dev);
	group->dev.devt = MKDEV(MAJOR(vfio.group_devt), minor);
	group->dev.class = vfio.class;
	group->dev.release = vfio_group_release;
	cdev_init(&group->cdev, &vfio_group_fops);
	group->cdev.owner = THIS_MODULE;

	refcount_set(&group->drivers, 1);
	mutex_init(&group->group_lock);
	spin_lock_init(&group->kvm_ref_lock);
	INIT_LIST_HEAD(&group->device_list);
	mutex_init(&group->device_lock);
	group->iommu_group = iommu_group;
	/* put in vfio_group_release() */
	iommu_group_ref_get(iommu_group);
	group->type = type;
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	return group;
}

static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group,
					    enum vfio_group_type type)
{
	struct vfio_group *group;
	struct vfio_group *ret;
	int err;

	lockdep_assert_held(&vfio.group_lock);

	group = vfio_group_alloc(iommu_group, type);
	if (IS_ERR(group))
		return group;

	err = dev_set_name(&group->dev, "%s%d",
			   group->type == VFIO_NO_IOMMU ? "noiommu-" : "",
			   iommu_group_id(iommu_group));
	if (err) {
		ret = ERR_PTR(err);
		goto err_put;
	}

	err = cdev_device_add(&group->cdev, &group->dev);
	if (err) {
		ret = ERR_PTR(err);
		goto err_put;
	}

	list_add(&group->vfio_next, &vfio.group_list);

	return group;

err_put:
	put_device(&group->dev);
	return ret;
}

static struct vfio_group *vfio_noiommu_group_alloc(struct device *dev,
						   enum vfio_group_type type)
{
	struct iommu_group *iommu_group;
	struct vfio_group *group;
	int ret;

	iommu_group = iommu_group_alloc();
	if (IS_ERR(iommu_group))
		return ERR_CAST(iommu_group);

	ret = iommu_group_set_name(iommu_group, "vfio-noiommu");
	if (ret)
		goto out_put_group;
	ret = iommu_group_add_device(iommu_group, dev);
	if (ret)
		goto out_put_group;

	mutex_lock(&vfio.group_lock);
	group = vfio_create_group(iommu_group, type);
	mutex_unlock(&vfio.group_lock);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_remove_device;
	}
	iommu_group_put(iommu_group);
	return group;

out_remove_device:
	iommu_group_remove_device(dev);
out_put_group:
	iommu_group_put(iommu_group);
	return ERR_PTR(ret);
}

static bool vfio_group_has_device(struct vfio_group *group, struct device *dev)
{
	struct vfio_device *device;

	mutex_lock(&group->device_lock);
	list_for_each_entry(device, &group->device_list, group_next) {
		if (device->dev == dev) {
			mutex_unlock(&group->device_lock);
			return true;
		}
	}
	mutex_unlock(&group->device_lock);
	return false;
}

static struct vfio_group *vfio_group_find_or_alloc(struct device *dev)
{
	struct iommu_group *iommu_group;
	struct vfio_group *group;

	iommu_group = iommu_group_get(dev);
	if (!iommu_group && vfio_noiommu) {
		/*
		 * With noiommu enabled, create an IOMMU group for devices that
		 * don't already have one, implying no IOMMU hardware/driver
		 * exists.  Taint the kernel because we're about to give a DMA
		 * capable device to a user without IOMMU protection.
		 */
		group = vfio_noiommu_group_alloc(dev, VFIO_NO_IOMMU);
		if (!IS_ERR(group)) {
			add_taint(TAINT_USER, LOCKDEP_STILL_OK);
			dev_warn(dev, "Adding kernel taint for vfio-noiommu group on device\n");
		}
		return group;
	}

	if (!iommu_group)
		return ERR_PTR(-EINVAL);

	/*
	 * VFIO always sets IOMMU_CACHE because we offer no way for userspace to
	 * restore cache coherency. It has to be checked here because it is only
	 * valid for cases where we are using iommu groups.
	 */
	if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY)) {
		iommu_group_put(iommu_group);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&vfio.group_lock);
	group = vfio_group_find_from_iommu(iommu_group);
	if (group) {
		if (WARN_ON(vfio_group_has_device(group, dev)))
			group = ERR_PTR(-EINVAL);
		else
			refcount_inc(&group->drivers);
	} else {
		group = vfio_create_group(iommu_group, VFIO_IOMMU);
	}
	mutex_unlock(&vfio.group_lock);

	/* The vfio_group holds a reference to the iommu_group */
	iommu_group_put(iommu_group);
	return group;
}

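/*
 * Note that vfio_noiommu above is only true when the administrator has
 * opted in via the vfio module parameter, e.g. (illustrative):
 *
 *	modprobe vfio enable_unsafe_noiommu_mode=1
 *
 * The resulting group appears as /dev/vfio/noiommu-$GROUP (see
 * vfio_create_group()) and opening it additionally requires
 * CAP_SYS_RAWIO (see vfio_group_fops_open()).
 */
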
int vfio_device_set_group(struct vfio_device *device,
			  enum vfio_group_type type)
{
	struct vfio_group *group;

	if (type == VFIO_IOMMU)
		group = vfio_group_find_or_alloc(device->dev);
	else
		group = vfio_noiommu_group_alloc(device->dev, type);

	if (IS_ERR(group))
		return PTR_ERR(group);

	/* Our reference on group is moved to the device */
	device->group = group;
	return 0;
}

void vfio_device_remove_group(struct vfio_device *device)
{
	struct vfio_group *group = device->group;
	struct iommu_group *iommu_group;

	if (group->type == VFIO_NO_IOMMU || group->type == VFIO_EMULATED_IOMMU)
		iommu_group_remove_device(device->dev);

	/* Pairs with vfio_create_group() / vfio_group_find_or_alloc() */
	if (!refcount_dec_and_mutex_lock(&group->drivers, &vfio.group_lock))
		return;
	list_del(&group->vfio_next);

	/*
	 * We could concurrently probe another driver in the group that might
	 * race vfio_device_remove_group() with vfio_group_find_or_alloc(), so
	 * we have to ensure that the sysfs is all cleaned up under lock
	 * otherwise the cdev_device_add() will fail due to the name already
	 * existing.
	 */
	cdev_device_del(&group->cdev, &group->dev);

	mutex_lock(&group->group_lock);
	/*
	 * These data structures all have paired operations that can only be
	 * undone when the caller holds a live reference on the device. Since
	 * all pairs must be undone these WARN_ON's indicate some caller did not
	 * properly hold the group reference.
	 */
	WARN_ON(!list_empty(&group->device_list));
	WARN_ON(group->notifier.head);

	/*
	 * Revoke all users of group->iommu_group. At this point we know there
	 * are no devices active because we are unplugging the last one. Setting
	 * iommu_group to NULL blocks all new users.
	 */
	if (group->container)
		vfio_group_detach_container(group);
	iommu_group = group->iommu_group;
	group->iommu_group = NULL;
	mutex_unlock(&group->group_lock);
	mutex_unlock(&vfio.group_lock);

	iommu_group_put(iommu_group);
	put_device(&group->dev);
}

void vfio_device_group_register(struct vfio_device *device)
{
	mutex_lock(&device->group->device_lock);
	list_add(&device->group_next, &device->group->device_list);
	mutex_unlock(&device->group->device_lock);
}

void vfio_device_group_unregister(struct vfio_device *device)
{
	mutex_lock(&device->group->device_lock);
	list_del(&device->group_next);
	mutex_unlock(&device->group->device_lock);
}

int vfio_device_group_use_iommu(struct vfio_device *device)
{
	struct vfio_group *group = device->group;
	int ret = 0;

	lockdep_assert_held(&group->group_lock);

	if (WARN_ON(!group->container))
		return -EINVAL;

	ret = vfio_group_use_container(group);
	if (ret)
		return ret;
	vfio_device_container_register(device);
	return 0;
}

void vfio_device_group_unuse_iommu(struct vfio_device *device)
{
	struct vfio_group *group = device->group;

	lockdep_assert_held(&group->group_lock);

	if (WARN_ON(!group->container))
		return;

	vfio_device_container_unregister(device);
	vfio_group_unuse_container(group);
}

bool vfio_device_has_container(struct vfio_device *device)
{
	return device->group->container;
}

/**
 * vfio_file_iommu_group - Return the struct iommu_group for the vfio group file
 * @file: VFIO group file
 *
 * The returned iommu_group is valid as long as a ref is held on the file. This
 * returns a reference on the group. This function is deprecated, only the SPAPR
 * path in kvm should call it.
 */
struct iommu_group *vfio_file_iommu_group(struct file *file)
{
	struct vfio_group *group = file->private_data;
	struct iommu_group *iommu_group = NULL;

	if (!IS_ENABLED(CONFIG_SPAPR_TCE_IOMMU))
		return NULL;

	if (!vfio_file_is_group(file))
		return NULL;

	mutex_lock(&group->group_lock);
	if (group->iommu_group) {
		iommu_group = group->iommu_group;
		iommu_group_ref_get(iommu_group);
	}
	mutex_unlock(&group->group_lock);
	return iommu_group;
}
EXPORT_SYMBOL_GPL(vfio_file_iommu_group);

/**
 * vfio_file_is_group - True if the file is usable with VFIO APIs
 * @file: VFIO group file
 */
bool vfio_file_is_group(struct file *file)
{
	return file->f_op == &vfio_group_fops;
}
EXPORT_SYMBOL_GPL(vfio_file_is_group);

/**
 * vfio_file_enforced_coherent - True if the DMA associated with the VFIO file
 *        is always CPU cache coherent
 * @file: VFIO group file
 *
 * Enforced coherency means that the IOMMU ignores things like the PCIe no-snoop
 * bit in DMA transactions. A return of false indicates that the user has
 * rights to access additional instructions such as wbinvd on x86.
 */
bool vfio_file_enforced_coherent(struct file *file)
{
	struct vfio_group *group = file->private_data;
	struct vfio_device *device;
	bool ret = true;

	if (!vfio_file_is_group(file))
		return true;

	/*
	 * If the device does not have IOMMU_CAP_ENFORCE_CACHE_COHERENCY then
	 * any domain later attached to it will also not support it. If the cap
	 * is set then the iommu_domain eventually attached to the device/group
	 * must use a domain with enforce_cache_coherency().
	 */
	mutex_lock(&group->device_lock);
	list_for_each_entry(device, &group->device_list, group_next) {
		if (!device_iommu_capable(device->dev,
					  IOMMU_CAP_ENFORCE_CACHE_COHERENCY)) {
			ret = false;
			break;
		}
	}
	mutex_unlock(&group->device_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(vfio_file_enforced_coherent);

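/*
 * A simplified sketch of the caller side: KVM, for instance, uses this
 * result to decide whether a guest must be allowed to manage its own
 * cache coherency for DMA (the real code tracks this across groups):
 *
 *	if (!vfio_file_enforced_coherent(file))
 *		kvm_arch_register_noncoherent_dma(kvm);
 */
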
/**
 * vfio_file_set_kvm - Link a kvm with VFIO drivers
 * @file: VFIO group file
 * @kvm: KVM to link
 *
 * When a VFIO device is first opened the KVM will be available in
 * device->kvm if one was associated with the group.
 */
void vfio_file_set_kvm(struct file *file, struct kvm *kvm)
{
	struct vfio_group *group = file->private_data;

	if (!vfio_file_is_group(file))
		return;

	spin_lock(&group->kvm_ref_lock);
	group->kvm = kvm;
	spin_unlock(&group->kvm_ref_lock);
}
EXPORT_SYMBOL_GPL(vfio_file_set_kvm);

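/*
 * The association is normally driven from the KVM side: the VMM creates a
 * KVM_DEV_TYPE_VFIO device and registers the group fd with it, after which
 * KVM calls vfio_file_set_kvm(). A userspace sketch (fd names are
 * illustrative):
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_VFIO_GROUP,
 *		.attr = KVM_DEV_VFIO_GROUP_ADD,
 *		.addr = (__u64)(uintptr_t)&group_fd,
 *	};
 *
 *	ioctl(kvm_vfio_dev_fd, KVM_SET_DEVICE_ATTR, &attr);
 */
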
/**
 * vfio_file_has_dev - True if the VFIO file is a handle for device
 * @file: VFIO file to check
 * @device: Device that must be part of the file
 *
 * Returns true if given file has permission to manipulate the given device.
 */
bool vfio_file_has_dev(struct file *file, struct vfio_device *device)
{
	struct vfio_group *group = file->private_data;

	if (!vfio_file_is_group(file))
		return false;

	return group == device->group;
}
EXPORT_SYMBOL_GPL(vfio_file_has_dev);

static char *vfio_devnode(const struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev));
}

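/*
 * e.g. a group device named "26" surfaces as /dev/vfio/26, and a noiommu
 * group as /dev/vfio/noiommu-$GROUP.
 */
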
int __init vfio_group_init(void)
{
	int ret;

	ida_init(&vfio.group_ida);
	mutex_init(&vfio.group_lock);
	INIT_LIST_HEAD(&vfio.group_list);

	ret = vfio_container_init();
	if (ret)
		return ret;

	/* /dev/vfio/$GROUP */
	vfio.class = class_create(THIS_MODULE, "vfio");
	if (IS_ERR(vfio.class)) {
		ret = PTR_ERR(vfio.class);
		goto err_group_class;
	}

	vfio.class->devnode = vfio_devnode;

	ret = alloc_chrdev_region(&vfio.group_devt, 0, MINORMASK + 1, "vfio");
	if (ret)
		goto err_alloc_chrdev;

	return 0;

err_alloc_chrdev:
	class_destroy(vfio.class);
	vfio.class = NULL;
err_group_class:
	vfio_container_cleanup();
	return ret;
}

void vfio_group_cleanup(void)
{
	WARN_ON(!list_empty(&vfio.group_list));
	ida_destroy(&vfio.group_ida);
	unregister_chrdev_region(vfio.group_devt, MINORMASK + 1);
	class_destroy(vfio.class);
	vfio.class = NULL;
	vfio_container_cleanup();
}