// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#include <linux/vfio.h>
#include <linux/iommufd.h>

#include "vfio.h"
/* Pull in the iommufd symbol namespaces this glue layer links against */
MODULE_IMPORT_NS(IOMMUFD);
MODULE_IMPORT_NS(IOMMUFD_VFIO);
13 int vfio_iommufd_bind(struct vfio_device *vdev, struct iommufd_ctx *ictx)
19 lockdep_assert_held(&vdev->dev_set->lock);
21 if (vfio_device_is_noiommu(vdev)) {
22 if (!capable(CAP_SYS_RAWIO))
26 * Require no compat ioas to be assigned to proceed. The basic
27 * statement is that the user cannot have done something that
28 * implies they expected translation to exist
30 if (!iommufd_vfio_compat_ioas_get_id(ictx, &ioas_id))
35 ret = vdev->ops->bind_iommufd(vdev, ictx, &device_id);
39 ret = iommufd_vfio_compat_ioas_get_id(ictx, &ioas_id);
42 ret = vdev->ops->attach_ioas(vdev, &ioas_id);
47 * The legacy path has no way to return the device id or the selected
53 if (vdev->ops->unbind_iommufd)
54 vdev->ops->unbind_iommufd(vdev);
58 void vfio_iommufd_unbind(struct vfio_device *vdev)
60 lockdep_assert_held(&vdev->dev_set->lock);
62 if (vfio_device_is_noiommu(vdev))
65 if (vdev->ops->unbind_iommufd)
66 vdev->ops->unbind_iommufd(vdev);
70 * The physical standard ops mean that the iommufd_device is bound to the
71 * physical device vdev->dev that was provided to vfio_init_group_dev(). Drivers
72 * using this ops set should call vfio_register_group_dev()
74 int vfio_iommufd_physical_bind(struct vfio_device *vdev,
75 struct iommufd_ctx *ictx, u32 *out_device_id)
77 struct iommufd_device *idev;
79 idev = iommufd_device_bind(ictx, vdev->dev, out_device_id);
82 vdev->iommufd_device = idev;
85 EXPORT_SYMBOL_GPL(vfio_iommufd_physical_bind);
87 void vfio_iommufd_physical_unbind(struct vfio_device *vdev)
89 lockdep_assert_held(&vdev->dev_set->lock);
91 if (vdev->iommufd_attached) {
92 iommufd_device_detach(vdev->iommufd_device);
93 vdev->iommufd_attached = false;
95 iommufd_device_unbind(vdev->iommufd_device);
96 vdev->iommufd_device = NULL;
98 EXPORT_SYMBOL_GPL(vfio_iommufd_physical_unbind);
100 int vfio_iommufd_physical_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
104 rc = iommufd_device_attach(vdev->iommufd_device, pt_id);
107 vdev->iommufd_attached = true;
110 EXPORT_SYMBOL_GPL(vfio_iommufd_physical_attach_ioas);
113 * The emulated standard ops mean that vfio_device is going to use the
114 * "mdev path" and will call vfio_pin_pages()/vfio_dma_rw(). Drivers using this
115 * ops set should call vfio_register_emulated_iommu_dev(). Drivers that do
116 * not call vfio_pin_pages()/vfio_dma_rw() have no need to provide dma_unmap.
119 static void vfio_emulated_unmap(void *data, unsigned long iova,
120 unsigned long length)
122 struct vfio_device *vdev = data;
124 if (vdev->ops->dma_unmap)
125 vdev->ops->dma_unmap(vdev, iova, length);
128 static const struct iommufd_access_ops vfio_user_ops = {
129 .needs_pin_pages = 1,
130 .unmap = vfio_emulated_unmap,
133 int vfio_iommufd_emulated_bind(struct vfio_device *vdev,
134 struct iommufd_ctx *ictx, u32 *out_device_id)
136 struct iommufd_access *user;
138 lockdep_assert_held(&vdev->dev_set->lock);
140 user = iommufd_access_create(ictx, &vfio_user_ops, vdev, out_device_id);
142 return PTR_ERR(user);
143 vdev->iommufd_access = user;
146 EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_bind);
148 void vfio_iommufd_emulated_unbind(struct vfio_device *vdev)
150 lockdep_assert_held(&vdev->dev_set->lock);
152 if (vdev->iommufd_access) {
153 iommufd_access_destroy(vdev->iommufd_access);
154 vdev->iommufd_attached = false;
155 vdev->iommufd_access = NULL;
158 EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_unbind);
160 int vfio_iommufd_emulated_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
164 lockdep_assert_held(&vdev->dev_set->lock);
166 if (vdev->iommufd_attached)
168 rc = iommufd_access_attach(vdev->iommufd_access, *pt_id);
171 vdev->iommufd_attached = true;
174 EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_attach_ioas);