// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2023 Intel Corporation.
 */
#include <linux/vfio.h>
#include <linux/iommufd.h>

#include "vfio.h"

static dev_t device_devt;
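
/*
 * Initialize the cdev embedded in a vfio_device: derive the devt from the
 * vfio-dev major and the device index, and wire it to vfio_device_fops.
 */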
void vfio_init_device_cdev(struct vfio_device *device)
{
	device->device.devt = MKDEV(MAJOR(device_devt), device->index);
	cdev_init(&device->cdev, &vfio_device_fops);
	device->cdev.owner = THIS_MODULE;
}

/*
 * Device access via the fd opened by this function is blocked until
 * .open_device() is called successfully during BIND_IOMMUFD.
 */
int vfio_device_fops_cdev_open(struct inode *inode, struct file *filep)
{
	struct vfio_device *device = container_of(inode->i_cdev,
						  struct vfio_device, cdev);
	struct vfio_device_file *df;
	int ret;

	/* Paired with the put in vfio_device_fops_release() */
	if (!vfio_device_try_get_registration(device))
		return -ENODEV;

	df = vfio_allocate_device_file(device);
	if (IS_ERR(df)) {
		ret = PTR_ERR(df);
		goto err_put_registration;
	}

	filep->private_data = df;
	return 0;

err_put_registration:
	vfio_device_put_registration(device);
	return ret;
}

static void vfio_df_get_kvm_safe(struct vfio_device_file *df)
{
	spin_lock(&df->kvm_ref_lock);
	vfio_device_get_kvm_safe(df->device, df->kvm);
	spin_unlock(&df->kvm_ref_lock);
}
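
/*
 * VFIO_DEVICE_BIND_IOMMUFD: associate the cdev-opened device with an
 * iommufd context, open the device, and copy the resulting device id back
 * to userspace via out_devid.  Device access through the fd is granted
 * only after this succeeds.
 */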
long vfio_df_ioctl_bind_iommufd(struct vfio_device_file *df,
				struct vfio_device_bind_iommufd __user *arg)
{
	struct vfio_device *device = df->device;
	struct vfio_device_bind_iommufd bind;
	unsigned long minsz;
	int ret;

	static_assert(__same_type(arg->out_devid, df->devid));

	minsz = offsetofend(struct vfio_device_bind_iommufd, out_devid);

	if (copy_from_user(&bind, arg, minsz))
		return -EFAULT;

	if (bind.argsz < minsz || bind.flags || bind.iommufd < 0)
		return -EINVAL;

	/* BIND_IOMMUFD only allowed for cdev fds */
	if (df->group)
		return -EINVAL;

	ret = vfio_device_block_group(device);
	if (ret)
		return ret;

	mutex_lock(&device->dev_set->lock);
	/* one device cannot be bound twice */
	if (df->access_granted) {
		ret = -EINVAL;
		goto out_unlock;
	}

	df->iommufd = iommufd_ctx_from_fd(bind.iommufd);
	if (IS_ERR(df->iommufd)) {
		ret = PTR_ERR(df->iommufd);
		df->iommufd = NULL;
		goto out_unlock;
	}

	/*
	 * Before the device open, get the KVM pointer currently
	 * associated with the device file (if any) and obtain a
	 * reference.  This reference is held until the device is
	 * closed.  Save the pointer in the device for use by drivers.
	 */
	vfio_df_get_kvm_safe(df);

	ret = vfio_df_open(df);
	if (ret)
		goto out_put_kvm;

	ret = copy_to_user(&arg->out_devid, &df->devid,
			   sizeof(df->devid)) ? -EFAULT : 0;
	if (ret)
		goto out_close_device;

	device->cdev_opened = true;
	/*
	 * Paired with smp_load_acquire() in vfio_device_fops::ioctl/
	 * read/write/mmap
	 */
	smp_store_release(&df->access_granted, true);
	mutex_unlock(&device->dev_set->lock);
	return 0;

out_close_device:
	vfio_df_close(df);
out_put_kvm:
	vfio_device_put_kvm(device);
	iommufd_ctx_put(df->iommufd);
	df->iommufd = NULL;
out_unlock:
	mutex_unlock(&device->dev_set->lock);
	vfio_device_unblock_group(device);
	return ret;
}
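
/*
 * Undo vfio_df_ioctl_bind_iommufd() when the cdev fd is released: close
 * the device and drop the KVM and iommufd references taken at bind time.
 * A no-op if the fd was never successfully bound.
 */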
void vfio_df_unbind_iommufd(struct vfio_device_file *df)
{
	struct vfio_device *device = df->device;

	/*
	 * At the time of close there is no contention with anyone else
	 * changing this flag, so df->access_granted can be read without
	 * the lock and without smp_load_acquire().
	 */
	if (!df->access_granted)
		return;

	mutex_lock(&device->dev_set->lock);
	vfio_df_close(df);
	vfio_device_put_kvm(device);
	iommufd_ctx_put(df->iommufd);
	device->cdev_opened = false;
	mutex_unlock(&device->dev_set->lock);
	vfio_device_unblock_group(device);
}
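
/*
 * VFIO_DEVICE_ATTACH_IOMMUFD_PT: attach the bound device to the IOAS or
 * HW pagetable identified by pt_id.  The attach op may update pt_id, so
 * the resulting value is copied back to userspace on success.
 */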
int vfio_df_ioctl_attach_pt(struct vfio_device_file *df,
			    struct vfio_device_attach_iommufd_pt __user *arg)
{
	struct vfio_device *device = df->device;
	struct vfio_device_attach_iommufd_pt attach;
	unsigned long minsz;
	int ret;

	minsz = offsetofend(struct vfio_device_attach_iommufd_pt, pt_id);

	if (copy_from_user(&attach, arg, minsz))
		return -EFAULT;

	if (attach.argsz < minsz || attach.flags)
		return -EINVAL;

	mutex_lock(&device->dev_set->lock);
	ret = device->ops->attach_ioas(device, &attach.pt_id);
	if (ret)
		goto out_unlock;

	if (copy_to_user(&arg->pt_id, &attach.pt_id, sizeof(attach.pt_id))) {
		ret = -EFAULT;
		goto out_detach;
	}
	mutex_unlock(&device->dev_set->lock);
	return 0;

out_detach:
	device->ops->detach_ioas(device);
out_unlock:
	mutex_unlock(&device->dev_set->lock);
	return ret;
}
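
/*
 * VFIO_DEVICE_DETACH_IOMMUFD_PT: detach the device from its current IOAS
 * or HW pagetable.  No flags are currently supported.
 */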
int vfio_df_ioctl_detach_pt(struct vfio_device_file *df,
			    struct vfio_device_detach_iommufd_pt __user *arg)
{
	struct vfio_device *device = df->device;
	struct vfio_device_detach_iommufd_pt detach;
	unsigned long minsz;

	minsz = offsetofend(struct vfio_device_detach_iommufd_pt, flags);

	if (copy_from_user(&detach, arg, minsz))
		return -EFAULT;

	if (detach.argsz < minsz || detach.flags)
		return -EINVAL;

	mutex_lock(&device->dev_set->lock);
	device->ops->detach_ioas(device);
	mutex_unlock(&device->dev_set->lock);
	return 0;
}
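
/*
 * devnode callback for the vfio device class: place the device nodes
 * under /dev/vfio/devices/.
 */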
static char *vfio_device_devnode(const struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "vfio/devices/%s", dev_name(dev));
}
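
/*
 * Reserve a char device region for vfio device cdevs and hook the devnode
 * callback up to the vfio device class.
 */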
int vfio_cdev_init(struct class *device_class)
{
	device_class->devnode = vfio_device_devnode;
	return alloc_chrdev_region(&device_devt, 0,
				   MINORMASK + 1, "vfio-dev");
}

void vfio_cdev_cleanup(void)
{
	unregister_chrdev_region(device_devt, MINORMASK + 1);
}