/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */
6 #ifndef __VFIO_VFIO_H__
7 #define __VFIO_VFIO_H__
9 #include <linux/file.h>
10 #include <linux/device.h>
11 #include <linux/cdev.h>
12 #include <linux/module.h>
13 #include <linux/vfio.h>
struct vfio_container;

/* Registration refcount: drop / conditionally take a reference on a device. */
void vfio_device_put_registration(struct vfio_device *device);
bool vfio_device_try_get_registration(struct vfio_device *device);
/*
 * Open/close the device file state.  @iommufd is the iommufd context the
 * device is bound to (NULL for the legacy container path); @kvm is the
 * associated KVM instance, if any, forwarded to the driver.
 */
int vfio_device_open(struct vfio_device *device,
		     struct iommufd_ctx *iommufd, struct kvm *kvm);
void vfio_device_close(struct vfio_device *device,
		       struct iommufd_ctx *iommufd);

/* fops backing the per-device cdev/FD handed out to userspace. */
extern const struct file_operations vfio_device_fops;
enum vfio_group_type {
	/*
	 * Physical device with IOMMU backing.
	 */
	VFIO_IOMMU,

	/*
	 * Virtual device without IOMMU backing. The VFIO core fakes up an
	 * iommu_group as the iommu_group sysfs interface is part of the
	 * userspace ABI.  The user of these devices must not be able to
	 * directly trigger unmediated DMA.
	 */
	VFIO_EMULATED_IOMMU,

	/*
	 * Physical device without IOMMU backing. The VFIO core fakes up an
	 * iommu_group as the iommu_group sysfs interface is part of the
	 * userspace ABI.  Users can trigger unmediated DMA by the device,
	 * usage is highly dangerous, requires an explicit opt-in and will
	 * taint the kernel.
	 */
	VFIO_NO_IOMMU,
};
56 * When drivers is non-zero a driver is attached to the struct device
57 * that provided the iommu_group and thus the iommu_group is a valid
58 * pointer. When drivers is 0 the driver is being detached. Once users
59 * reaches 0 then the iommu_group is invalid.
62 unsigned int container_users;
63 struct iommu_group *iommu_group;
64 struct vfio_container *container;
65 struct list_head device_list;
66 struct mutex device_lock;
67 struct list_head vfio_next;
68 #if IS_ENABLED(CONFIG_VFIO_CONTAINER)
69 struct list_head container_next;
71 enum vfio_group_type type;
72 struct mutex group_lock;
74 struct file *opened_file;
75 struct blocking_notifier_head notifier;
76 struct iommufd_ctx *iommufd;
/* Associate a device with a (possibly faked) iommu_group of the given type. */
int vfio_device_set_group(struct vfio_device *device,
			  enum vfio_group_type type);
void vfio_device_remove_group(struct vfio_device *device);
/* Add/remove the device on its group's device_list. */
void vfio_device_group_register(struct vfio_device *device);
void vfio_device_group_unregister(struct vfio_device *device);
/* Take/drop a container_users reference on the group's IOMMU backing. */
int vfio_device_group_use_iommu(struct vfio_device *device);
void vfio_device_group_unuse_iommu(struct vfio_device *device);
void vfio_device_group_close(struct vfio_device *device);
/* True if the device's group is attached to a legacy container. */
bool vfio_device_has_container(struct vfio_device *device);
/* Module init/teardown for the group cdev/class machinery. */
int __init vfio_group_init(void);
void vfio_group_cleanup(void);
91 static inline bool vfio_device_is_noiommu(struct vfio_device *vdev)
93 return IS_ENABLED(CONFIG_VFIO_NOIOMMU) &&
94 vdev->group->type == VFIO_NO_IOMMU;
97 #if IS_ENABLED(CONFIG_VFIO_CONTAINER)
98 /* events for the backend driver notify callback */
99 enum vfio_iommu_notify_type {
100 VFIO_IOMMU_CONTAINER_CLOSE = 0,
104 * struct vfio_iommu_driver_ops - VFIO IOMMU driver callbacks
106 struct vfio_iommu_driver_ops {
108 struct module *owner;
109 void *(*open)(unsigned long arg);
110 void (*release)(void *iommu_data);
111 long (*ioctl)(void *iommu_data, unsigned int cmd,
113 int (*attach_group)(void *iommu_data,
114 struct iommu_group *group,
115 enum vfio_group_type);
116 void (*detach_group)(void *iommu_data,
117 struct iommu_group *group);
118 int (*pin_pages)(void *iommu_data,
119 struct iommu_group *group,
120 dma_addr_t user_iova,
122 struct page **pages);
123 void (*unpin_pages)(void *iommu_data,
124 dma_addr_t user_iova, int npage);
125 void (*register_device)(void *iommu_data,
126 struct vfio_device *vdev);
127 void (*unregister_device)(void *iommu_data,
128 struct vfio_device *vdev);
129 int (*dma_rw)(void *iommu_data, dma_addr_t user_iova,
130 void *data, size_t count, bool write);
131 struct iommu_domain *(*group_iommu_domain)(void *iommu_data,
132 struct iommu_group *group);
133 void (*notify)(void *iommu_data,
134 enum vfio_iommu_notify_type event);
137 struct vfio_iommu_driver {
138 const struct vfio_iommu_driver_ops *ops;
139 struct list_head vfio_next;
/* Register/unregister an IOMMU backend (e.g. type1, spapr) with the core. */
int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops);
void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops);

/* Resolve a container from its file; NULL if the file isn't a container FD. */
struct vfio_container *vfio_container_from_file(struct file *filep);
/* Pin/unpin the group's container across device open. */
int vfio_group_use_container(struct vfio_group *group);
void vfio_group_unuse_container(struct vfio_group *group);
int vfio_container_attach_group(struct vfio_container *container,
				struct vfio_group *group);
void vfio_group_detach_container(struct vfio_group *group);
/* Notify the IOMMU backend that a vfio_device came/went. */
void vfio_device_container_register(struct vfio_device *device);
void vfio_device_container_unregister(struct vfio_device *device);
/* Pin/unpin user IOVAs and DMA read/write through the container backend. */
int vfio_device_container_pin_pages(struct vfio_device *device,
				    dma_addr_t iova, int npage,
				    int prot, struct page **pages);
void vfio_device_container_unpin_pages(struct vfio_device *device,
				       dma_addr_t iova, int npage);
int vfio_device_container_dma_rw(struct vfio_device *device,
				 dma_addr_t iova, void *data,
				 size_t len, bool write);

int __init vfio_container_init(void);
void vfio_container_cleanup(void);
165 static inline struct vfio_container *
166 vfio_container_from_file(struct file *filep)
171 static inline int vfio_group_use_container(struct vfio_group *group)
176 static inline void vfio_group_unuse_container(struct vfio_group *group)
180 static inline int vfio_container_attach_group(struct vfio_container *container,
181 struct vfio_group *group)
186 static inline void vfio_group_detach_container(struct vfio_group *group)
190 static inline void vfio_device_container_register(struct vfio_device *device)
194 static inline void vfio_device_container_unregister(struct vfio_device *device)
198 static inline int vfio_device_container_pin_pages(struct vfio_device *device,
199 dma_addr_t iova, int npage,
200 int prot, struct page **pages)
205 static inline void vfio_device_container_unpin_pages(struct vfio_device *device,
206 dma_addr_t iova, int npage)
210 static inline int vfio_device_container_dma_rw(struct vfio_device *device,
211 dma_addr_t iova, void *data,
212 size_t len, bool write)
217 static inline int vfio_container_init(void)
221 static inline void vfio_container_cleanup(void)
226 #if IS_ENABLED(CONFIG_IOMMUFD)
227 int vfio_iommufd_bind(struct vfio_device *device, struct iommufd_ctx *ictx);
228 void vfio_iommufd_unbind(struct vfio_device *device);
230 static inline int vfio_iommufd_bind(struct vfio_device *device,
231 struct iommufd_ctx *ictx)
236 static inline void vfio_iommufd_unbind(struct vfio_device *device)
241 #if IS_ENABLED(CONFIG_VFIO_VIRQFD)
242 int __init vfio_virqfd_init(void);
243 void vfio_virqfd_exit(void);
245 static inline int __init vfio_virqfd_init(void)
249 static inline void vfio_virqfd_exit(void)
254 #ifdef CONFIG_VFIO_NOIOMMU
255 extern bool vfio_noiommu __read_mostly;
257 enum { vfio_noiommu = false };