// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO-KVM bridge pseudo device
 *
 * Copyright (C) 2013 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */

#include <linux/errno.h>
#include <linux/file.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include "vfio.h"

#ifdef CONFIG_SPAPR_TCE_IOMMU
#include <asm/kvm_ppc.h>
#endif

struct kvm_vfio_group {
	struct list_head node;
	struct file *file;
#ifdef CONFIG_SPAPR_TCE_IOMMU
	struct iommu_group *iommu_group;
#endif
};

struct kvm_vfio {
	struct list_head group_list;
	struct mutex lock;
	bool noncoherent;
};

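/*
 * The vfio symbols below are resolved at call time via symbol_get() so
 * that kvm avoids a hard module dependency on vfio; if vfio is not
 * loaded, each wrapper fails soft (returning NULL/false or doing
 * nothing).
 */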
static void kvm_vfio_file_set_kvm(struct file *file, struct kvm *kvm)
{
	void (*fn)(struct file *file, struct kvm *kvm);

	fn = symbol_get(vfio_file_set_kvm);
	if (!fn)
		return;

	fn(file, kvm);

	symbol_put(vfio_file_set_kvm);
}

static bool kvm_vfio_file_enforced_coherent(struct file *file)
{
	bool (*fn)(struct file *file);
	bool ret;

	fn = symbol_get(vfio_file_enforced_coherent);
	if (!fn)
		return false;

	ret = fn(file);

	symbol_put(vfio_file_enforced_coherent);

	return ret;
}

static bool kvm_vfio_file_is_group(struct file *file)
{
	bool (*fn)(struct file *file);
	bool ret;

	fn = symbol_get(vfio_file_is_group);
	if (!fn)
		return false;

	ret = fn(file);

	symbol_put(vfio_file_is_group);

	return ret;
}

#ifdef CONFIG_SPAPR_TCE_IOMMU
static struct iommu_group *kvm_vfio_file_iommu_group(struct file *file)
{
	struct iommu_group *(*fn)(struct file *file);
	struct iommu_group *ret;

	fn = symbol_get(vfio_file_iommu_group);
	if (!fn)
		return NULL;

	ret = fn(file);

	symbol_put(vfio_file_iommu_group);

	return ret;
}

static void kvm_spapr_tce_release_vfio_group(struct kvm *kvm,
					     struct kvm_vfio_group *kvg)
{
	if (WARN_ON_ONCE(!kvg->iommu_group))
		return;

	kvm_spapr_tce_release_iommu_group(kvm, kvg->iommu_group);
	iommu_group_put(kvg->iommu_group);
	kvg->iommu_group = NULL;
}
#endif

/*
 * Groups can use the same or different IOMMU domains.  If the same then
 * adding a new group may change the coherency of groups we've previously
 * been told about.  We don't want to care about any of that so we retest
 * each group and bail as soon as we find one that's noncoherent.  This
 * means we only ever [un]register_noncoherent_dma once for the whole device.
 */
static void kvm_vfio_update_coherency(struct kvm_device *dev)
{
	struct kvm_vfio *kv = dev->private;
	bool noncoherent = false;
	struct kvm_vfio_group *kvg;

	mutex_lock(&kv->lock);

	list_for_each_entry(kvg, &kv->group_list, node) {
		if (!kvm_vfio_file_enforced_coherent(kvg->file)) {
			noncoherent = true;
			break;
		}
	}

	if (noncoherent != kv->noncoherent) {
		kv->noncoherent = noncoherent;

		if (kv->noncoherent)
			kvm_arch_register_noncoherent_dma(dev->kvm);
		else
			kvm_arch_unregister_noncoherent_dma(dev->kvm);
	}

	mutex_unlock(&kv->lock);
}

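/*
 * Add a vfio group fd to this device: take a long-lived reference on
 * the file, link it on group_list, and hand the kvm pointer to vfio so
 * drivers can reach the owning VM.
 */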
static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
{
	struct kvm_vfio *kv = dev->private;
	struct kvm_vfio_group *kvg;
	struct file *filp;
	int ret;

	filp = fget(fd);
	if (!filp)
		return -EBADF;

	/* Ensure the FD is a vfio group FD. */
	if (!kvm_vfio_file_is_group(filp)) {
		ret = -EINVAL;
		goto err_fput;
	}

	mutex_lock(&kv->lock);

	list_for_each_entry(kvg, &kv->group_list, node) {
		if (kvg->file == filp) {
			ret = -EEXIST;
			goto err_unlock;
		}
	}

	kvg = kzalloc(sizeof(*kvg), GFP_KERNEL_ACCOUNT);
	if (!kvg) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	kvg->file = filp;
	list_add_tail(&kvg->node, &kv->group_list);

	kvm_arch_start_assignment(dev->kvm);

	mutex_unlock(&kv->lock);

	kvm_vfio_file_set_kvm(kvg->file, dev->kvm);
	kvm_vfio_update_coherency(dev);

	return 0;

err_unlock:
	mutex_unlock(&kv->lock);
err_fput:
	fput(filp);
	return ret;
}

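/*
 * Reverse of kvm_vfio_group_add(): unlink the group, clear vfio's kvm
 * pointer, and drop the file reference taken on add.
 */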
static int kvm_vfio_group_del(struct kvm_device *dev, unsigned int fd)
{
	struct kvm_vfio *kv = dev->private;
	struct kvm_vfio_group *kvg;
	struct fd f;
	int ret;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -ENOENT;

	mutex_lock(&kv->lock);

	list_for_each_entry(kvg, &kv->group_list, node) {
		if (kvg->file != f.file)
			continue;

		list_del(&kvg->node);
		kvm_arch_end_assignment(dev->kvm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
		kvm_spapr_tce_release_vfio_group(dev->kvm, kvg);
#endif
		kvm_vfio_file_set_kvm(kvg->file, NULL);
		fput(kvg->file);
		kfree(kvg);
		ret = 0;
		break;
	}

	mutex_unlock(&kv->lock);

	fdput(f);

	kvm_vfio_update_coherency(dev);

	return ret;
}

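/*
 * KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE: bind a POWER TCE table fd to the
 * IOMMU group behind an already-added vfio group fd.
 */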
#ifdef CONFIG_SPAPR_TCE_IOMMU
static int kvm_vfio_group_set_spapr_tce(struct kvm_device *dev,
					void __user *arg)
{
	struct kvm_vfio_spapr_tce param;
	struct kvm_vfio *kv = dev->private;
	struct kvm_vfio_group *kvg;
	struct fd f;
	int ret;

	if (copy_from_user(&param, arg, sizeof(struct kvm_vfio_spapr_tce)))
		return -EFAULT;

	f = fdget(param.groupfd);
	if (!f.file)
		return -EBADF;

	ret = -ENOENT;

	mutex_lock(&kv->lock);

	list_for_each_entry(kvg, &kv->group_list, node) {
		if (kvg->file != f.file)
			continue;

		if (!kvg->iommu_group) {
			kvg->iommu_group = kvm_vfio_file_iommu_group(kvg->file);
			if (WARN_ON_ONCE(!kvg->iommu_group)) {
				ret = -EIO;
				goto err_fdput;
			}
		}

		ret = kvm_spapr_tce_attach_iommu_group(dev->kvm, param.tablefd,
						       kvg->iommu_group);
		break;
	}

err_fdput:
	mutex_unlock(&kv->lock);
	fdput(f);
	return ret;
}
#endif

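/* Dispatch KVM_DEV_VFIO_GROUP attribute writes to the handlers above. */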
static int kvm_vfio_set_group(struct kvm_device *dev, long attr,
			      void __user *arg)
{
	int32_t __user *argp = arg;
	int32_t fd;

	switch (attr) {
	case KVM_DEV_VFIO_GROUP_ADD:
		if (get_user(fd, argp))
			return -EFAULT;
		return kvm_vfio_group_add(dev, fd);

	case KVM_DEV_VFIO_GROUP_DEL:
		if (get_user(fd, argp))
			return -EFAULT;
		return kvm_vfio_group_del(dev, fd);

#ifdef CONFIG_SPAPR_TCE_IOMMU
	case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
		return kvm_vfio_group_set_spapr_tce(dev, arg);
#endif
	}

	return -ENXIO;
}

static int kvm_vfio_set_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_VFIO_GROUP:
		return kvm_vfio_set_group(dev, attr->attr,
					  u64_to_user_ptr(attr->addr));
	}

	return -ENXIO;
}

static int kvm_vfio_has_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_VFIO_GROUP:
		switch (attr->attr) {
		case KVM_DEV_VFIO_GROUP_ADD:
		case KVM_DEV_VFIO_GROUP_DEL:
#ifdef CONFIG_SPAPR_TCE_IOMMU
		case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
#endif
			return 0;
		}

		break;
	}

	return -ENXIO;
}

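/*
 * Device release: detach every remaining group, undoing the references
 * and assignment accounting taken in kvm_vfio_group_add().
 */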
static void kvm_vfio_destroy(struct kvm_device *dev)
{
	struct kvm_vfio *kv = dev->private;
	struct kvm_vfio_group *kvg, *tmp;

	list_for_each_entry_safe(kvg, tmp, &kv->group_list, node) {
#ifdef CONFIG_SPAPR_TCE_IOMMU
		kvm_spapr_tce_release_vfio_group(dev->kvm, kvg);
#endif
		kvm_vfio_file_set_kvm(kvg->file, NULL);
		fput(kvg->file);
		list_del(&kvg->node);
		kfree(kvg);
		kvm_arch_end_assignment(dev->kvm);
	}

	kvm_vfio_update_coherency(dev);

	kfree(kv);
	kfree(dev); /* alloc by kvm_ioctl_create_device, free by .destroy */
}

static int kvm_vfio_create(struct kvm_device *dev, u32 type);

static struct kvm_device_ops kvm_vfio_ops = {
	.name = "kvm-vfio",
	.create = kvm_vfio_create,
	.destroy = kvm_vfio_destroy,
	.set_attr = kvm_vfio_set_attr,
	.has_attr = kvm_vfio_has_attr,
};

static int kvm_vfio_create(struct kvm_device *dev, u32 type)
{
	struct kvm_device *tmp;
	struct kvm_vfio *kv;

	/* Only one VFIO "device" per VM */
	list_for_each_entry(tmp, &dev->kvm->devices, vm_node)
		if (tmp->ops == &kvm_vfio_ops)
			return -EBUSY;

	kv = kzalloc(sizeof(*kv), GFP_KERNEL_ACCOUNT);
	if (!kv)
		return -ENOMEM;

	INIT_LIST_HEAD(&kv->group_list);
	mutex_init(&kv->lock);

	dev->private = kv;

	return 0;
}

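/*
 * Called from KVM module init/exit to (un)register the
 * KVM_DEV_TYPE_VFIO device type with the generic kvm device framework.
 */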
int kvm_vfio_ops_init(void)
{
	return kvm_register_device_ops(&kvm_vfio_ops, KVM_DEV_TYPE_VFIO);
}

void kvm_vfio_ops_exit(void)
{
	kvm_unregister_device_ops(KVM_DEV_TYPE_VFIO);
}