// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO core
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */
#include <linux/cdev.h>
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#ifdef CONFIG_HAVE_KVM
#include <linux/kvm_host.h>
#endif
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>
#include <linux/pm_runtime.h>
#include <linux/interval_tree.h>
#include <linux/iova_bitmap.h>
#include <linux/iommufd.h>
#include "vfio.h"
#define DRIVER_VERSION	"0.3"
#define DRIVER_AUTHOR	"Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC	"VFIO - User Level meta-driver"
static struct vfio {
	struct class			*device_class;
	struct ida			device_ida;
} vfio;
#ifdef CONFIG_VFIO_NOIOMMU
bool vfio_noiommu __read_mostly;
module_param_named(enable_unsafe_noiommu_mode,
		   vfio_noiommu, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode. This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel. If you do not know what this is for, step away. (default: false)");
#endif
static DEFINE_XARRAY(vfio_device_set_xa);
int vfio_assign_device_set(struct vfio_device *device, void *set_id)
{
	unsigned long idx = (unsigned long)set_id;
	struct vfio_device_set *new_dev_set;
	struct vfio_device_set *dev_set;

	if (WARN_ON(!set_id))
		return -EINVAL;

	/*
	 * Atomically acquire a singleton object in the xarray for this set_id
	 */
	xa_lock(&vfio_device_set_xa);
	dev_set = xa_load(&vfio_device_set_xa, idx);
	if (dev_set)
		goto found_get_ref;
	xa_unlock(&vfio_device_set_xa);

	new_dev_set = kzalloc(sizeof(*new_dev_set), GFP_KERNEL);
	if (!new_dev_set)
		return -ENOMEM;
	mutex_init(&new_dev_set->lock);
	INIT_LIST_HEAD(&new_dev_set->device_list);
	new_dev_set->set_id = set_id;

	xa_lock(&vfio_device_set_xa);
	dev_set = __xa_cmpxchg(&vfio_device_set_xa, idx, NULL, new_dev_set,
			       GFP_KERNEL);
	if (!dev_set) {
		dev_set = new_dev_set;
		goto found_get_ref;
	}

	kfree(new_dev_set);
	if (xa_is_err(dev_set)) {
		xa_unlock(&vfio_device_set_xa);
		return xa_err(dev_set);
	}

found_get_ref:
	dev_set->device_count++;
	xa_unlock(&vfio_device_set_xa);
	mutex_lock(&dev_set->lock);
	device->dev_set = dev_set;
	list_add_tail(&device->dev_set_list, &dev_set->device_list);
	mutex_unlock(&dev_set->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_assign_device_set);
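
/*
 * Illustrative sketch, not part of this file: a driver whose functions share
 * a reset domain can key the set on any stable per-domain pointer so that
 * they all land in one vfio_device_set (and thus share dev_set->lock).
 * "my_vdev" and "reset_domain" are hypothetical names:
 *
 *	ret = vfio_assign_device_set(&my_vdev->vdev, reset_domain);
 *	if (ret)
 *		return ret;
 *
 * Devices registered without an explicit set each get a singleton set keyed
 * on the device itself, see __vfio_register_dev() below.
 */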
static void vfio_release_device_set(struct vfio_device *device)
{
	struct vfio_device_set *dev_set = device->dev_set;

	if (!dev_set)
		return;

	mutex_lock(&dev_set->lock);
	list_del(&device->dev_set_list);
	mutex_unlock(&dev_set->lock);

	xa_lock(&vfio_device_set_xa);
	if (!--dev_set->device_count) {
		__xa_erase(&vfio_device_set_xa,
			   (unsigned long)dev_set->set_id);
		mutex_destroy(&dev_set->lock);
		kfree(dev_set);
	}
	xa_unlock(&vfio_device_set_xa);
}
unsigned int vfio_device_set_open_count(struct vfio_device_set *dev_set)
{
	struct vfio_device *cur;
	unsigned int open_count = 0;

	lockdep_assert_held(&dev_set->lock);

	list_for_each_entry(cur, &dev_set->device_list, dev_set_list)
		open_count += cur->open_count;
	return open_count;
}
EXPORT_SYMBOL_GPL(vfio_device_set_open_count);
struct vfio_device *
vfio_find_device_in_devset(struct vfio_device_set *dev_set,
			   struct device *dev)
{
	struct vfio_device *cur;

	lockdep_assert_held(&dev_set->lock);

	list_for_each_entry(cur, &dev_set->device_list, dev_set_list)
		if (cur->dev == dev)
			return cur;
	return NULL;
}
EXPORT_SYMBOL_GPL(vfio_find_device_in_devset);
/*
 * Device objects - create, release, get, put, search
 */
/* Device reference always implies a group reference */
void vfio_device_put_registration(struct vfio_device *device)
{
	if (refcount_dec_and_test(&device->refcount))
		complete(&device->comp);
}

bool vfio_device_try_get_registration(struct vfio_device *device)
{
	return refcount_inc_not_zero(&device->refcount);
}
/* Release helper called by vfio_put_device() */
static void vfio_device_release(struct device *dev)
{
	struct vfio_device *device =
			container_of(dev, struct vfio_device, device);

	vfio_release_device_set(device);
	ida_free(&vfio.device_ida, device->index);

	if (device->ops->release)
		device->ops->release(device);

	kvfree(device);
}
static int vfio_init_device(struct vfio_device *device, struct device *dev,
			    const struct vfio_device_ops *ops);
/*
 * Allocate and initialize vfio_device so it can be registered to vfio
 * core.
 *
 * Drivers should use the wrapper vfio_alloc_device() for allocation.
 * @size is the size of the structure to be allocated, including any
 * private data used by the driver.
 *
 * Driver may provide an @init callback to cover device private data.
 *
 * Use vfio_put_device() to release the structure after success return.
 */
struct vfio_device *_vfio_alloc_device(size_t size, struct device *dev,
				       const struct vfio_device_ops *ops)
{
	struct vfio_device *device;
	int ret;

	if (WARN_ON(size < sizeof(struct vfio_device)))
		return ERR_PTR(-EINVAL);

	device = kvzalloc(size, GFP_KERNEL);
	if (!device)
		return ERR_PTR(-ENOMEM);

	ret = vfio_init_device(device, dev, ops);
	if (ret)
		goto out_free;
	return device;

out_free:
	kvfree(device);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(_vfio_alloc_device);
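
/*
 * Illustrative sketch, not part of this file: a driver embeds struct
 * vfio_device first in its own state and allocates both through the
 * vfio_alloc_device() wrapper.  "my_device" and "my_ops" are hypothetical
 * names:
 *
 *	struct my_device {
 *		struct vfio_device vdev;	// must be the first member
 *		void __iomem *regs;		// driver private data
 *	};
 *
 *	struct my_device *my;
 *
 *	my = vfio_alloc_device(my_device, vdev, dev, &my_ops);
 *	if (IS_ERR(my))
 *		return PTR_ERR(my);
 */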
/*
 * Initialize a vfio_device so it can be registered to vfio core.
 */
static int vfio_init_device(struct vfio_device *device, struct device *dev,
			    const struct vfio_device_ops *ops)
{
	int ret;

	ret = ida_alloc_max(&vfio.device_ida, MINORMASK, GFP_KERNEL);
	if (ret < 0) {
		dev_dbg(dev, "Failed to allocate device index\n");
		goto out;
	}

	device->index = ret;
	init_completion(&device->comp);
	device->dev = dev;
	device->ops = ops;

	if (ops->init) {
		ret = ops->init(device);
		if (ret)
			goto out_uninit;
	}

	device_initialize(&device->device);
	device->device.release = vfio_device_release;
	device->device.class = vfio.device_class;
	device->device.parent = device->dev;
	return 0;

out_uninit:
	vfio_release_device_set(device);
	ida_free(&vfio.device_ida, device->index);
out:
	return ret;
}
static int __vfio_register_dev(struct vfio_device *device,
			       enum vfio_group_type type)
{
	int ret;

	if (WARN_ON(IS_ENABLED(CONFIG_IOMMUFD) &&
		    (!device->ops->bind_iommufd ||
		     !device->ops->unbind_iommufd ||
		     !device->ops->attach_ioas ||
		     !device->ops->detach_ioas)))
		return -EINVAL;

	/*
	 * If the driver doesn't specify a set then the device is added to a
	 * singleton set just for itself.
	 */
	if (!device->dev_set)
		vfio_assign_device_set(device, device);

	ret = dev_set_name(&device->device, "vfio%d", device->index);
	if (ret)
		return ret;

	ret = vfio_device_set_group(device, type);
	if (ret)
		return ret;

	/*
	 * VFIO always sets IOMMU_CACHE because we offer no way for userspace to
	 * restore cache coherency. It has to be checked here because it is only
	 * valid for cases where we are using iommu groups.
	 */
	if (type == VFIO_IOMMU && !vfio_device_is_noiommu(device) &&
	    !device_iommu_capable(device->dev, IOMMU_CAP_CACHE_COHERENCY)) {
		ret = -EINVAL;
		goto err_out;
	}

	ret = vfio_device_add(device);
	if (ret)
		goto err_out;

	/* Refcounting can't start until the driver calls register */
	refcount_set(&device->refcount, 1);

	vfio_device_group_register(device);

	return 0;
err_out:
	vfio_device_remove_group(device);
	return ret;
}
int vfio_register_group_dev(struct vfio_device *device)
{
	return __vfio_register_dev(device, VFIO_IOMMU);
}
EXPORT_SYMBOL_GPL(vfio_register_group_dev);

/*
 * Register a virtual device without IOMMU backing.  The user of this
 * device must not be able to directly trigger unmediated DMA.
 */
int vfio_register_emulated_iommu_dev(struct vfio_device *device)
{
	return __vfio_register_dev(device, VFIO_EMULATED_IOMMU);
}
EXPORT_SYMBOL_GPL(vfio_register_emulated_iommu_dev);
/*
 * Decrement the device reference count and wait for the device to be
 * removed.  Open file descriptors for the device must be released before
 * this returns; the driver's ->request() callback is used to ask the user
 * to drop them while we wait.
 */
void vfio_unregister_group_dev(struct vfio_device *device)
{
	unsigned int i = 0;
	bool interrupted = false;
	long rc;

	/*
	 * Prevent new device opened by userspace via the
	 * VFIO_GROUP_GET_DEVICE_FD in the group path.
	 */
	vfio_device_group_unregister(device);

	/*
	 * Balances vfio_device_add() in register path, also prevents
	 * new device opened by userspace in the cdev path.
	 */
	vfio_device_del(device);

	vfio_device_put_registration(device);
	rc = try_wait_for_completion(&device->comp);
	while (rc <= 0) {
		if (device->ops->request)
			device->ops->request(device, i++);

		if (interrupted) {
			rc = wait_for_completion_timeout(&device->comp,
							 HZ * 10);
		} else {
			rc = wait_for_completion_interruptible_timeout(
				&device->comp, HZ * 10);
			if (rc < 0) {
				interrupted = true;
				dev_warn(device->dev,
					 "Device is currently in use, task"
					 " \"%s\" (%d) "
					 "blocked until device is released",
					 current->comm, task_pid_nr(current));
			}
		}
	}

	/* Balances vfio_device_set_group in register path */
	vfio_device_remove_group(device);
}
EXPORT_SYMBOL_GPL(vfio_unregister_group_dev);
#ifdef CONFIG_HAVE_KVM
void vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm)
{
	void (*pfn)(struct kvm *kvm);
	bool (*fn)(struct kvm *kvm);
	bool ret;

	lockdep_assert_held(&device->dev_set->lock);

	if (!kvm)
		return;

	pfn = symbol_get(kvm_put_kvm);
	if (WARN_ON(!pfn))
		return;

	fn = symbol_get(kvm_get_kvm_safe);
	if (WARN_ON(!fn)) {
		symbol_put(kvm_put_kvm);
		return;
	}

	ret = fn(kvm);
	symbol_put(kvm_get_kvm_safe);
	if (!ret) {
		symbol_put(kvm_put_kvm);
		return;
	}

	device->put_kvm = pfn;
	device->kvm = kvm;
}

void vfio_device_put_kvm(struct vfio_device *device)
{
	lockdep_assert_held(&device->dev_set->lock);

	if (!device->kvm)
		return;

	if (WARN_ON(!device->put_kvm))
		goto clear;

	device->put_kvm(device->kvm);
	device->put_kvm = NULL;
	symbol_put(kvm_put_kvm);

clear:
	device->kvm = NULL;
}
#endif
/* true if the vfio_device has open_device() called but not close_device() */
static bool vfio_assert_device_open(struct vfio_device *device)
{
	return !WARN_ON_ONCE(!READ_ONCE(device->open_count));
}

struct vfio_device_file *
vfio_allocate_device_file(struct vfio_device *device)
{
	struct vfio_device_file *df;

	df = kzalloc(sizeof(*df), GFP_KERNEL_ACCOUNT);
	if (!df)
		return ERR_PTR(-ENOMEM);

	df->device = device;
	spin_lock_init(&df->kvm_ref_lock);

	return df;
}
static int vfio_df_device_first_open(struct vfio_device_file *df)
{
	struct vfio_device *device = df->device;
	struct iommufd_ctx *iommufd = df->iommufd;
	int ret;

	lockdep_assert_held(&device->dev_set->lock);

	if (!try_module_get(device->dev->driver->owner))
		return -ENODEV;

	if (iommufd)
		ret = vfio_df_iommufd_bind(df);
	else
		ret = vfio_device_group_use_iommu(device);
	if (ret)
		goto err_module_put;

	if (device->ops->open_device) {
		ret = device->ops->open_device(device);
		if (ret)
			goto err_unuse_iommu;
	}
	return 0;

err_unuse_iommu:
	if (iommufd)
		vfio_df_iommufd_unbind(df);
	else
		vfio_device_group_unuse_iommu(device);
err_module_put:
	module_put(device->dev->driver->owner);
	return ret;
}
static void vfio_df_device_last_close(struct vfio_device_file *df)
{
	struct vfio_device *device = df->device;
	struct iommufd_ctx *iommufd = df->iommufd;

	lockdep_assert_held(&device->dev_set->lock);

	if (device->ops->close_device)
		device->ops->close_device(device);
	if (iommufd)
		vfio_df_iommufd_unbind(df);
	else
		vfio_device_group_unuse_iommu(device);
	module_put(device->dev->driver->owner);
}
int vfio_df_open(struct vfio_device_file *df)
{
	struct vfio_device *device = df->device;
	int ret = 0;

	lockdep_assert_held(&device->dev_set->lock);

	/*
	 * Only the group path allows the device to be opened multiple
	 * times.  The device cdev path doesn't have a secure way for it.
	 */
	if (device->open_count != 0 && !df->group)
		return -EINVAL;

	device->open_count++;
	if (device->open_count == 1) {
		ret = vfio_df_device_first_open(df);
		if (ret)
			device->open_count--;
	}

	return ret;
}
void vfio_df_close(struct vfio_device_file *df)
{
	struct vfio_device *device = df->device;

	lockdep_assert_held(&device->dev_set->lock);

	vfio_assert_device_open(device);
	if (device->open_count == 1)
		vfio_df_device_last_close(df);
	device->open_count--;
}
/*
 * Wrapper around pm_runtime_resume_and_get().
 * Return error code on failure or 0 on success.
 */
static inline int vfio_device_pm_runtime_get(struct vfio_device *device)
{
	struct device *dev = device->dev;

	if (dev->driver && dev->driver->pm) {
		int ret;

		ret = pm_runtime_resume_and_get(dev);
		if (ret) {
			dev_info_ratelimited(dev,
				"vfio: runtime resume failed %d\n", ret);
			return -EIO;
		}
	}

	return 0;
}

/*
 * Wrapper around pm_runtime_put().
 */
static inline void vfio_device_pm_runtime_put(struct vfio_device *device)
{
	struct device *dev = device->dev;

	if (dev->driver && dev->driver->pm)
		pm_runtime_put(dev);
}
/*
 * VFIO Device fd
 */
static int vfio_device_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_device_file *df = filep->private_data;
	struct vfio_device *device = df->device;

	if (df->group)
		vfio_df_group_close(df);
	else
		vfio_df_unbind_iommufd(df);

	vfio_device_put_registration(device);

	kfree(df);

	return 0;
}
/**
 * vfio_mig_get_next_state - Compute the next step in the FSM
 * @cur_fsm - The current state the device is in
 * @new_fsm - The target state to reach
 * @next_fsm - Pointer to the next step to get to new_fsm
 *
 * Return 0 upon success, otherwise -errno
 * Upon success the next step in the state progression between cur_fsm and
 * new_fsm will be set in next_fsm.
 *
 * This breaks down requests for combination transitions into smaller steps and
 * returns the next step to get to new_fsm. The function may need to be called
 * multiple times before reaching new_fsm.
 */
int vfio_mig_get_next_state(struct vfio_device *device,
			    enum vfio_device_mig_state cur_fsm,
			    enum vfio_device_mig_state new_fsm,
			    enum vfio_device_mig_state *next_fsm)
{
	enum { VFIO_DEVICE_NUM_STATES = VFIO_DEVICE_STATE_PRE_COPY_P2P + 1 };
	/*
	 * The coding in this table requires the driver to implement the
	 * following FSM arcs:
	 *         RESUMING -> STOP
	 *         STOP -> RESUMING
	 *         STOP -> STOP_COPY
	 *         STOP_COPY -> STOP
	 *
	 * If P2P is supported then the driver must also implement these FSM
	 * arcs:
	 *         RUNNING -> RUNNING_P2P
	 *         RUNNING_P2P -> RUNNING
	 *         RUNNING_P2P -> STOP
	 *         STOP -> RUNNING_P2P
	 *
	 * If precopy is supported then the driver must support these additional
	 * FSM arcs:
	 *         RUNNING -> PRE_COPY
	 *         PRE_COPY -> RUNNING
	 *         PRE_COPY -> STOP_COPY
	 * However, if precopy and P2P are supported together then the driver
	 * must support these additional arcs beyond the P2P arcs above:
	 *         PRE_COPY -> RUNNING
	 *         PRE_COPY -> PRE_COPY_P2P
	 *         PRE_COPY_P2P -> PRE_COPY
	 *         PRE_COPY_P2P -> RUNNING_P2P
	 *         PRE_COPY_P2P -> STOP_COPY
	 *         RUNNING -> PRE_COPY
	 *         RUNNING_P2P -> PRE_COPY_P2P
	 *
	 * Without P2P and precopy the driver must implement:
	 *         RUNNING -> STOP
	 *         STOP -> RUNNING
	 *
	 * The coding will step through multiple states for some combination
	 * transitions; if all optional features are supported, this means the
	 * following ones:
	 *         PRE_COPY -> PRE_COPY_P2P -> STOP_COPY
	 *         PRE_COPY -> RUNNING -> RUNNING_P2P
	 *         PRE_COPY -> RUNNING -> RUNNING_P2P -> STOP
	 *         PRE_COPY -> RUNNING -> RUNNING_P2P -> STOP -> RESUMING
	 *         PRE_COPY_P2P -> RUNNING_P2P -> RUNNING
	 *         PRE_COPY_P2P -> RUNNING_P2P -> STOP
	 *         PRE_COPY_P2P -> RUNNING_P2P -> STOP -> RESUMING
	 *         RESUMING -> STOP -> RUNNING_P2P
	 *         RESUMING -> STOP -> RUNNING_P2P -> PRE_COPY_P2P
	 *         RESUMING -> STOP -> RUNNING_P2P -> RUNNING
	 *         RESUMING -> STOP -> RUNNING_P2P -> RUNNING -> PRE_COPY
	 *         RESUMING -> STOP -> STOP_COPY
	 *         RUNNING -> RUNNING_P2P -> PRE_COPY_P2P
	 *         RUNNING -> RUNNING_P2P -> STOP
	 *         RUNNING -> RUNNING_P2P -> STOP -> RESUMING
	 *         RUNNING -> RUNNING_P2P -> STOP -> STOP_COPY
	 *         RUNNING_P2P -> RUNNING -> PRE_COPY
	 *         RUNNING_P2P -> STOP -> RESUMING
	 *         RUNNING_P2P -> STOP -> STOP_COPY
	 *         STOP -> RUNNING_P2P -> PRE_COPY_P2P
	 *         STOP -> RUNNING_P2P -> RUNNING
	 *         STOP -> RUNNING_P2P -> RUNNING -> PRE_COPY
	 *         STOP_COPY -> STOP -> RESUMING
	 *         STOP_COPY -> STOP -> RUNNING_P2P
	 *         STOP_COPY -> STOP -> RUNNING_P2P -> RUNNING
	 *
	 * The following transitions are blocked:
	 *         STOP_COPY -> PRE_COPY
	 *         STOP_COPY -> PRE_COPY_P2P
	 */
	static const u8 vfio_from_fsm_table[VFIO_DEVICE_NUM_STATES][VFIO_DEVICE_NUM_STATES] = {
		[VFIO_DEVICE_STATE_STOP] = {
			[VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP_COPY,
			[VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RESUMING,
			[VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
		},
		[VFIO_DEVICE_STATE_RUNNING] = {
			[VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING,
			[VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_PRE_COPY,
			[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
		},
		[VFIO_DEVICE_STATE_PRE_COPY] = {
			[VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_RUNNING,
			[VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING,
			[VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_PRE_COPY,
			[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_PRE_COPY_P2P,
			[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_PRE_COPY_P2P,
			[VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RUNNING,
			[VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING,
			[VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
		},
		[VFIO_DEVICE_STATE_PRE_COPY_P2P] = {
			[VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_PRE_COPY,
			[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_PRE_COPY_P2P,
			[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP_COPY,
			[VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
		},
		[VFIO_DEVICE_STATE_STOP_COPY] = {
			[VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_ERROR,
			[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_ERROR,
			[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP_COPY,
			[VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
		},
		[VFIO_DEVICE_STATE_RESUMING] = {
			[VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RESUMING,
			[VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
		},
		[VFIO_DEVICE_STATE_RUNNING_P2P] = {
			[VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING,
			[VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_RUNNING,
			[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_PRE_COPY_P2P,
			[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
		},
		[VFIO_DEVICE_STATE_ERROR] = {
			[VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_ERROR,
			[VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_ERROR,
			[VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_ERROR,
			[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_ERROR,
			[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_ERROR,
			[VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_ERROR,
			[VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_ERROR,
			[VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
		},
	};
	static const unsigned int state_flags_table[VFIO_DEVICE_NUM_STATES] = {
		[VFIO_DEVICE_STATE_STOP] = VFIO_MIGRATION_STOP_COPY,
		[VFIO_DEVICE_STATE_RUNNING] = VFIO_MIGRATION_STOP_COPY,
		[VFIO_DEVICE_STATE_PRE_COPY] =
			VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_PRE_COPY,
		[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_MIGRATION_STOP_COPY |
						   VFIO_MIGRATION_P2P |
						   VFIO_MIGRATION_PRE_COPY,
		[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_MIGRATION_STOP_COPY,
		[VFIO_DEVICE_STATE_RESUMING] = VFIO_MIGRATION_STOP_COPY,
		[VFIO_DEVICE_STATE_RUNNING_P2P] =
			VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P,
		[VFIO_DEVICE_STATE_ERROR] = ~0U,
	};
	if (WARN_ON(cur_fsm >= ARRAY_SIZE(vfio_from_fsm_table) ||
		    (state_flags_table[cur_fsm] & device->migration_flags) !=
			state_flags_table[cur_fsm]))
		return -EINVAL;

	if (new_fsm >= ARRAY_SIZE(vfio_from_fsm_table) ||
	    (state_flags_table[new_fsm] & device->migration_flags) !=
			state_flags_table[new_fsm])
		return -EINVAL;

	/*
	 * Arcs touching optional and unsupported states are skipped over. The
	 * driver will instead see an arc from the original state to the next
	 * logical state, as per the above comment.
	 */
	*next_fsm = vfio_from_fsm_table[cur_fsm][new_fsm];
	while ((state_flags_table[*next_fsm] & device->migration_flags) !=
			state_flags_table[*next_fsm])
		*next_fsm = vfio_from_fsm_table[*next_fsm][new_fsm];

	return (*next_fsm != VFIO_DEVICE_STATE_ERROR) ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(vfio_mig_get_next_state);
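
/*
 * Illustrative sketch, not part of this file: a driver's
 * migration_set_state() op typically walks the FSM one supported arc at a
 * time until the requested state is reached.  "my_step_state" is a
 * hypothetical per-driver helper applying a single arc:
 *
 *	while (cur != new) {
 *		enum vfio_device_mig_state next;
 *
 *		ret = vfio_mig_get_next_state(vdev, cur, new, &next);
 *		if (ret)
 *			return ERR_PTR(ret);
 *		filp = my_step_state(vdev, next);	// may return a data fd
 *		if (IS_ERR(filp))
 *			return filp;
 *		cur = next;
 *	}
 */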
/*
 * Convert the driver's struct file into a FD number and return it to userspace
 */
static int vfio_ioct_mig_return_fd(struct file *filp, void __user *arg,
				   struct vfio_device_feature_mig_state *mig)
{
	int ret;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto out_fput;
	}

	mig->data_fd = fd;
	if (copy_to_user(arg, mig, sizeof(*mig))) {
		ret = -EFAULT;
		goto out_put_unused;
	}
	fd_install(fd, filp);
	return 0;

out_put_unused:
	put_unused_fd(fd);
out_fput:
	fput(filp);
	return ret;
}
static int
vfio_ioctl_device_feature_mig_device_state(struct vfio_device *device,
					   u32 flags, void __user *arg,
					   size_t argsz)
{
	size_t minsz =
		offsetofend(struct vfio_device_feature_mig_state, data_fd);
	struct vfio_device_feature_mig_state mig;
	struct file *filp = NULL;
	int ret;

	if (!device->mig_ops)
		return -ENOTTY;

	ret = vfio_check_feature(flags, argsz,
				 VFIO_DEVICE_FEATURE_SET |
				 VFIO_DEVICE_FEATURE_GET,
				 sizeof(mig));
	if (ret != 1)
		return ret;

	if (copy_from_user(&mig, arg, minsz))
		return -EFAULT;

	if (flags & VFIO_DEVICE_FEATURE_GET) {
		enum vfio_device_mig_state curr_state;

		ret = device->mig_ops->migration_get_state(device,
							   &curr_state);
		if (ret)
			return ret;
		mig.device_state = curr_state;
		goto out_copy;
	}

	/* Handle the VFIO_DEVICE_FEATURE_SET */
	filp = device->mig_ops->migration_set_state(device, mig.device_state);
	if (IS_ERR(filp) || !filp)
		goto out_copy;

	return vfio_ioct_mig_return_fd(filp, arg, &mig);
out_copy:
	mig.data_fd = -1;
	if (copy_to_user(arg, &mig, sizeof(mig)))
		return -EFAULT;
	if (IS_ERR(filp))
		return PTR_ERR(filp);
	return 0;
}
static int
vfio_ioctl_device_feature_migration_data_size(struct vfio_device *device,
					      u32 flags, void __user *arg,
					      size_t argsz)
{
	struct vfio_device_feature_mig_data_size data_size = {};
	unsigned long stop_copy_length;
	int ret;

	if (!device->mig_ops)
		return -ENOTTY;

	ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_GET,
				 sizeof(data_size));
	if (ret != 1)
		return ret;

	ret = device->mig_ops->migration_get_data_size(device, &stop_copy_length);
	if (ret)
		return ret;

	data_size.stop_copy_length = stop_copy_length;
	if (copy_to_user(arg, &data_size, sizeof(data_size)))
		return -EFAULT;

	return 0;
}
static int vfio_ioctl_device_feature_migration(struct vfio_device *device,
					       u32 flags, void __user *arg,
					       size_t argsz)
{
	struct vfio_device_feature_migration mig = {
		.flags = device->migration_flags,
	};
	int ret;

	if (!device->mig_ops)
		return -ENOTTY;

	ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_GET,
				 sizeof(mig));
	if (ret != 1)
		return ret;
	if (copy_to_user(arg, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}
void vfio_combine_iova_ranges(struct rb_root_cached *root, u32 cur_nodes,
			      u32 req_nodes)
{
	struct interval_tree_node *prev, *curr, *comb_start, *comb_end;
	unsigned long min_gap, curr_gap;

	/* Special shortcut when a single range is required */
	if (req_nodes == 1) {
		unsigned long last;

		comb_start = interval_tree_iter_first(root, 0, ULONG_MAX);

		/* Empty list */
		if (WARN_ON_ONCE(!comb_start))
			return;

		curr = comb_start;
		while (curr) {
			last = curr->last;
			prev = curr;
			curr = interval_tree_iter_next(curr, 0, ULONG_MAX);
			if (prev != comb_start)
				interval_tree_remove(prev, root);
		}
		comb_start->last = last;
		return;
	}

	/* Combine ranges which have the smallest gap */
	while (cur_nodes > req_nodes) {
		prev = NULL;
		min_gap = ULONG_MAX;
		curr = interval_tree_iter_first(root, 0, ULONG_MAX);
		while (curr) {
			if (prev) {
				curr_gap = curr->start - prev->last;
				if (curr_gap < min_gap) {
					min_gap = curr_gap;
					comb_start = prev;
					comb_end = curr;
				}
			}
			prev = curr;
			curr = interval_tree_iter_next(curr, 0, ULONG_MAX);
		}

		/* Empty list or no nodes to combine */
		if (WARN_ON_ONCE(min_gap == ULONG_MAX))
			break;

		comb_start->last = comb_end->last;
		interval_tree_remove(comb_end, root);
		cur_nodes--;
	}
}
EXPORT_SYMBOL_GPL(vfio_combine_iova_ranges);
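
/*
 * Worked example (illustrative only): with cur_nodes == 3 covering
 * [0x0, 0xfff], [0x2000, 0x2fff] and [0x100000, 0x100fff], a call with
 * req_nodes == 2 merges across the smallest gap first, leaving
 * [0x0, 0x2fff] and [0x100000, 0x100fff]; req_nodes == 1 takes the shortcut
 * above and collapses everything into [0x0, 0x100fff].
 */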
/* Ranges should fit into a single kernel page */
#define LOG_MAX_RANGES \
	(PAGE_SIZE / sizeof(struct vfio_device_feature_dma_logging_range))

static int
vfio_ioctl_device_feature_logging_start(struct vfio_device *device,
					u32 flags, void __user *arg,
					size_t argsz)
{
	size_t minsz =
		offsetofend(struct vfio_device_feature_dma_logging_control,
			    ranges);
	struct vfio_device_feature_dma_logging_range __user *ranges;
	struct vfio_device_feature_dma_logging_control control;
	struct vfio_device_feature_dma_logging_range range;
	struct rb_root_cached root = RB_ROOT_CACHED;
	struct interval_tree_node *nodes;
	u64 iova_end;
	u32 nnodes;
	int i, ret;

	if (!device->log_ops)
		return -ENOTTY;

	ret = vfio_check_feature(flags, argsz,
				 VFIO_DEVICE_FEATURE_SET,
				 sizeof(control));
	if (ret != 1)
		return ret;

	if (copy_from_user(&control, arg, minsz))
		return -EFAULT;

	nnodes = control.num_ranges;
	if (!nnodes)
		return -EINVAL;

	if (nnodes > LOG_MAX_RANGES)
		return -E2BIG;

	ranges = u64_to_user_ptr(control.ranges);
	nodes = kmalloc_array(nnodes, sizeof(struct interval_tree_node),
			      GFP_KERNEL);
	if (!nodes)
		return -ENOMEM;

	for (i = 0; i < nnodes; i++) {
		if (copy_from_user(&range, &ranges[i], sizeof(range))) {
			ret = -EFAULT;
			goto end;
		}
		if (!IS_ALIGNED(range.iova, control.page_size) ||
		    !IS_ALIGNED(range.length, control.page_size)) {
			ret = -EINVAL;
			goto end;
		}

		if (check_add_overflow(range.iova, range.length, &iova_end) ||
		    iova_end > ULONG_MAX) {
			ret = -EOVERFLOW;
			goto end;
		}

		nodes[i].start = range.iova;
		nodes[i].last = range.iova + range.length - 1;
		if (interval_tree_iter_first(&root, nodes[i].start,
					     nodes[i].last)) {
			/* Range overlapping */
			ret = -EINVAL;
			goto end;
		}
		interval_tree_insert(nodes + i, &root);
	}

	ret = device->log_ops->log_start(device, &root, nnodes,
					 &control.page_size);
	if (ret)
		goto end;

	if (copy_to_user(arg, &control, sizeof(control))) {
		ret = -EFAULT;
		device->log_ops->log_stop(device);
	}

end:
	kfree(nodes);
	return ret;
}
static int
vfio_ioctl_device_feature_logging_stop(struct vfio_device *device,
				       u32 flags, void __user *arg,
				       size_t argsz)
{
	int ret;

	if (!device->log_ops)
		return -ENOTTY;

	ret = vfio_check_feature(flags, argsz,
				 VFIO_DEVICE_FEATURE_SET, 0);
	if (ret != 1)
		return ret;

	return device->log_ops->log_stop(device);
}
static int vfio_device_log_read_and_clear(struct iova_bitmap *iter,
					  unsigned long iova, size_t length,
					  void *opaque)
{
	struct vfio_device *device = opaque;

	return device->log_ops->log_read_and_clear(device, iova, length, iter);
}
static int
vfio_ioctl_device_feature_logging_report(struct vfio_device *device,
					 u32 flags, void __user *arg,
					 size_t argsz)
{
	size_t minsz =
		offsetofend(struct vfio_device_feature_dma_logging_report,
			    bitmap);
	struct vfio_device_feature_dma_logging_report report;
	struct iova_bitmap *iter;
	u64 iova_end;
	int ret;

	if (!device->log_ops)
		return -ENOTTY;

	ret = vfio_check_feature(flags, argsz,
				 VFIO_DEVICE_FEATURE_GET,
				 sizeof(report));
	if (ret != 1)
		return ret;

	if (copy_from_user(&report, arg, minsz))
		return -EFAULT;

	if (report.page_size < SZ_4K || !is_power_of_2(report.page_size))
		return -EINVAL;

	if (check_add_overflow(report.iova, report.length, &iova_end) ||
	    iova_end > ULONG_MAX)
		return -EOVERFLOW;

	iter = iova_bitmap_alloc(report.iova, report.length,
				 report.page_size,
				 u64_to_user_ptr(report.bitmap));
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	ret = iova_bitmap_for_each(iter, device,
				   vfio_device_log_read_and_clear);

	iova_bitmap_free(iter);
	return ret;
}
static int vfio_ioctl_device_feature(struct vfio_device *device,
				     struct vfio_device_feature __user *arg)
{
	size_t minsz = offsetofend(struct vfio_device_feature, flags);
	struct vfio_device_feature feature;

	if (copy_from_user(&feature, arg, minsz))
		return -EFAULT;

	if (feature.argsz < minsz)
		return -EINVAL;

	/* Check unknown flags */
	if (feature.flags &
	    ~(VFIO_DEVICE_FEATURE_MASK | VFIO_DEVICE_FEATURE_SET |
	      VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_PROBE))
		return -EINVAL;

	/* GET & SET are mutually exclusive except with PROBE */
	if (!(feature.flags & VFIO_DEVICE_FEATURE_PROBE) &&
	    (feature.flags & VFIO_DEVICE_FEATURE_SET) &&
	    (feature.flags & VFIO_DEVICE_FEATURE_GET))
		return -EINVAL;

	switch (feature.flags & VFIO_DEVICE_FEATURE_MASK) {
	case VFIO_DEVICE_FEATURE_MIGRATION:
		return vfio_ioctl_device_feature_migration(
			device, feature.flags, arg->data,
			feature.argsz - minsz);
	case VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE:
		return vfio_ioctl_device_feature_mig_device_state(
			device, feature.flags, arg->data,
			feature.argsz - minsz);
	case VFIO_DEVICE_FEATURE_DMA_LOGGING_START:
		return vfio_ioctl_device_feature_logging_start(
			device, feature.flags, arg->data,
			feature.argsz - minsz);
	case VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP:
		return vfio_ioctl_device_feature_logging_stop(
			device, feature.flags, arg->data,
			feature.argsz - minsz);
	case VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT:
		return vfio_ioctl_device_feature_logging_report(
			device, feature.flags, arg->data,
			feature.argsz - minsz);
	case VFIO_DEVICE_FEATURE_MIG_DATA_SIZE:
		return vfio_ioctl_device_feature_migration_data_size(
			device, feature.flags, arg->data,
			feature.argsz - minsz);
	default:
		if (unlikely(!device->ops->device_feature))
			return -EINVAL;
		return device->ops->device_feature(device, feature.flags,
						   arg->data,
						   feature.argsz - minsz);
	}
}
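
/*
 * Illustrative userspace sketch, not part of this file: probing whether the
 * migration feature is available via VFIO_DEVICE_FEATURE ("device_fd" is a
 * hypothetical open device fd):
 *
 *	struct vfio_device_feature feature = {
 *		.argsz = sizeof(feature),
 *		.flags = VFIO_DEVICE_FEATURE_PROBE | VFIO_DEVICE_FEATURE_GET |
 *			 VFIO_DEVICE_FEATURE_MIGRATION,
 *	};
 *
 *	if (ioctl(device_fd, VFIO_DEVICE_FEATURE, &feature))
 *		// the feature is not supported by this device
 */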
static long vfio_device_fops_unl_ioctl(struct file *filep,
				       unsigned int cmd, unsigned long arg)
{
	struct vfio_device_file *df = filep->private_data;
	struct vfio_device *device = df->device;
	void __user *uptr = (void __user *)arg;
	int ret;

	if (cmd == VFIO_DEVICE_BIND_IOMMUFD)
		return vfio_df_ioctl_bind_iommufd(df, uptr);

	/* Paired with smp_store_release() following vfio_df_open() */
	if (!smp_load_acquire(&df->access_granted))
		return -EINVAL;

	ret = vfio_device_pm_runtime_get(device);
	if (ret)
		return ret;

	/* cdev only ioctls */
	if (IS_ENABLED(CONFIG_VFIO_DEVICE_CDEV) && !df->group) {
		switch (cmd) {
		case VFIO_DEVICE_ATTACH_IOMMUFD_PT:
			ret = vfio_df_ioctl_attach_pt(df, uptr);
			goto out;

		case VFIO_DEVICE_DETACH_IOMMUFD_PT:
			ret = vfio_df_ioctl_detach_pt(df, uptr);
			goto out;
		default:
			break;
		}
	}

	switch (cmd) {
	case VFIO_DEVICE_FEATURE:
		ret = vfio_ioctl_device_feature(device, uptr);
		break;

	default:
		if (unlikely(!device->ops->ioctl))
			ret = -EINVAL;
		else
			ret = device->ops->ioctl(device, cmd, arg);
		break;
	}
out:
	vfio_device_pm_runtime_put(device);
	return ret;
}
static ssize_t vfio_device_fops_read(struct file *filep, char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct vfio_device_file *df = filep->private_data;
	struct vfio_device *device = df->device;

	/* Paired with smp_store_release() following vfio_df_open() */
	if (!smp_load_acquire(&df->access_granted))
		return -EIO;

	if (unlikely(!device->ops->read))
		return -EINVAL;

	return device->ops->read(device, buf, count, ppos);
}

static ssize_t vfio_device_fops_write(struct file *filep,
				      const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct vfio_device_file *df = filep->private_data;
	struct vfio_device *device = df->device;

	/* Paired with smp_store_release() following vfio_df_open() */
	if (!smp_load_acquire(&df->access_granted))
		return -EIO;

	if (unlikely(!device->ops->write))
		return -EINVAL;

	return device->ops->write(device, buf, count, ppos);
}

static int vfio_device_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct vfio_device_file *df = filep->private_data;
	struct vfio_device *device = df->device;

	/* Paired with smp_store_release() following vfio_df_open() */
	if (!smp_load_acquire(&df->access_granted))
		return -EIO;

	if (unlikely(!device->ops->mmap))
		return -EINVAL;

	return device->ops->mmap(device, vma);
}
const struct file_operations vfio_device_fops = {
	.owner		= THIS_MODULE,
	.open		= vfio_device_fops_cdev_open,
	.release	= vfio_device_fops_release,
	.read		= vfio_device_fops_read,
	.write		= vfio_device_fops_write,
	.unlocked_ioctl	= vfio_device_fops_unl_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.mmap		= vfio_device_fops_mmap,
};
static struct vfio_device *vfio_device_from_file(struct file *file)
{
	struct vfio_device_file *df = file->private_data;

	if (file->f_op != &vfio_device_fops)
		return NULL;
	return df->device;
}

/**
 * vfio_file_is_valid - True if the file is valid vfio file
 * @file: VFIO group file or VFIO device file
 */
bool vfio_file_is_valid(struct file *file)
{
	return vfio_group_from_file(file) ||
	       vfio_device_from_file(file);
}
EXPORT_SYMBOL_GPL(vfio_file_is_valid);
/**
 * vfio_file_enforced_coherent - True if the DMA associated with the VFIO file
 *        is always CPU cache coherent
 * @file: VFIO group file or VFIO device file
 *
 * Enforced coherency means that the IOMMU ignores things like the PCIe no-snoop
 * bit in DMA transactions. A return of false indicates that the user has
 * rights to access additional instructions such as wbinvd on x86.
 */
bool vfio_file_enforced_coherent(struct file *file)
{
	struct vfio_device *device;
	struct vfio_group *group;

	group = vfio_group_from_file(file);
	if (group)
		return vfio_group_enforced_coherent(group);

	device = vfio_device_from_file(file);
	if (device)
		return device_iommu_capable(device->dev,
					    IOMMU_CAP_ENFORCE_CACHE_COHERENCY);

	return true;
}
EXPORT_SYMBOL_GPL(vfio_file_enforced_coherent);
static void vfio_device_file_set_kvm(struct file *file, struct kvm *kvm)
{
	struct vfio_device_file *df = file->private_data;

	/*
	 * The kvm is first recorded in the vfio_device_file, and will
	 * be propagated to vfio_device::kvm when the file is bound to
	 * iommufd successfully in the vfio device cdev path.
	 */
	spin_lock(&df->kvm_ref_lock);
	df->kvm = kvm;
	spin_unlock(&df->kvm_ref_lock);
}

/**
 * vfio_file_set_kvm - Link a kvm with VFIO drivers
 * @file: VFIO group file or VFIO device file
 * @kvm: KVM to link
 *
 * When a VFIO device is first opened the KVM will be available in
 * device->kvm if one was associated with the file.
 */
void vfio_file_set_kvm(struct file *file, struct kvm *kvm)
{
	struct vfio_group *group;

	group = vfio_group_from_file(file);
	if (group)
		vfio_group_set_kvm(group, kvm);

	if (vfio_device_from_file(file))
		vfio_device_file_set_kvm(file, kvm);
}
EXPORT_SYMBOL_GPL(vfio_file_set_kvm);
/*
 * Sub-module support
 */
/*
 * Helper for managing a buffer of info chain capabilities, allocate or
 * reallocate a buffer with additional @size, filling in @id and @version
 * of the capability.  A pointer to the new capability is returned.
 *
 * NB. The chain is based at the head of the buffer, so new entries are
 * added to the tail, vfio_info_cap_shift() should be called to fixup the
 * next offsets prior to copying to the user buffer.
 */
struct vfio_info_cap_header *vfio_info_cap_add(struct vfio_info_cap *caps,
					       size_t size, u16 id, u16 version)
{
	void *buf;
	struct vfio_info_cap_header *header, *tmp;

	/* Ensure that the next capability struct will be aligned */
	size = ALIGN(size, sizeof(u64));

	buf = krealloc(caps->buf, caps->size + size, GFP_KERNEL);
	if (!buf) {
		kfree(caps->buf);
		caps->buf = NULL;
		caps->size = 0;
		return ERR_PTR(-ENOMEM);
	}

	caps->buf = buf;
	header = buf + caps->size;

	/* Eventually copied to user buffer, zero */
	memset(header, 0, size);

	header->id = id;
	header->version = version;

	/* Add to the end of the capability chain */
	for (tmp = buf; tmp->next; tmp = buf + tmp->next)
		; /* nothing */

	tmp->next = caps->size;
	caps->size += size;

	return header;
}
EXPORT_SYMBOL_GPL(vfio_info_cap_add);
void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset)
{
	struct vfio_info_cap_header *tmp;
	void *buf = (void *)caps->buf;

	/* Capability structs should start with proper alignment */
	WARN_ON(!IS_ALIGNED(offset, sizeof(u64)));

	for (tmp = buf; tmp->next; tmp = buf + tmp->next - offset)
		tmp->next += offset;
}
EXPORT_SYMBOL(vfio_info_cap_shift);
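
/*
 * Illustrative sketch, not part of this file: the usual pattern in an ioctl
 * handler is to build the chain, shift the offsets by where the chain lands
 * in the user buffer, then copy it out.  "info" stands in for the
 * ioctl-specific structure and the flag usage is hypothetical:
 *
 *	struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
 *
 *	// ... fill caps via vfio_info_cap_add()/vfio_info_add_capability()
 *	info.flags |= VFIO_DEVICE_FLAGS_CAPS;
 *	info.cap_offset = sizeof(info);
 *	vfio_info_cap_shift(&caps, sizeof(info));
 *	if (copy_to_user(arg + sizeof(info), caps.buf, caps.size))
 *		ret = -EFAULT;
 *	kfree(caps.buf);
 */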
int vfio_info_add_capability(struct vfio_info_cap *caps,
			     struct vfio_info_cap_header *cap, size_t size)
{
	struct vfio_info_cap_header *header;

	header = vfio_info_cap_add(caps, size, cap->id, cap->version);
	if (IS_ERR(header))
		return PTR_ERR(header);

	memcpy(header + 1, cap + 1, size - sizeof(*header));

	return 0;
}
EXPORT_SYMBOL(vfio_info_add_capability);
int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, int num_irqs,
				       int max_irq_type, size_t *data_size)
{
	unsigned long minsz;
	size_t size;

	minsz = offsetofend(struct vfio_irq_set, count);

	if ((hdr->argsz < minsz) || (hdr->index >= max_irq_type) ||
	    (hdr->count >= (U32_MAX - hdr->start)) ||
	    (hdr->flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
			    VFIO_IRQ_SET_ACTION_TYPE_MASK)))
		return -EINVAL;

	if (data_size)
		*data_size = 0;

	if (hdr->start >= num_irqs || hdr->start + hdr->count > num_irqs)
		return -EINVAL;

	switch (hdr->flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
	case VFIO_IRQ_SET_DATA_NONE:
		size = 0;
		break;
	case VFIO_IRQ_SET_DATA_BOOL:
		size = sizeof(uint8_t);
		break;
	case VFIO_IRQ_SET_DATA_EVENTFD:
		size = sizeof(int32_t);
		break;
	default:
		return -EINVAL;
	}

	if (size) {
		if (hdr->argsz - minsz < hdr->count * size)
			return -EINVAL;

		if (!data_size)
			return -EINVAL;

		*data_size = hdr->count * size;
	}

	return 0;
}
EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare);
/*
 * Pin contiguous user pages and return their associated host pages for local
 * domain only.
 * @device [in]  : device
 * @iova [in]    : starting IOVA of user pages to be pinned.
 * @npage [in]   : count of pages to be pinned.  This count should not
 *		   be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
 * @prot [in]    : protection flags
 * @pages[out]   : array of host pages
 * Return error or number of pages pinned.
 *
 * A driver may only call this function if the vfio_device was created
 * by vfio_register_emulated_iommu_dev() due to vfio_device_container_pin_pages().
 */
int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova,
		   int npage, int prot, struct page **pages)
{
	/* group->container cannot change while a vfio device is open */
	if (!pages || !npage || WARN_ON(!vfio_assert_device_open(device)))
		return -EINVAL;
	if (!device->ops->dma_unmap)
		return -EINVAL;
	if (vfio_device_has_container(device))
		return vfio_device_container_pin_pages(device, iova,
						       npage, prot, pages);
	if (device->iommufd_access) {
		int ret;

		if (iova > ULONG_MAX)
			return -EINVAL;
		/*
		 * VFIO ignores the sub page offset, npages is from the start of
		 * a PAGE_SIZE chunk of IOVA. The caller is expected to recover
		 * the sub page offset by doing:
		 *     pages[0] + (iova % PAGE_SIZE)
		 */
		ret = iommufd_access_pin_pages(
			device->iommufd_access, ALIGN_DOWN(iova, PAGE_SIZE),
			npage * PAGE_SIZE, pages,
			(prot & IOMMU_WRITE) ? IOMMUFD_ACCESS_RW_WRITE : 0);
		if (ret)
			return ret;
		return npage;
	}
	return -EINVAL;
}
EXPORT_SYMBOL(vfio_pin_pages);
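
/*
 * Illustrative sketch, not part of this file: a mediated driver pins a single
 * page and recovers the sub-page offset itself, pairing the pin with
 * vfio_unpin_pages() below ("map" is hypothetical):
 *
 *	struct page *pg;
 *	int ret = vfio_pin_pages(vdev, iova, 1, IOMMU_READ | IOMMU_WRITE, &pg);
 *
 *	if (ret != 1)
 *		return ret < 0 ? ret : -EFAULT;
 *	map = kmap_local_page(pg) + (iova % PAGE_SIZE);
 *	// ... access the page ...
 *	kunmap_local(map);
 *	vfio_unpin_pages(vdev, iova, 1);
 */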
/*
 * Unpin contiguous host pages for local domain only.
 * @device [in]  : device
 * @iova [in]    : starting address of user pages to be unpinned.
 * @npage [in]   : count of pages to be unpinned.  This count should not
 *		   be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
 */
void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage)
{
	if (WARN_ON(!vfio_assert_device_open(device)))
		return;
	if (WARN_ON(!device->ops->dma_unmap))
		return;

	if (vfio_device_has_container(device)) {
		vfio_device_container_unpin_pages(device, iova, npage);
		return;
	}
	if (device->iommufd_access) {
		if (WARN_ON(iova > ULONG_MAX))
			return;
		iommufd_access_unpin_pages(device->iommufd_access,
					   ALIGN_DOWN(iova, PAGE_SIZE),
					   npage * PAGE_SIZE);
		return;
	}
}
EXPORT_SYMBOL(vfio_unpin_pages);
/*
 * This interface allows the CPUs to perform some sort of virtual DMA on
 * behalf of the device.
 *
 * CPUs read/write from/into a range of IOVAs pointing to user space memory
 * into/from a kernel buffer.
 *
 * As the read/write of user space memory is conducted via the CPUs and is
 * not a real device DMA, it is not necessary to pin the user space memory.
 *
 * @device [in]		: VFIO device
 * @iova [in]		: base IOVA of a user space buffer
 * @data [in]		: pointer to kernel buffer
 * @len [in]		: kernel buffer length
 * @write		: indicate read or write
 * Return error code on failure or 0 on success.
 */
int vfio_dma_rw(struct vfio_device *device, dma_addr_t iova, void *data,
		size_t len, bool write)
{
	if (!data || len <= 0 || !vfio_assert_device_open(device))
		return -EINVAL;

	if (vfio_device_has_container(device))
		return vfio_device_container_dma_rw(device, iova,
						    data, len, write);

	if (device->iommufd_access) {
		unsigned int flags = 0;

		if (iova > ULONG_MAX)
			return -EINVAL;

		/* VFIO historically tries to auto-detect a kthread */
		if (!current->mm)
			flags |= IOMMUFD_ACCESS_RW_KTHREAD;
		if (write)
			flags |= IOMMUFD_ACCESS_RW_WRITE;
		return iommufd_access_rw(device->iommufd_access, iova, data,
					 len, flags);
	}
	return -EINVAL;
}
EXPORT_SYMBOL(vfio_dma_rw);
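
/*
 * Illustrative sketch, not part of this file: reading a guest-visible
 * descriptor without pinning the backing memory ("struct my_desc" and
 * "desc_iova" are hypothetical):
 *
 *	struct my_desc desc;
 *	int ret = vfio_dma_rw(vdev, desc_iova, &desc, sizeof(desc), false);
 *
 *	if (ret)
 *		return ret;
 */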
/*
 * Module/class support
 */
static int __init vfio_init(void)
{
	int ret;

	ida_init(&vfio.device_ida);

	ret = vfio_group_init();
	if (ret)
		return ret;

	ret = vfio_virqfd_init();
	if (ret)
		goto err_virqfd;

	/* /sys/class/vfio-dev/vfioX */
	vfio.device_class = class_create("vfio-dev");
	if (IS_ERR(vfio.device_class)) {
		ret = PTR_ERR(vfio.device_class);
		goto err_dev_class;
	}

	ret = vfio_cdev_init(vfio.device_class);
	if (ret)
		goto err_alloc_dev_chrdev;

	pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
	return 0;

err_alloc_dev_chrdev:
	class_destroy(vfio.device_class);
	vfio.device_class = NULL;
err_dev_class:
	vfio_virqfd_exit();
err_virqfd:
	vfio_group_cleanup();
	return ret;
}

static void __exit vfio_cleanup(void)
{
	ida_destroy(&vfio.device_ida);
	vfio_cdev_cleanup();
	class_destroy(vfio.device_class);
	vfio.device_class = NULL;
	vfio_virqfd_exit();
	vfio_group_cleanup();
	xa_destroy(&vfio_device_set_xa);
}

module_init(vfio_init);
module_exit(vfio_cleanup);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_SOFTDEP("post: vfio_iommu_type1 vfio_iommu_spapr_tce");