1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
4 * Author: Alex Williamson <alex.williamson@redhat.com>
6 * Derived from original vfio:
7 * Copyright 2010 Cisco Systems, Inc. All rights reserved.
8 * Author: Tom Lyon, pugs@cisco.com
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 #include <linux/aperture.h>
14 #include <linux/device.h>
15 #include <linux/eventfd.h>
16 #include <linux/file.h>
17 #include <linux/interrupt.h>
18 #include <linux/iommu.h>
19 #include <linux/module.h>
20 #include <linux/mutex.h>
21 #include <linux/notifier.h>
22 #include <linux/pci.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/slab.h>
25 #include <linux/types.h>
26 #include <linux/uaccess.h>
27 #include <linux/vgaarb.h>
28 #include <linux/nospec.h>
29 #include <linux/sched/mm.h>
30 #if IS_ENABLED(CONFIG_EEH)
34 #include "vfio_pci_priv.h"
36 #define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
37 #define DRIVER_DESC "core driver for VFIO based PCI devices"
39 static bool nointxmask;
40 static bool disable_vga;
41 static bool disable_idle_d3;
43 /* List of PFs that vfio_pci_core_sriov_configure() has been called on */
44 static DEFINE_MUTEX(vfio_pci_sriov_pfs_mutex);
45 static LIST_HEAD(vfio_pci_sriov_pfs);
47 struct vfio_pci_dummy_resource {
48 struct resource resource;
int index;
50 struct list_head res_next;
};
53 struct vfio_pci_vf_token {
struct mutex lock;
uuid_t uuid;
int users;
};
59 struct vfio_pci_mmap_vma {
60 struct vm_area_struct *vma;
61 struct list_head vma_next;
64 static inline bool vfio_vga_disabled(void)
66 #ifdef CONFIG_VFIO_PCI_VGA
74 * Our VGA arbiter participation is limited since we don't know anything
75 * about the device itself. However, if the device is the only VGA device
76 * downstream of a bridge and VFIO VGA support is disabled, then we can
77 * safely return legacy VGA IO and memory as not decoded since the user
78 * has no way to get to it and routing can be disabled externally at the bridge level.
81 static unsigned int vfio_pci_set_decode(struct pci_dev *pdev, bool single_vga)
83 struct pci_dev *tmp = NULL;
84 unsigned char max_busnr;
87 if (single_vga || !vfio_vga_disabled() || pci_is_root_bus(pdev->bus))
88 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
89 VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
91 max_busnr = pci_bus_max_busnr(pdev->bus);
92 decodes = VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
94 while ((tmp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, tmp)) != NULL) {
96 pci_domain_nr(tmp->bus) != pci_domain_nr(pdev->bus) ||
97 pci_is_root_bus(tmp->bus))
100 if (tmp->bus->number >= pdev->bus->number &&
101 tmp->bus->number <= max_busnr) {
103 decodes |= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
111 static void vfio_pci_probe_mmaps(struct vfio_pci_core_device *vdev)
113 struct resource *res;
115 struct vfio_pci_dummy_resource *dummy_res;
117 for (i = 0; i < PCI_STD_NUM_BARS; i++) {
118 int bar = i + PCI_STD_RESOURCES;
120 res = &vdev->pdev->resource[bar];
122 if (!IS_ENABLED(CONFIG_VFIO_PCI_MMAP))
125 if (!(res->flags & IORESOURCE_MEM))
129 * The PCI core shouldn't set up a resource with a
130 * type but zero size. But there may be bugs that
131 * cause us to do that.
133 if (!resource_size(res))
136 if (resource_size(res) >= PAGE_SIZE) {
137 vdev->bar_mmap_supported[bar] = true;
141 if (!(res->start & ~PAGE_MASK)) {
143 * Add a dummy resource to reserve the remainder
144 * of the exclusive page, in case a hot-added
145 * device's BAR is assigned into it.
148 kzalloc(sizeof(*dummy_res), GFP_KERNEL_ACCOUNT);
149 if (dummy_res == NULL)
152 dummy_res->resource.name = "vfio sub-page reserved";
153 dummy_res->resource.start = res->end + 1;
154 dummy_res->resource.end = res->start + PAGE_SIZE - 1;
155 dummy_res->resource.flags = res->flags;
156 if (request_resource(res->parent,
157 &dummy_res->resource)) {
161 dummy_res->index = bar;
162 list_add(&dummy_res->res_next,
163 &vdev->dummy_resources_list);
164 vdev->bar_mmap_supported[bar] = true;
168 * We don't handle the case where the BAR is not page
169 * aligned, because we can't expect the BAR to be
170 * assigned to the same offset within a page in the
171 * guest when it is passed through. It is also hard to
172 * access such a BAR from userspace, since there is no
173 * way to learn the BAR's offset within the page.
176 vdev->bar_mmap_supported[bar] = false;
180 struct vfio_pci_group_info;
181 static void vfio_pci_dev_set_try_reset(struct vfio_device_set *dev_set);
182 static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
183 struct vfio_pci_group_info *groups);
186 * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
187 * _and_ the ability to detect when the device is asserting INTx via PCI_STATUS.
188 * If a device implements the former but not the latter we would typically
189 * expect broken_intx_masking to be set and require an exclusive interrupt.
190 * However since we do have control of the device's ability to assert INTx,
191 * we can instead pretend that the device does not implement INTx, virtualizing
192 * the pin register to report zero and maintaining DisINTx set on the host.
194 static bool vfio_pci_nointx(struct pci_dev *pdev)
196 switch (pdev->vendor) {
197 case PCI_VENDOR_ID_INTEL:
198 switch (pdev->device) {
199 /* All i40e (XL710/X710/XXV710) 10/20/25/40GbE NICs */
202 case 0x1580 ... 0x1581:
203 case 0x1583 ... 0x158b:
204 case 0x37d0 ... 0x37d2:
216 static void vfio_pci_probe_power_state(struct vfio_pci_core_device *vdev)
218 struct pci_dev *pdev = vdev->pdev;
224 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
226 vdev->needs_pm_restore = !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET);
230 * pci_set_power_state() wrapper handling devices which perform a soft reset on
231 * D3->D0 transition. Save state prior to D0/1/2->D3, stash it on the vdev,
232 * restore when returned to D0. Saved separately from pci_saved_state for use
233 * by PM capability emulation and separately from pci_dev internal saved state
234 * to avoid it being overwritten and consumed around other resets.
236 int vfio_pci_set_power_state(struct vfio_pci_core_device *vdev, pci_power_t state)
238 struct pci_dev *pdev = vdev->pdev;
239 bool needs_restore = false, needs_save = false;
242 /* Prevent changing power state for PFs with VFs enabled */
243 if (pci_num_vf(pdev) && state > PCI_D0)
246 if (vdev->needs_pm_restore) {
247 if (pdev->current_state < PCI_D3hot && state >= PCI_D3hot) {
248 pci_save_state(pdev);
252 if (pdev->current_state >= PCI_D3hot && state <= PCI_D0)
253 needs_restore = true;
256 ret = pci_set_power_state(pdev, state);
259 /* D3 might be unsupported via quirk, skip unless in D3 */
260 if (needs_save && pdev->current_state >= PCI_D3hot) {
262 * The current PCI state will be saved locally in
263 * 'pm_save' during the D3hot transition. When the
264 * device state is changed to D0 again with the current
265 * function, then pci_store_saved_state() will restore
266 * the state and will free the memory pointed by
267 * 'pm_save'. There are few cases where the PCI power
268 * state can be changed to D0 without the involvement
269 * of the driver. For these cases, free the earlier
270 * allocated memory first before overwriting 'pm_save'
271 * to prevent the memory leak.
273 kfree(vdev->pm_save);
274 vdev->pm_save = pci_store_saved_state(pdev);
275 } else if (needs_restore) {
276 pci_load_and_free_saved_state(pdev, &vdev->pm_save);
277 pci_restore_state(pdev);
284 static int vfio_pci_runtime_pm_entry(struct vfio_pci_core_device *vdev,
285 struct eventfd_ctx *efdctx)
288 * The vdev power related flags are protected with the 'memory_lock' semaphore.
291 vfio_pci_zap_and_down_write_memory_lock(vdev);
292 if (vdev->pm_runtime_engaged) {
293 up_write(&vdev->memory_lock);
297 vdev->pm_runtime_engaged = true;
298 vdev->pm_wake_eventfd_ctx = efdctx;
299 pm_runtime_put_noidle(&vdev->pdev->dev);
300 up_write(&vdev->memory_lock);
305 static int vfio_pci_core_pm_entry(struct vfio_device *device, u32 flags,
306 void __user *arg, size_t argsz)
308 struct vfio_pci_core_device *vdev =
309 container_of(device, struct vfio_pci_core_device, vdev);
312 ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET, 0);
317 * Inside vfio_pci_runtime_pm_entry(), only the runtime PM usage count
318 * will be decremented. The pm_runtime_put() will be invoked again
319 * while returning from the ioctl and then the device can go into
320 * runtime suspended state.
322 return vfio_pci_runtime_pm_entry(vdev, NULL);
325 static int vfio_pci_core_pm_entry_with_wakeup(
326 struct vfio_device *device, u32 flags,
327 struct vfio_device_low_power_entry_with_wakeup __user *arg,
330 struct vfio_pci_core_device *vdev =
331 container_of(device, struct vfio_pci_core_device, vdev);
332 struct vfio_device_low_power_entry_with_wakeup entry;
333 struct eventfd_ctx *efdctx;
336 ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET,
341 if (copy_from_user(&entry, arg, sizeof(entry)))
344 if (entry.wakeup_eventfd < 0)
347 efdctx = eventfd_ctx_fdget(entry.wakeup_eventfd);
349 return PTR_ERR(efdctx);
351 ret = vfio_pci_runtime_pm_entry(vdev, efdctx);
353 eventfd_ctx_put(efdctx);
358 static void __vfio_pci_runtime_pm_exit(struct vfio_pci_core_device *vdev)
360 if (vdev->pm_runtime_engaged) {
361 vdev->pm_runtime_engaged = false;
362 pm_runtime_get_noresume(&vdev->pdev->dev);
364 if (vdev->pm_wake_eventfd_ctx) {
365 eventfd_ctx_put(vdev->pm_wake_eventfd_ctx);
366 vdev->pm_wake_eventfd_ctx = NULL;
371 static void vfio_pci_runtime_pm_exit(struct vfio_pci_core_device *vdev)
374 * The vdev power related flags are protected with the 'memory_lock' semaphore.
377 down_write(&vdev->memory_lock);
378 __vfio_pci_runtime_pm_exit(vdev);
379 up_write(&vdev->memory_lock);
382 static int vfio_pci_core_pm_exit(struct vfio_device *device, u32 flags,
383 void __user *arg, size_t argsz)
385 struct vfio_pci_core_device *vdev =
386 container_of(device, struct vfio_pci_core_device, vdev);
389 ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET, 0);
394 * The device is always in the active state here due to pm wrappers
395 * around ioctls. If the device had entered a low power state and
396 * pm_wake_eventfd_ctx is valid, vfio_pci_core_runtime_resume() has
397 * already signaled the eventfd and exited low power mode itself.
398 * pm_runtime_engaged protects the redundant call here.
400 vfio_pci_runtime_pm_exit(vdev);
405 static int vfio_pci_core_runtime_suspend(struct device *dev)
407 struct vfio_pci_core_device *vdev = dev_get_drvdata(dev);
409 down_write(&vdev->memory_lock);
411 * The user can move the device into D3hot state before invoking
412 * power management IOCTL. Move the device into D0 state here and then
413 * the pci-driver core runtime PM suspend function will move the device
414 * into the low power state. Also, for the devices which have
415 * NoSoftRst-, it will help in restoring the original state
416 * (saved locally in 'vdev->pm_save').
418 vfio_pci_set_power_state(vdev, PCI_D0);
419 up_write(&vdev->memory_lock);
422 * If INTx is enabled, then mask INTx before going into the runtime
423 * suspended state and unmask the same in the runtime resume.
424 * If INTx has already been masked by the user, then
425 * vfio_pci_intx_mask() will return false and in that case, INTx
426 * should not be unmasked in the runtime resume.
428 vdev->pm_intx_masked = ((vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX) &&
429 vfio_pci_intx_mask(vdev));
434 static int vfio_pci_core_runtime_resume(struct device *dev)
436 struct vfio_pci_core_device *vdev = dev_get_drvdata(dev);
439 * Resume with a pm_wake_eventfd_ctx signals the eventfd and exits low power mode.
442 down_write(&vdev->memory_lock);
443 if (vdev->pm_wake_eventfd_ctx) {
444 eventfd_signal(vdev->pm_wake_eventfd_ctx, 1);
445 __vfio_pci_runtime_pm_exit(vdev);
447 up_write(&vdev->memory_lock);
449 if (vdev->pm_intx_masked)
450 vfio_pci_intx_unmask(vdev);
454 #endif /* CONFIG_PM */
457 * The pci-driver core runtime PM routines always save the device state
458 * before going into the suspended state. If the device is going into a low
459 * power state with only runtime PM ops, then no explicit handling is needed
460 * for the devices which have NoSoftRst-.
462 static const struct dev_pm_ops vfio_pci_core_pm_ops = {
463 SET_RUNTIME_PM_OPS(vfio_pci_core_runtime_suspend,
464 vfio_pci_core_runtime_resume,
468 int vfio_pci_core_enable(struct vfio_pci_core_device *vdev)
470 struct pci_dev *pdev = vdev->pdev;
475 if (!disable_idle_d3) {
476 ret = pm_runtime_resume_and_get(&pdev->dev);
481 /* Don't allow our initial saved state to include busmaster */
482 pci_clear_master(pdev);
484 ret = pci_enable_device(pdev);
488 /* If reset fails because of the device lock, fail this path entirely */
489 ret = pci_try_reset_function(pdev);
491 goto out_disable_device;
493 vdev->reset_works = !ret;
494 pci_save_state(pdev);
495 vdev->pci_saved_state = pci_store_saved_state(pdev);
496 if (!vdev->pci_saved_state)
497 pci_dbg(pdev, "%s: Couldn't store saved state\n", __func__);
499 if (likely(!nointxmask)) {
500 if (vfio_pci_nointx(pdev)) {
501 pci_info(pdev, "Masking broken INTx support\n");
505 vdev->pci_2_3 = pci_intx_mask_supported(pdev);
508 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
509 if (vdev->pci_2_3 && (cmd & PCI_COMMAND_INTX_DISABLE)) {
510 cmd &= ~PCI_COMMAND_INTX_DISABLE;
511 pci_write_config_word(pdev, PCI_COMMAND, cmd);
514 ret = vfio_pci_zdev_open_device(vdev);
518 ret = vfio_config_init(vdev);
522 msix_pos = pdev->msix_cap;
527 pci_read_config_word(pdev, msix_pos + PCI_MSIX_FLAGS, &flags);
528 pci_read_config_dword(pdev, msix_pos + PCI_MSIX_TABLE, &table);
530 vdev->msix_bar = table & PCI_MSIX_TABLE_BIR;
531 vdev->msix_offset = table & PCI_MSIX_TABLE_OFFSET;
532 vdev->msix_size = ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) * 16;
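/*
 * Worked example (illustrative): each MSI-X table entry is 16 bytes and
 * PCI_MSIX_FLAGS_QSIZE encodes the table size as N-1, so a raw QSIZE
 * field of 7 means 8 vectors and msix_size = 8 * 16 = 128 bytes.
 */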
534 vdev->msix_bar = 0xFF;
536 if (!vfio_vga_disabled() && vfio_pci_is_vga(pdev))
537 vdev->has_vga = true;
543 vfio_pci_zdev_close_device(vdev);
545 kfree(vdev->pci_saved_state);
546 vdev->pci_saved_state = NULL;
548 pci_disable_device(pdev);
550 if (!disable_idle_d3)
551 pm_runtime_put(&pdev->dev);
554 EXPORT_SYMBOL_GPL(vfio_pci_core_enable);
556 void vfio_pci_core_disable(struct vfio_pci_core_device *vdev)
558 struct pci_dev *pdev = vdev->pdev;
559 struct vfio_pci_dummy_resource *dummy_res, *tmp;
560 struct vfio_pci_ioeventfd *ioeventfd, *ioeventfd_tmp;
563 /* For needs_reset */
564 lockdep_assert_held(&vdev->vdev.dev_set->lock);
567 * This function can be invoked while the power state is non-D0.
568 * This non-D0 power state can be with or without runtime PM.
569 * vfio_pci_runtime_pm_exit() will internally increment the usage
570 * count corresponding to pm_runtime_put() called during low power
571 * feature entry and then pm_runtime_resume() will wake up the device,
572 * if the device has already gone into the suspended state. Otherwise,
573 * the vfio_pci_set_power_state() will change the device power state to D0.
576 vfio_pci_runtime_pm_exit(vdev);
577 pm_runtime_resume(&pdev->dev);
580 * This function calls __pci_reset_function_locked() which internally
581 * can use pci_pm_reset() for the function reset. pci_pm_reset() will
582 * fail if the power state is non-D0. Also, for the devices which
583 * have NoSoftRst-, the reset function can cause the PCI config space
584 * reset without restoring the original state (saved locally in 'vdev->pm_save').
587 vfio_pci_set_power_state(vdev, PCI_D0);
589 /* Stop the device from further DMA */
590 pci_clear_master(pdev);
592 vfio_pci_set_irqs_ioctl(vdev, VFIO_IRQ_SET_DATA_NONE |
593 VFIO_IRQ_SET_ACTION_TRIGGER,
594 vdev->irq_type, 0, 0, NULL);
596 /* Device closed, don't need mutex here */
597 list_for_each_entry_safe(ioeventfd, ioeventfd_tmp,
598 &vdev->ioeventfds_list, next) {
599 vfio_virqfd_disable(&ioeventfd->virqfd);
600 list_del(&ioeventfd->next);
603 vdev->ioeventfds_nr = 0;
605 vdev->virq_disabled = false;
607 for (i = 0; i < vdev->num_regions; i++)
608 vdev->region[i].ops->release(vdev, &vdev->region[i]);
610 vdev->num_regions = 0;
612 vdev->region = NULL; /* don't krealloc a freed pointer */
614 vfio_config_free(vdev);
616 for (i = 0; i < PCI_STD_NUM_BARS; i++) {
617 bar = i + PCI_STD_RESOURCES;
618 if (!vdev->barmap[bar])
620 pci_iounmap(pdev, vdev->barmap[bar]);
621 pci_release_selected_regions(pdev, 1 << bar);
622 vdev->barmap[bar] = NULL;
625 list_for_each_entry_safe(dummy_res, tmp,
626 &vdev->dummy_resources_list, res_next) {
627 list_del(&dummy_res->res_next);
628 release_resource(&dummy_res->resource);
632 vdev->needs_reset = true;
634 vfio_pci_zdev_close_device(vdev);
637 * If we have saved state, restore it. If we can reset the device,
638 * even better. Resetting with current state seems better than
639 * nothing, but saving and restoring current state without reset is just busy work.
642 if (pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state)) {
643 pci_info(pdev, "%s: Couldn't reload saved state\n", __func__);
645 if (!vdev->reset_works)
648 pci_save_state(pdev);
652 * Disable INTx and MSI, presumably to avoid spurious interrupts
653 * during reset. Stolen from pci_reset_function()
655 pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
658 * Try to get the locks ourselves to prevent a deadlock. The
659 * success of this is dependent on being able to lock the device,
660 * which is not always possible.
661 * We cannot use the "try" reset interface here, which would
662 * overwrite the previously restored configuration information.
664 if (vdev->reset_works && pci_dev_trylock(pdev)) {
665 if (!__pci_reset_function_locked(pdev))
666 vdev->needs_reset = false;
667 pci_dev_unlock(pdev);
670 pci_restore_state(pdev);
672 pci_disable_device(pdev);
674 vfio_pci_dev_set_try_reset(vdev->vdev.dev_set);
676 /* Put the pm-runtime usage counter acquired during enable */
677 if (!disable_idle_d3)
678 pm_runtime_put(&pdev->dev);
680 EXPORT_SYMBOL_GPL(vfio_pci_core_disable);
682 void vfio_pci_core_close_device(struct vfio_device *core_vdev)
684 struct vfio_pci_core_device *vdev =
685 container_of(core_vdev, struct vfio_pci_core_device, vdev);
687 if (vdev->sriov_pf_core_dev) {
688 mutex_lock(&vdev->sriov_pf_core_dev->vf_token->lock);
689 WARN_ON(!vdev->sriov_pf_core_dev->vf_token->users);
690 vdev->sriov_pf_core_dev->vf_token->users--;
691 mutex_unlock(&vdev->sriov_pf_core_dev->vf_token->lock);
693 #if IS_ENABLED(CONFIG_EEH)
694 eeh_dev_release(vdev->pdev);
696 vfio_pci_core_disable(vdev);
698 mutex_lock(&vdev->igate);
699 if (vdev->err_trigger) {
700 eventfd_ctx_put(vdev->err_trigger);
701 vdev->err_trigger = NULL;
703 if (vdev->req_trigger) {
704 eventfd_ctx_put(vdev->req_trigger);
705 vdev->req_trigger = NULL;
707 mutex_unlock(&vdev->igate);
709 EXPORT_SYMBOL_GPL(vfio_pci_core_close_device);
711 void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev)
713 vfio_pci_probe_mmaps(vdev);
714 #if IS_ENABLED(CONFIG_EEH)
715 eeh_dev_open(vdev->pdev);
718 if (vdev->sriov_pf_core_dev) {
719 mutex_lock(&vdev->sriov_pf_core_dev->vf_token->lock);
720 vdev->sriov_pf_core_dev->vf_token->users++;
721 mutex_unlock(&vdev->sriov_pf_core_dev->vf_token->lock);
724 EXPORT_SYMBOL_GPL(vfio_pci_core_finish_enable);
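/*
 * Usage sketch (illustrative, not part of this file): a variant driver's
 * open_device callback pairs vfio_pci_core_enable() with
 * vfio_pci_core_finish_enable(), doing any device-specific setup in
 * between. The name vfio_pci_open_device below follows the plain
 * vfio-pci driver and is shown only as an example.
 *
 *	static int vfio_pci_open_device(struct vfio_device *core_vdev)
 *	{
 *		struct vfio_pci_core_device *vdev =
 *			container_of(core_vdev, struct vfio_pci_core_device, vdev);
 *		int ret;
 *
 *		ret = vfio_pci_core_enable(vdev);
 *		if (ret)
 *			return ret;
 *
 *		vfio_pci_core_finish_enable(vdev);
 *		return 0;
 *	}
 */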
726 static int vfio_pci_get_irq_count(struct vfio_pci_core_device *vdev, int irq_type)
728 if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
731 if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) ||
732 vdev->nointx || vdev->pdev->is_virtfn)
735 pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);
738 } else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
742 pos = vdev->pdev->msi_cap;
744 pci_read_config_word(vdev->pdev,
745 pos + PCI_MSI_FLAGS, &flags);
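/*
 * Example (illustrative): PCI_MSI_FLAGS_QMASK covers the 3-bit Multiple
 * Message Capable field, which stores log2 of the supported vector
 * count; a field value of 3 therefore reports 1 << 3 = 8 vectors.
 */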
746 return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1);
748 } else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) {
752 pos = vdev->pdev->msix_cap;
754 pci_read_config_word(vdev->pdev,
755 pos + PCI_MSIX_FLAGS, &flags);
757 return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
759 } else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) {
760 if (pci_is_pcie(vdev->pdev))
762 } else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) {
769 static int vfio_pci_count_devs(struct pci_dev *pdev, void *data)
775 struct vfio_pci_fill_info {
778 struct vfio_pci_dependent_device *devices;
781 static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
783 struct vfio_pci_fill_info *fill = data;
784 struct iommu_group *iommu_group;
786 if (fill->cur == fill->max)
787 return -EAGAIN; /* Something changed, try again */
789 iommu_group = iommu_group_get(&pdev->dev);
791 return -EPERM; /* Cannot reset non-isolated devices */
793 fill->devices[fill->cur].group_id = iommu_group_id(iommu_group);
794 fill->devices[fill->cur].segment = pci_domain_nr(pdev->bus);
795 fill->devices[fill->cur].bus = pdev->bus->number;
796 fill->devices[fill->cur].devfn = pdev->devfn;
798 iommu_group_put(iommu_group);
802 struct vfio_pci_group_info {
807 static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
809 for (; pdev; pdev = pdev->bus->self)
810 if (pdev->bus == slot->bus)
811 return (pdev->slot == slot);
815 struct vfio_pci_walk_info {
816 int (*fn)(struct pci_dev *pdev, void *data);
818 struct pci_dev *pdev;
823 static int vfio_pci_walk_wrapper(struct pci_dev *pdev, void *data)
825 struct vfio_pci_walk_info *walk = data;
827 if (!walk->slot || vfio_pci_dev_below_slot(pdev, walk->pdev->slot))
828 walk->ret = walk->fn(pdev, walk->data);
833 static int vfio_pci_for_each_slot_or_bus(struct pci_dev *pdev,
834 int (*fn)(struct pci_dev *,
835 void *data), void *data,
838 struct vfio_pci_walk_info walk = {
839 .fn = fn, .data = data, .pdev = pdev, .slot = slot, .ret = 0,
842 pci_walk_bus(pdev->bus, vfio_pci_walk_wrapper, &walk);
847 static int msix_mmappable_cap(struct vfio_pci_core_device *vdev,
848 struct vfio_info_cap *caps)
850 struct vfio_info_cap_header header = {
851 .id = VFIO_REGION_INFO_CAP_MSIX_MAPPABLE,
855 return vfio_info_add_capability(caps, &header, sizeof(header));
858 int vfio_pci_core_register_dev_region(struct vfio_pci_core_device *vdev,
859 unsigned int type, unsigned int subtype,
860 const struct vfio_pci_regops *ops,
861 size_t size, u32 flags, void *data)
863 struct vfio_pci_region *region;
865 region = krealloc(vdev->region,
866 (vdev->num_regions + 1) * sizeof(*region),
871 vdev->region = region;
872 vdev->region[vdev->num_regions].type = type;
873 vdev->region[vdev->num_regions].subtype = subtype;
874 vdev->region[vdev->num_regions].ops = ops;
875 vdev->region[vdev->num_regions].size = size;
876 vdev->region[vdev->num_regions].flags = flags;
877 vdev->region[vdev->num_regions].data = data;
883 EXPORT_SYMBOL_GPL(vfio_pci_core_register_dev_region);
885 static int vfio_pci_ioctl_get_info(struct vfio_pci_core_device *vdev,
886 struct vfio_device_info __user *arg)
888 unsigned long minsz = offsetofend(struct vfio_device_info, num_irqs);
889 struct vfio_device_info info;
890 struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
894 /* For backward compatibility, cannot require this */
895 capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset);
897 if (copy_from_user(&info, arg, minsz))
900 if (info.argsz < minsz)
903 if (info.argsz >= capsz) {
908 info.flags = VFIO_DEVICE_FLAGS_PCI;
910 if (vdev->reset_works)
911 info.flags |= VFIO_DEVICE_FLAGS_RESET;
913 info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
914 info.num_irqs = VFIO_PCI_NUM_IRQS;
916 ret = vfio_pci_info_zdev_add_caps(vdev, &caps);
917 if (ret && ret != -ENODEV) {
919 "Failed to setup zPCI info capabilities\n");
924 info.flags |= VFIO_DEVICE_FLAGS_CAPS;
925 if (info.argsz < sizeof(info) + caps.size) {
926 info.argsz = sizeof(info) + caps.size;
928 vfio_info_cap_shift(&caps, sizeof(info));
929 if (copy_to_user(arg + 1, caps.buf, caps.size)) {
933 info.cap_offset = sizeof(*arg);
939 return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
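/*
 * Userspace sketch (illustrative): querying device info through this
 * ioctl. device_fd is assumed to be an open VFIO device fd and the
 * definitions come from <linux/vfio.h>.
 *
 *	struct vfio_device_info info = { .argsz = sizeof(info) };
 *
 *	ioctl(device_fd, VFIO_DEVICE_GET_INFO, &info);
 *
 * On return, info.num_regions covers VFIO_PCI_NUM_REGIONS plus any
 * device-specific regions and info.num_irqs is VFIO_PCI_NUM_IRQS.
 */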
942 static int vfio_pci_ioctl_get_region_info(struct vfio_pci_core_device *vdev,
943 struct vfio_region_info __user *arg)
945 unsigned long minsz = offsetofend(struct vfio_region_info, offset);
946 struct pci_dev *pdev = vdev->pdev;
947 struct vfio_region_info info;
948 struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
951 if (copy_from_user(&info, arg, minsz))
954 if (info.argsz < minsz)
957 switch (info.index) {
958 case VFIO_PCI_CONFIG_REGION_INDEX:
959 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
960 info.size = pdev->cfg_size;
961 info.flags = VFIO_REGION_INFO_FLAG_READ |
962 VFIO_REGION_INFO_FLAG_WRITE;
964 case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
965 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
966 info.size = pci_resource_len(pdev, info.index);
972 info.flags = VFIO_REGION_INFO_FLAG_READ |
973 VFIO_REGION_INFO_FLAG_WRITE;
974 if (vdev->bar_mmap_supported[info.index]) {
975 info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
976 if (info.index == vdev->msix_bar) {
977 ret = msix_mmappable_cap(vdev, &caps);
984 case VFIO_PCI_ROM_REGION_INDEX: {
989 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
992 /* Report the BAR size, not the ROM size */
993 info.size = pci_resource_len(pdev, info.index);
995 /* Shadow ROMs appear as PCI option ROMs */
996 if (pdev->resource[PCI_ROM_RESOURCE].flags &
997 IORESOURCE_ROM_SHADOW)
1004 * Is it really there? Enable memory decode for implicit access in pci_map_rom().
1007 cmd = vfio_pci_memory_lock_and_enable(vdev);
1008 io = pci_map_rom(pdev, &size);
1010 info.flags = VFIO_REGION_INFO_FLAG_READ;
1011 pci_unmap_rom(pdev, io);
1015 vfio_pci_memory_unlock_and_restore(vdev, cmd);
1019 case VFIO_PCI_VGA_REGION_INDEX:
1023 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1024 info.size = 0xc0000;
1025 info.flags = VFIO_REGION_INFO_FLAG_READ |
1026 VFIO_REGION_INFO_FLAG_WRITE;
1030 struct vfio_region_info_cap_type cap_type = {
1031 .header.id = VFIO_REGION_INFO_CAP_TYPE,
1035 if (info.index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
1037 info.index = array_index_nospec(
1038 info.index, VFIO_PCI_NUM_REGIONS + vdev->num_regions);
1040 i = info.index - VFIO_PCI_NUM_REGIONS;
1042 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1043 info.size = vdev->region[i].size;
1044 info.flags = vdev->region[i].flags;
1046 cap_type.type = vdev->region[i].type;
1047 cap_type.subtype = vdev->region[i].subtype;
1049 ret = vfio_info_add_capability(&caps, &cap_type.header,
1054 if (vdev->region[i].ops->add_capability) {
1055 ret = vdev->region[i].ops->add_capability(
1056 vdev, &vdev->region[i], &caps);
1064 info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
1065 if (info.argsz < sizeof(info) + caps.size) {
1066 info.argsz = sizeof(info) + caps.size;
1067 info.cap_offset = 0;
1069 vfio_info_cap_shift(&caps, sizeof(info));
1070 if (copy_to_user(arg + 1, caps.buf, caps.size)) {
1074 info.cap_offset = sizeof(*arg);
1080 return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
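/*
 * Userspace sketch (illustrative): using the reported offset to mmap a
 * BAR. device_fd is assumed to be an open VFIO device fd.
 *
 *	struct vfio_region_info reg = {
 *		.argsz = sizeof(reg),
 *		.index = VFIO_PCI_BAR0_REGION_INDEX,
 *	};
 *
 *	ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &reg);
 *	if (reg.flags & VFIO_REGION_INFO_FLAG_MMAP)
 *		bar0 = mmap(NULL, reg.size, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED, device_fd, reg.offset);
 */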
1083 static int vfio_pci_ioctl_get_irq_info(struct vfio_pci_core_device *vdev,
1084 struct vfio_irq_info __user *arg)
1086 unsigned long minsz = offsetofend(struct vfio_irq_info, count);
1087 struct vfio_irq_info info;
1089 if (copy_from_user(&info, arg, minsz))
1092 if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
1095 switch (info.index) {
1096 case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
1097 case VFIO_PCI_REQ_IRQ_INDEX:
1099 case VFIO_PCI_ERR_IRQ_INDEX:
1100 if (pci_is_pcie(vdev->pdev))
1107 info.flags = VFIO_IRQ_INFO_EVENTFD;
1109 info.count = vfio_pci_get_irq_count(vdev, info.index);
1111 if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
1113 (VFIO_IRQ_INFO_MASKABLE | VFIO_IRQ_INFO_AUTOMASKED);
1115 info.flags |= VFIO_IRQ_INFO_NORESIZE;
1117 return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
1120 static int vfio_pci_ioctl_set_irqs(struct vfio_pci_core_device *vdev,
1121 struct vfio_irq_set __user *arg)
1123 unsigned long minsz = offsetofend(struct vfio_irq_set, count);
1124 struct vfio_irq_set hdr;
1127 size_t data_size = 0;
1129 if (copy_from_user(&hdr, arg, minsz))
1132 max = vfio_pci_get_irq_count(vdev, hdr.index);
1134 ret = vfio_set_irqs_validate_and_prepare(&hdr, max, VFIO_PCI_NUM_IRQS,
1140 data = memdup_user(&arg->data, data_size);
1142 return PTR_ERR(data);
1145 mutex_lock(&vdev->igate);
1147 ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index, hdr.start,
1150 mutex_unlock(&vdev->igate);
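/*
 * Userspace sketch (illustrative): wiring a single MSI vector to an
 * eventfd through this ioctl. device_fd and efd (from eventfd(0, 0))
 * are assumed to already exist.
 *
 *	struct {
 *		struct vfio_irq_set hdr;
 *		__s32 fd;
 *	} set = {
 *		.hdr = {
 *			.argsz = sizeof(set),
 *			.flags = VFIO_IRQ_SET_DATA_EVENTFD |
 *				 VFIO_IRQ_SET_ACTION_TRIGGER,
 *			.index = VFIO_PCI_MSI_IRQ_INDEX,
 *			.start = 0,
 *			.count = 1,
 *		},
 *		.fd = efd,
 *	};
 *
 *	ioctl(device_fd, VFIO_DEVICE_SET_IRQS, &set);
 */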
1156 static int vfio_pci_ioctl_reset(struct vfio_pci_core_device *vdev,
1161 if (!vdev->reset_works)
1164 vfio_pci_zap_and_down_write_memory_lock(vdev);
1167 * This function can be invoked while the power state is non-D0. If
1168 * pci_try_reset_function() has been called while the power state is
1169 * non-D0, then pci_try_reset_function() will internally set the power
1170 * state to D0 without vfio driver involvement. For the devices which
1171 * have NoSoftRst-, the reset function can cause the PCI config space
1172 * reset without restoring the original state (saved locally in
1175 vfio_pci_set_power_state(vdev, PCI_D0);
1177 ret = pci_try_reset_function(vdev->pdev);
1178 up_write(&vdev->memory_lock);
1183 static int vfio_pci_ioctl_get_pci_hot_reset_info(
1184 struct vfio_pci_core_device *vdev,
1185 struct vfio_pci_hot_reset_info __user *arg)
1187 unsigned long minsz =
1188 offsetofend(struct vfio_pci_hot_reset_info, count);
1189 struct vfio_pci_hot_reset_info hdr;
1190 struct vfio_pci_fill_info fill = { 0 };
1191 struct vfio_pci_dependent_device *devices = NULL;
1195 if (copy_from_user(&hdr, arg, minsz))
1198 if (hdr.argsz < minsz)
1203 /* Can we do a slot or bus reset or neither? */
1204 if (!pci_probe_reset_slot(vdev->pdev->slot))
1206 else if (pci_probe_reset_bus(vdev->pdev->bus))
1209 /* How many devices are affected? */
1210 ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
1215 WARN_ON(!fill.max); /* Should always be at least one */
1218 * If there's enough space, fill it now, otherwise return -ENOSPC and
1219 * the number of devices affected.
1221 if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
1223 hdr.count = fill.max;
1224 goto reset_info_exit;
1227 devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
1231 fill.devices = devices;
1233 ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_fill_devs,
1237 * If a device was removed between counting and filling, we may come up
1238 * short of fill.max. If a device was added, we'll have a return of -EAGAIN above.
1242 hdr.count = fill.cur;
1245 if (copy_to_user(arg, &hdr, minsz))
1249 if (copy_to_user(&arg->devices, devices,
1250 hdr.count * sizeof(*devices)))
1258 static int vfio_pci_ioctl_pci_hot_reset(struct vfio_pci_core_device *vdev,
1259 struct vfio_pci_hot_reset __user *arg)
1261 unsigned long minsz = offsetofend(struct vfio_pci_hot_reset, count);
1262 struct vfio_pci_hot_reset hdr;
1264 struct file **files;
1265 struct vfio_pci_group_info info;
1267 int file_idx, count = 0, ret = 0;
1269 if (copy_from_user(&hdr, arg, minsz))
1272 if (hdr.argsz < minsz || hdr.flags)
1275 /* Can we do a slot or bus reset or neither? */
1276 if (!pci_probe_reset_slot(vdev->pdev->slot))
1278 else if (pci_probe_reset_bus(vdev->pdev->bus))
1282 * We can't let userspace give us an arbitrarily large buffer to copy,
1283 * so verify how many we think there could be. Note groups can have
1284 * multiple devices so one group per device is the max.
1286 ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
1291 /* Somewhere between 1 and count is OK */
1292 if (!hdr.count || hdr.count > count)
1295 group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
1296 files = kcalloc(hdr.count, sizeof(*files), GFP_KERNEL);
1297 if (!group_fds || !files) {
1303 if (copy_from_user(group_fds, arg->group_fds,
1304 hdr.count * sizeof(*group_fds))) {
1311 * For each group_fd, get the group through the vfio external user
1312 * interface and store the group and iommu ID. This ensures the group
1313 * is held across the reset.
1315 for (file_idx = 0; file_idx < hdr.count; file_idx++) {
1316 struct file *file = fget(group_fds[file_idx]);
1323 /* Ensure the FD is a vfio group FD. */
1324 if (!vfio_file_is_group(file)) {
1330 files[file_idx] = file;
1335 /* release reference to groups on error */
1337 goto hot_reset_release;
1339 info.count = hdr.count;
1342 ret = vfio_pci_dev_set_hot_reset(vdev->vdev.dev_set, &info);
1345 for (file_idx--; file_idx >= 0; file_idx--)
1346 fput(files[file_idx]);
1352 static int vfio_pci_ioctl_ioeventfd(struct vfio_pci_core_device *vdev,
1353 struct vfio_device_ioeventfd __user *arg)
1355 unsigned long minsz = offsetofend(struct vfio_device_ioeventfd, fd);
1356 struct vfio_device_ioeventfd ioeventfd;
1359 if (copy_from_user(&ioeventfd, arg, minsz))
1362 if (ioeventfd.argsz < minsz)
1365 if (ioeventfd.flags & ~VFIO_DEVICE_IOEVENTFD_SIZE_MASK)
1368 count = ioeventfd.flags & VFIO_DEVICE_IOEVENTFD_SIZE_MASK;
1370 if (hweight8(count) != 1 || ioeventfd.fd < -1)
1373 return vfio_pci_ioeventfd(vdev, ioeventfd.offset, ioeventfd.data, count,
1377 long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
1380 struct vfio_pci_core_device *vdev =
1381 container_of(core_vdev, struct vfio_pci_core_device, vdev);
1382 void __user *uarg = (void __user *)arg;
1385 case VFIO_DEVICE_GET_INFO:
1386 return vfio_pci_ioctl_get_info(vdev, uarg);
1387 case VFIO_DEVICE_GET_IRQ_INFO:
1388 return vfio_pci_ioctl_get_irq_info(vdev, uarg);
1389 case VFIO_DEVICE_GET_PCI_HOT_RESET_INFO:
1390 return vfio_pci_ioctl_get_pci_hot_reset_info(vdev, uarg);
1391 case VFIO_DEVICE_GET_REGION_INFO:
1392 return vfio_pci_ioctl_get_region_info(vdev, uarg);
1393 case VFIO_DEVICE_IOEVENTFD:
1394 return vfio_pci_ioctl_ioeventfd(vdev, uarg);
1395 case VFIO_DEVICE_PCI_HOT_RESET:
1396 return vfio_pci_ioctl_pci_hot_reset(vdev, uarg);
1397 case VFIO_DEVICE_RESET:
1398 return vfio_pci_ioctl_reset(vdev, uarg);
1399 case VFIO_DEVICE_SET_IRQS:
1400 return vfio_pci_ioctl_set_irqs(vdev, uarg);
1405 EXPORT_SYMBOL_GPL(vfio_pci_core_ioctl);
1407 static int vfio_pci_core_feature_token(struct vfio_device *device, u32 flags,
1408 uuid_t __user *arg, size_t argsz)
1410 struct vfio_pci_core_device *vdev =
1411 container_of(device, struct vfio_pci_core_device, vdev);
1415 if (!vdev->vf_token)
1418 * We do not support GET of the VF Token UUID as this could
1419 * expose the token of the previous device user.
1421 ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET,
1426 if (copy_from_user(&uuid, arg, sizeof(uuid)))
1429 mutex_lock(&vdev->vf_token->lock);
1430 uuid_copy(&vdev->vf_token->uuid, &uuid);
1431 mutex_unlock(&vdev->vf_token->lock);
1435 int vfio_pci_core_ioctl_feature(struct vfio_device *device, u32 flags,
1436 void __user *arg, size_t argsz)
1438 switch (flags & VFIO_DEVICE_FEATURE_MASK) {
1439 case VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY:
1440 return vfio_pci_core_pm_entry(device, flags, arg, argsz);
1441 case VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP:
1442 return vfio_pci_core_pm_entry_with_wakeup(device, flags,
1444 case VFIO_DEVICE_FEATURE_LOW_POWER_EXIT:
1445 return vfio_pci_core_pm_exit(device, flags, arg, argsz);
1446 case VFIO_DEVICE_FEATURE_PCI_VF_TOKEN:
1447 return vfio_pci_core_feature_token(device, flags, arg, argsz);
1452 EXPORT_SYMBOL_GPL(vfio_pci_core_ioctl_feature);
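/*
 * Userspace sketch (illustrative): requesting low power entry through
 * the VFIO_DEVICE_FEATURE ioctl, which is routed to
 * vfio_pci_core_pm_entry() above.
 *
 *	struct vfio_device_feature feat = {
 *		.argsz = sizeof(feat),
 *		.flags = VFIO_DEVICE_FEATURE_SET |
 *			 VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY,
 *	};
 *
 *	ioctl(device_fd, VFIO_DEVICE_FEATURE, &feat);
 */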
1454 static ssize_t vfio_pci_rw(struct vfio_pci_core_device *vdev, char __user *buf,
1455 size_t count, loff_t *ppos, bool iswrite)
1457 unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
1460 if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
1463 ret = pm_runtime_resume_and_get(&vdev->pdev->dev);
1465 pci_info_ratelimited(vdev->pdev, "runtime resume failed %d\n",
1471 case VFIO_PCI_CONFIG_REGION_INDEX:
1472 ret = vfio_pci_config_rw(vdev, buf, count, ppos, iswrite);
1475 case VFIO_PCI_ROM_REGION_INDEX:
1479 ret = vfio_pci_bar_rw(vdev, buf, count, ppos, false);
1482 case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
1483 ret = vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite);
1486 case VFIO_PCI_VGA_REGION_INDEX:
1487 ret = vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite);
1491 index -= VFIO_PCI_NUM_REGIONS;
1492 ret = vdev->region[index].ops->rw(vdev, buf,
1493 count, ppos, iswrite);
1497 pm_runtime_put(&vdev->pdev->dev);
1501 ssize_t vfio_pci_core_read(struct vfio_device *core_vdev, char __user *buf,
1502 size_t count, loff_t *ppos)
1504 struct vfio_pci_core_device *vdev =
1505 container_of(core_vdev, struct vfio_pci_core_device, vdev);
1510 return vfio_pci_rw(vdev, buf, count, ppos, false);
1512 EXPORT_SYMBOL_GPL(vfio_pci_core_read);
1514 ssize_t vfio_pci_core_write(struct vfio_device *core_vdev, const char __user *buf,
1515 size_t count, loff_t *ppos)
1517 struct vfio_pci_core_device *vdev =
1518 container_of(core_vdev, struct vfio_pci_core_device, vdev);
1523 return vfio_pci_rw(vdev, (char __user *)buf, count, ppos, true);
1525 EXPORT_SYMBOL_GPL(vfio_pci_core_write);
1527 /* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */
1528 static int vfio_pci_zap_and_vma_lock(struct vfio_pci_core_device *vdev, bool try)
1530 struct vfio_pci_mmap_vma *mmap_vma, *tmp;
1534 * vma_lock is nested under mmap_lock for vm_ops callback paths.
1535 * The memory_lock semaphore is used by both code paths calling
1536 * into this function to zap vmas and the vm_ops.fault callback
1537 * to protect the memory enable state of the device.
1539 * When zapping vmas we need to maintain the mmap_lock => vma_lock
1540 * ordering, which requires using vma_lock to walk vma_list to
1541 * acquire an mm, then dropping vma_lock to get the mmap_lock and
1542 * reacquiring vma_lock. This logic is derived from similar
1543 * requirements in uverbs_user_mmap_disassociate().
1545 * mmap_lock must always be the top-level lock when it is taken.
1546 * Therefore we can only hold the memory_lock write lock when
1547 * vma_list is empty, as we'd need to take mmap_lock to clear
1548 * entries. vma_list can only be guaranteed empty when holding
1549 * vma_lock, thus memory_lock is nested under vma_lock.
1551 * This enables the vm_ops.fault callback to acquire vma_lock,
1552 * followed by memory_lock read lock, while already holding
1553 * mmap_lock without risk of deadlock.
1556 struct mm_struct *mm = NULL;
1559 if (!mutex_trylock(&vdev->vma_lock))
1562 mutex_lock(&vdev->vma_lock);
1564 while (!list_empty(&vdev->vma_list)) {
1565 mmap_vma = list_first_entry(&vdev->vma_list,
1566 struct vfio_pci_mmap_vma,
1568 mm = mmap_vma->vma->vm_mm;
1569 if (mmget_not_zero(mm))
1572 list_del(&mmap_vma->vma_next);
1578 mutex_unlock(&vdev->vma_lock);
1581 if (!mmap_read_trylock(mm)) {
1589 if (!mutex_trylock(&vdev->vma_lock)) {
1590 mmap_read_unlock(mm);
1595 mutex_lock(&vdev->vma_lock);
1597 list_for_each_entry_safe(mmap_vma, tmp,
1598 &vdev->vma_list, vma_next) {
1599 struct vm_area_struct *vma = mmap_vma->vma;
1601 if (vma->vm_mm != mm)
1604 list_del(&mmap_vma->vma_next);
1607 zap_vma_ptes(vma, vma->vm_start,
1608 vma->vm_end - vma->vm_start);
1610 mutex_unlock(&vdev->vma_lock);
1611 mmap_read_unlock(mm);
1616 void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_core_device *vdev)
1618 vfio_pci_zap_and_vma_lock(vdev, false);
1619 down_write(&vdev->memory_lock);
1620 mutex_unlock(&vdev->vma_lock);
1623 u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_core_device *vdev)
1627 down_write(&vdev->memory_lock);
1628 pci_read_config_word(vdev->pdev, PCI_COMMAND, &cmd);
1629 if (!(cmd & PCI_COMMAND_MEMORY))
1630 pci_write_config_word(vdev->pdev, PCI_COMMAND,
1631 cmd | PCI_COMMAND_MEMORY);
1636 void vfio_pci_memory_unlock_and_restore(struct vfio_pci_core_device *vdev, u16 cmd)
1638 pci_write_config_word(vdev->pdev, PCI_COMMAND, cmd);
1639 up_write(&vdev->memory_lock);
1642 /* Caller holds vma_lock */
1643 static int __vfio_pci_add_vma(struct vfio_pci_core_device *vdev,
1644 struct vm_area_struct *vma)
1646 struct vfio_pci_mmap_vma *mmap_vma;
1648 mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL_ACCOUNT);
1652 mmap_vma->vma = vma;
1653 list_add(&mmap_vma->vma_next, &vdev->vma_list);
1659 * Zap mmaps on open so that we can fault them in on access and therefore
1660 * our vma_list only tracks mappings accessed since last zap.
1662 static void vfio_pci_mmap_open(struct vm_area_struct *vma)
1664 zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
1667 static void vfio_pci_mmap_close(struct vm_area_struct *vma)
1669 struct vfio_pci_core_device *vdev = vma->vm_private_data;
1670 struct vfio_pci_mmap_vma *mmap_vma;
1672 mutex_lock(&vdev->vma_lock);
1673 list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
1674 if (mmap_vma->vma == vma) {
1675 list_del(&mmap_vma->vma_next);
1680 mutex_unlock(&vdev->vma_lock);
1683 static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
1685 struct vm_area_struct *vma = vmf->vma;
1686 struct vfio_pci_core_device *vdev = vma->vm_private_data;
1687 struct vfio_pci_mmap_vma *mmap_vma;
1688 vm_fault_t ret = VM_FAULT_NOPAGE;
1690 mutex_lock(&vdev->vma_lock);
1691 down_read(&vdev->memory_lock);
1694 * Memory region cannot be accessed if the low power feature is engaged
1695 * or memory access is disabled.
1697 if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev)) {
1698 ret = VM_FAULT_SIGBUS;
1703 * We populate the whole vma on fault, so we need to test whether
1704 * the vma has already been mapped, such as for concurrent faults
1705 * to the same vma. io_remap_pfn_range() will trigger a BUG_ON if
1706 * we ask it to fill the same range again.
1708 list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
1709 if (mmap_vma->vma == vma)
1713 if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
1714 vma->vm_end - vma->vm_start,
1715 vma->vm_page_prot)) {
1716 ret = VM_FAULT_SIGBUS;
1717 zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
1721 if (__vfio_pci_add_vma(vdev, vma)) {
1723 zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
1727 up_read(&vdev->memory_lock);
1728 mutex_unlock(&vdev->vma_lock);
1732 static const struct vm_operations_struct vfio_pci_mmap_ops = {
1733 .open = vfio_pci_mmap_open,
1734 .close = vfio_pci_mmap_close,
1735 .fault = vfio_pci_mmap_fault,
1738 int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma)
1740 struct vfio_pci_core_device *vdev =
1741 container_of(core_vdev, struct vfio_pci_core_device, vdev);
1742 struct pci_dev *pdev = vdev->pdev;
1744 u64 phys_len, req_len, pgoff, req_start;
1747 index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
1749 if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
1751 if (vma->vm_end < vma->vm_start)
1753 if ((vma->vm_flags & VM_SHARED) == 0)
1755 if (index >= VFIO_PCI_NUM_REGIONS) {
1756 int regnum = index - VFIO_PCI_NUM_REGIONS;
1757 struct vfio_pci_region *region = vdev->region + regnum;
1759 if (region->ops && region->ops->mmap &&
1760 (region->flags & VFIO_REGION_INFO_FLAG_MMAP))
1761 return region->ops->mmap(vdev, region, vma);
1764 if (index >= VFIO_PCI_ROM_REGION_INDEX)
1766 if (!vdev->bar_mmap_supported[index])
1769 phys_len = PAGE_ALIGN(pci_resource_len(pdev, index));
1770 req_len = vma->vm_end - vma->vm_start;
1771 pgoff = vma->vm_pgoff &
1772 ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
1773 req_start = pgoff << PAGE_SHIFT;
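/*
 * Example (illustrative): the mmap offset encodes the region index in its
 * upper bits (index << VFIO_PCI_OFFSET_SHIFT, i.e. bit 40 and up), so with
 * 4K pages a BAR2 mapping uses vm_pgoff = 2 << (VFIO_PCI_OFFSET_SHIFT -
 * PAGE_SHIFT) = 2 << 28, and pgoff above recovers the page offset within
 * that region.
 */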
1775 if (req_start + req_len > phys_len)
1779 * Even though we don't make use of the barmap for the mmap,
1780 * we need to request the region and the barmap tracks that.
1782 if (!vdev->barmap[index]) {
1783 ret = pci_request_selected_regions(pdev,
1784 1 << index, "vfio-pci");
1788 vdev->barmap[index] = pci_iomap(pdev, index, 0);
1789 if (!vdev->barmap[index]) {
1790 pci_release_selected_regions(pdev, 1 << index);
1795 vma->vm_private_data = vdev;
1796 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1797 vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;
1800 * See remap_pfn_range(), called from vfio_pci_mmap_fault() but we can't
1801 * change vm_flags within the fault handler. Set them now.
1803 vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
1804 vma->vm_ops = &vfio_pci_mmap_ops;
1808 EXPORT_SYMBOL_GPL(vfio_pci_core_mmap);
1810 void vfio_pci_core_request(struct vfio_device *core_vdev, unsigned int count)
1812 struct vfio_pci_core_device *vdev =
1813 container_of(core_vdev, struct vfio_pci_core_device, vdev);
1814 struct pci_dev *pdev = vdev->pdev;
1816 mutex_lock(&vdev->igate);
1818 if (vdev->req_trigger) {
1820 pci_notice_ratelimited(pdev,
1821 "Relaying device request to user (#%u)\n",
1823 eventfd_signal(vdev->req_trigger, 1);
1824 } else if (count == 0) {
1826 "No device request channel registered, blocked until released by user\n");
1829 mutex_unlock(&vdev->igate);
1831 EXPORT_SYMBOL_GPL(vfio_pci_core_request);
1833 static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
1834 bool vf_token, uuid_t *uuid)
1837 * There's always some degree of trust or collaboration between SR-IOV
1838 * PF and VFs, even if just that the PF hosts the SR-IOV capability and
1839 * can disrupt VFs with a reset, but often the PF has more explicit
1840 * access to deny service to the VF or access data passed through the
1841 * VF. We therefore require an opt-in via a shared VF token (UUID) to
1842 * represent this trust. This both prevents a VF driver from assuming the
1843 * PF driver is a trusted, in-kernel driver, and prevents a PF driver from
1844 * being replaced with a rogue driver unknown to in-use VF drivers.
1847 * Therefore when presented with a VF, if the PF is a vfio device and
1848 * it is bound to the vfio-pci driver, the user needs to provide a VF
1849 * token to access the device, in the form of appending a vf_token to
1850 * the device name, for example:
1852 * "0000:04:10.0 vf_token=bd8d9d2b-5a5f-4f5a-a211-f591514ba1f3"
1854 * When presented with a PF which has VFs in use, the user must also
1855 * provide the current VF token to prove collaboration with existing
1856 * VF users. If VFs are not in use, the VF token provided for the PF
1857 * device will act to set the VF token.
1859 * If the VF token is provided but unused, an error is generated.
1861 if (vdev->pdev->is_virtfn) {
1862 struct vfio_pci_core_device *pf_vdev = vdev->sriov_pf_core_dev;
1867 return 0; /* PF is not vfio-pci, no VF token */
1869 pci_info_ratelimited(vdev->pdev,
1870 "VF token incorrectly provided, PF not bound to vfio-pci\n");
1875 pci_info_ratelimited(vdev->pdev,
1876 "VF token required to access device\n");
1880 mutex_lock(&pf_vdev->vf_token->lock);
1881 match = uuid_equal(uuid, &pf_vdev->vf_token->uuid);
1882 mutex_unlock(&pf_vdev->vf_token->lock);
1885 pci_info_ratelimited(vdev->pdev,
1886 "Incorrect VF token provided for device\n");
1889 } else if (vdev->vf_token) {
1890 mutex_lock(&vdev->vf_token->lock);
1891 if (vdev->vf_token->users) {
1893 mutex_unlock(&vdev->vf_token->lock);
1894 pci_info_ratelimited(vdev->pdev,
1895 "VF token required to access device\n");
1899 if (!uuid_equal(uuid, &vdev->vf_token->uuid)) {
1900 mutex_unlock(&vdev->vf_token->lock);
1901 pci_info_ratelimited(vdev->pdev,
1902 "Incorrect VF token provided for device\n");
1905 } else if (vf_token) {
1906 uuid_copy(&vdev->vf_token->uuid, uuid);
1909 mutex_unlock(&vdev->vf_token->lock);
1910 } else if (vf_token) {
1911 pci_info_ratelimited(vdev->pdev,
1912 "VF token incorrectly provided, not a PF or VF\n");
1919 #define VF_TOKEN_ARG "vf_token="
1921 int vfio_pci_core_match(struct vfio_device *core_vdev, char *buf)
1923 struct vfio_pci_core_device *vdev =
1924 container_of(core_vdev, struct vfio_pci_core_device, vdev);
1925 bool vf_token = false;
1929 if (strncmp(pci_name(vdev->pdev), buf, strlen(pci_name(vdev->pdev))))
1930 return 0; /* No match */
1932 if (strlen(buf) > strlen(pci_name(vdev->pdev))) {
1933 buf += strlen(pci_name(vdev->pdev));
1936 return 0; /* No match: non-whitespace after name */
1944 if (!vf_token && !strncmp(buf, VF_TOKEN_ARG,
1945 strlen(VF_TOKEN_ARG))) {
1946 buf += strlen(VF_TOKEN_ARG);
1948 if (strlen(buf) < UUID_STRING_LEN)
1951 ret = uuid_parse(buf, &uuid);
1956 buf += UUID_STRING_LEN;
1958 /* Unknown/duplicate option */
1964 ret = vfio_pci_validate_vf_token(vdev, vf_token, &uuid);
1968 return 1; /* Match */
1970 EXPORT_SYMBOL_GPL(vfio_pci_core_match);
1972 static int vfio_pci_bus_notifier(struct notifier_block *nb,
1973 unsigned long action, void *data)
1975 struct vfio_pci_core_device *vdev = container_of(nb,
1976 struct vfio_pci_core_device, nb);
1977 struct device *dev = data;
1978 struct pci_dev *pdev = to_pci_dev(dev);
1979 struct pci_dev *physfn = pci_physfn(pdev);
1981 if (action == BUS_NOTIFY_ADD_DEVICE &&
1982 pdev->is_virtfn && physfn == vdev->pdev) {
1983 pci_info(vdev->pdev, "Captured SR-IOV VF %s driver_override\n",
1985 pdev->driver_override = kasprintf(GFP_KERNEL, "%s",
1986 vdev->vdev.ops->name);
1987 } else if (action == BUS_NOTIFY_BOUND_DRIVER &&
1988 pdev->is_virtfn && physfn == vdev->pdev) {
1989 struct pci_driver *drv = pci_dev_driver(pdev);
1991 if (drv && drv != pci_dev_driver(vdev->pdev))
1992 pci_warn(vdev->pdev,
1993 "VF %s bound to driver %s while PF bound to driver %s\n",
1994 pci_name(pdev), drv->name,
1995 pci_dev_driver(vdev->pdev)->name);
2001 static int vfio_pci_vf_init(struct vfio_pci_core_device *vdev)
2003 struct pci_dev *pdev = vdev->pdev;
2004 struct vfio_pci_core_device *cur;
2005 struct pci_dev *physfn;
2008 if (pdev->is_virtfn) {
2010 * If this VF was created by our vfio_pci_core_sriov_configure()
2011 * then we can find the PF vfio_pci_core_device now, and due to
2012 * the locking in pci_disable_sriov() it cannot change until
2013 * this VF device driver is removed.
2015 physfn = pci_physfn(vdev->pdev);
2016 mutex_lock(&vfio_pci_sriov_pfs_mutex);
2017 list_for_each_entry(cur, &vfio_pci_sriov_pfs, sriov_pfs_item) {
2018 if (cur->pdev == physfn) {
2019 vdev->sriov_pf_core_dev = cur;
2023 mutex_unlock(&vfio_pci_sriov_pfs_mutex);
2027 /* Not a SRIOV PF */
2028 if (!pdev->is_physfn)
2031 vdev->vf_token = kzalloc(sizeof(*vdev->vf_token), GFP_KERNEL);
2032 if (!vdev->vf_token)
2035 mutex_init(&vdev->vf_token->lock);
2036 uuid_gen(&vdev->vf_token->uuid);
2038 vdev->nb.notifier_call = vfio_pci_bus_notifier;
2039 ret = bus_register_notifier(&pci_bus_type, &vdev->nb);
2041 kfree(vdev->vf_token);
2047 static void vfio_pci_vf_uninit(struct vfio_pci_core_device *vdev)
2049 if (!vdev->vf_token)
2052 bus_unregister_notifier(&pci_bus_type, &vdev->nb);
2053 WARN_ON(vdev->vf_token->users);
2054 mutex_destroy(&vdev->vf_token->lock);
2055 kfree(vdev->vf_token);
2058 static int vfio_pci_vga_init(struct vfio_pci_core_device *vdev)
2060 struct pci_dev *pdev = vdev->pdev;
2063 if (!vfio_pci_is_vga(pdev))
2066 ret = aperture_remove_conflicting_pci_devices(pdev, vdev->vdev.ops->name);
2070 ret = vga_client_register(pdev, vfio_pci_set_decode);
2073 vga_set_legacy_decoding(pdev, vfio_pci_set_decode(pdev, false));
2077 static void vfio_pci_vga_uninit(struct vfio_pci_core_device *vdev)
2079 struct pci_dev *pdev = vdev->pdev;
2081 if (!vfio_pci_is_vga(pdev))
2083 vga_client_unregister(pdev);
2084 vga_set_legacy_decoding(pdev, VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
2085 VGA_RSRC_LEGACY_IO |
2086 VGA_RSRC_LEGACY_MEM);
2089 int vfio_pci_core_init_dev(struct vfio_device *core_vdev)
2091 struct vfio_pci_core_device *vdev =
2092 container_of(core_vdev, struct vfio_pci_core_device, vdev);
2094 vdev->pdev = to_pci_dev(core_vdev->dev);
2095 vdev->irq_type = VFIO_PCI_NUM_IRQS;
2096 mutex_init(&vdev->igate);
2097 spin_lock_init(&vdev->irqlock);
2098 mutex_init(&vdev->ioeventfds_lock);
2099 INIT_LIST_HEAD(&vdev->dummy_resources_list);
2100 INIT_LIST_HEAD(&vdev->ioeventfds_list);
2101 mutex_init(&vdev->vma_lock);
2102 INIT_LIST_HEAD(&vdev->vma_list);
2103 INIT_LIST_HEAD(&vdev->sriov_pfs_item);
2104 init_rwsem(&vdev->memory_lock);
2108 EXPORT_SYMBOL_GPL(vfio_pci_core_init_dev);
2110 void vfio_pci_core_release_dev(struct vfio_device *core_vdev)
2112 struct vfio_pci_core_device *vdev =
2113 container_of(core_vdev, struct vfio_pci_core_device, vdev);
2115 mutex_destroy(&vdev->igate);
2116 mutex_destroy(&vdev->ioeventfds_lock);
2117 mutex_destroy(&vdev->vma_lock);
2118 kfree(vdev->region);
2119 kfree(vdev->pm_save);
2121 EXPORT_SYMBOL_GPL(vfio_pci_core_release_dev);
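/*
 * Usage sketch (illustrative): a variant driver wires these core helpers
 * into its vfio_device_ops. The table below mirrors the plain vfio-pci
 * driver and is shown only as an example; open_device is the driver's
 * own wrapper around vfio_pci_core_enable()/vfio_pci_core_finish_enable().
 *
 *	static const struct vfio_device_ops vfio_pci_ops = {
 *		.name		= "vfio-pci",
 *		.init		= vfio_pci_core_init_dev,
 *		.release	= vfio_pci_core_release_dev,
 *		.open_device	= vfio_pci_open_device,
 *		.close_device	= vfio_pci_core_close_device,
 *		.ioctl		= vfio_pci_core_ioctl,
 *		.device_feature	= vfio_pci_core_ioctl_feature,
 *		.read		= vfio_pci_core_read,
 *		.write		= vfio_pci_core_write,
 *		.mmap		= vfio_pci_core_mmap,
 *		.request	= vfio_pci_core_request,
 *		.match		= vfio_pci_core_match,
 *	};
 */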
2123 int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev)
2125 struct pci_dev *pdev = vdev->pdev;
2126 struct device *dev = &pdev->dev;
2129 /* Drivers must set the vfio_pci_core_device to their drvdata */
2130 if (WARN_ON(vdev != dev_get_drvdata(dev)))
2133 if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
2136 if (vdev->vdev.mig_ops) {
2137 if (!(vdev->vdev.mig_ops->migration_get_state &&
2138 vdev->vdev.mig_ops->migration_set_state &&
2139 vdev->vdev.mig_ops->migration_get_data_size) ||
2140 !(vdev->vdev.migration_flags & VFIO_MIGRATION_STOP_COPY))
2144 if (vdev->vdev.log_ops && !(vdev->vdev.log_ops->log_start &&
2145 vdev->vdev.log_ops->log_stop &&
2146 vdev->vdev.log_ops->log_read_and_clear))
2150 * Prevent binding to PFs with VFs enabled, the VFs might be in use
2151 * by the host or other users. We cannot capture the VFs if they
2152 * already exist, nor can we track VF users. Disabling SR-IOV here
2153 * would initiate removing the VFs, which would unbind the driver,
2154 * which is prone to blocking if that VF is also in use by vfio-pci.
2155 * Just reject these PFs and let the user sort it out.
2157 if (pci_num_vf(pdev)) {
2158 pci_warn(pdev, "Cannot bind to PF with SR-IOV enabled\n");
2162 if (pci_is_root_bus(pdev->bus)) {
2163 ret = vfio_assign_device_set(&vdev->vdev, vdev);
2164 } else if (!pci_probe_reset_slot(pdev->slot)) {
2165 ret = vfio_assign_device_set(&vdev->vdev, pdev->slot);
2168 * If there is no slot reset support for this device, the whole
2169 * bus needs to be grouped together to support bus-wide resets.
2171 ret = vfio_assign_device_set(&vdev->vdev, pdev->bus);
2176 ret = vfio_pci_vf_init(vdev);
2179 ret = vfio_pci_vga_init(vdev);
2183 vfio_pci_probe_power_state(vdev);
2186 * pci-core sets the device power state to an unknown value at
2187 * bootup and after being removed from a driver. The only
2188 * transition it allows from this unknown state is to D0, which
2189 * typically happens when a driver calls pci_enable_device().
2190 * We're not ready to enable the device yet, but we do want to
2191 * be able to get to D3. Therefore first do a D0 transition
2192 * before enabling runtime PM.
2194 vfio_pci_set_power_state(vdev, PCI_D0);
2196 dev->driver->pm = &vfio_pci_core_pm_ops;
2197 pm_runtime_allow(dev);
2198 if (!disable_idle_d3)
2199 pm_runtime_put(dev);
2201 ret = vfio_register_group_dev(&vdev->vdev);
2207 if (!disable_idle_d3)
2208 pm_runtime_get_noresume(dev);
2210 pm_runtime_forbid(dev);
2212 vfio_pci_vf_uninit(vdev);
2215 EXPORT_SYMBOL_GPL(vfio_pci_core_register_device);
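/*
 * Usage sketch (illustrative): a variant driver's probe allocates the
 * device with vfio_alloc_device(), stores it as drvdata (the WARN_ON
 * above checks this), then registers it. Names follow the plain
 * vfio-pci driver; error handling is trimmed.
 *
 *	static int vfio_pci_probe(struct pci_dev *pdev,
 *				  const struct pci_device_id *id)
 *	{
 *		struct vfio_pci_core_device *vdev;
 *		int ret;
 *
 *		vdev = vfio_alloc_device(vfio_pci_core_device, vdev,
 *					 &pdev->dev, &vfio_pci_ops);
 *		if (IS_ERR(vdev))
 *			return PTR_ERR(vdev);
 *
 *		dev_set_drvdata(&pdev->dev, vdev);
 *		ret = vfio_pci_core_register_device(vdev);
 *		if (ret)
 *			vfio_put_device(&vdev->vdev);
 *		return ret;
 *	}
 */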
2217 void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev)
2219 vfio_pci_core_sriov_configure(vdev, 0);
2221 vfio_unregister_group_dev(&vdev->vdev);
2223 vfio_pci_vf_uninit(vdev);
2224 vfio_pci_vga_uninit(vdev);
2226 if (!disable_idle_d3)
2227 pm_runtime_get_noresume(&vdev->pdev->dev);
2229 pm_runtime_forbid(&vdev->pdev->dev);
2231 EXPORT_SYMBOL_GPL(vfio_pci_core_unregister_device);
2233 pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev,
2234 pci_channel_state_t state)
2236 struct vfio_pci_core_device *vdev = dev_get_drvdata(&pdev->dev);
2238 mutex_lock(&vdev->igate);
2240 if (vdev->err_trigger)
2241 eventfd_signal(vdev->err_trigger, 1);
2243 mutex_unlock(&vdev->igate);
2245 return PCI_ERS_RESULT_CAN_RECOVER;
2247 EXPORT_SYMBOL_GPL(vfio_pci_core_aer_err_detected);
2249 int vfio_pci_core_sriov_configure(struct vfio_pci_core_device *vdev,
2252 struct pci_dev *pdev = vdev->pdev;
2255 device_lock_assert(&pdev->dev);
2258 mutex_lock(&vfio_pci_sriov_pfs_mutex);
2260 * The thread that adds the vdev to the list is the only thread
2261 * that gets to call pci_enable_sriov() and we will only allow
2262 * it to be called once without going through
2263 * pci_disable_sriov()
2265 if (!list_empty(&vdev->sriov_pfs_item)) {
2269 list_add_tail(&vdev->sriov_pfs_item, &vfio_pci_sriov_pfs);
2270 mutex_unlock(&vfio_pci_sriov_pfs_mutex);
2273 * The PF power state should always be higher than the VF power
2274 * state. The PF can be in low power state either with runtime
2275 * power management (when there is no user) or PCI_PM_CTRL
2276 * register write by the user. If PF is in the low power state,
2277 * then change the power state to D0 first before enabling
2278 * SR-IOV. Also, this function can be called at any time, and
2279 * userspace PCI_PM_CTRL write can race against this code path,
2280 * so protect the same with 'memory_lock'.
2282 ret = pm_runtime_resume_and_get(&pdev->dev);
2286 down_write(&vdev->memory_lock);
2287 vfio_pci_set_power_state(vdev, PCI_D0);
2288 ret = pci_enable_sriov(pdev, nr_virtfn);
2289 up_write(&vdev->memory_lock);
2291 pm_runtime_put(&pdev->dev);
2297 if (pci_num_vf(pdev)) {
2298 pci_disable_sriov(pdev);
2299 pm_runtime_put(&pdev->dev);
2303 mutex_lock(&vfio_pci_sriov_pfs_mutex);
2304 list_del_init(&vdev->sriov_pfs_item);
2306 mutex_unlock(&vfio_pci_sriov_pfs_mutex);
2309 EXPORT_SYMBOL_GPL(vfio_pci_core_sriov_configure);
2311 const struct pci_error_handlers vfio_pci_core_err_handlers = {
2312 .error_detected = vfio_pci_core_aer_err_detected,
2314 EXPORT_SYMBOL_GPL(vfio_pci_core_err_handlers);
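/*
 * Usage sketch (illustrative): a variant driver points its pci_driver at
 * the SR-IOV and error handlers exported above. Field values mirror the
 * plain vfio-pci driver; vfio_pci_sriov_configure is assumed to be the
 * driver's thin wrapper around vfio_pci_core_sriov_configure().
 *
 *	static struct pci_driver vfio_pci_driver = {
 *		.name			= "vfio-pci",
 *		.id_table		= vfio_pci_table,
 *		.probe			= vfio_pci_probe,
 *		.remove			= vfio_pci_remove,
 *		.sriov_configure	= vfio_pci_sriov_configure,
 *		.err_handler		= &vfio_pci_core_err_handlers,
 *		.driver_managed_dma	= true,
 *	};
 */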
2316 static bool vfio_dev_in_groups(struct vfio_pci_core_device *vdev,
2317 struct vfio_pci_group_info *groups)
2321 for (i = 0; i < groups->count; i++)
2322 if (vfio_file_has_dev(groups->files[i], &vdev->vdev))
2327 static int vfio_pci_is_device_in_set(struct pci_dev *pdev, void *data)
2329 struct vfio_device_set *dev_set = data;
2330 struct vfio_device *cur;
2332 list_for_each_entry(cur, &dev_set->device_list, dev_set_list)
2333 if (cur->dev == &pdev->dev)
2339 * vfio-core considers a group to be viable and will create a vfio_device even
2340 * if some devices are bound to drivers like pci-stub or pcieport. Here we
2341 * require all PCI devices to be inside our dev_set since that ensures they stay
2342 * put and that every driver controlling the device can co-ordinate with the device reset.
2345 * Returns the pci_dev to pass to pci_reset_bus() if every PCI device to be
2346 * reset is inside the dev_set, and pci_reset_bus() can succeed. NULL otherwise.
2348 static struct pci_dev *
2349 vfio_pci_dev_set_resettable(struct vfio_device_set *dev_set)
2351 struct pci_dev *pdev;
2353 lockdep_assert_held(&dev_set->lock);
2356 * By definition all PCI devices in the dev_set share the same PCI
2357 * reset, so any pci_dev will have the same outcomes for
2358 * pci_probe_reset_*() and pci_reset_bus().
2360 pdev = list_first_entry(&dev_set->device_list,
2361 struct vfio_pci_core_device,
2362 vdev.dev_set_list)->pdev;
2364 /* pci_reset_bus() is supported */
2365 if (pci_probe_reset_slot(pdev->slot) && pci_probe_reset_bus(pdev->bus))
2368 if (vfio_pci_for_each_slot_or_bus(pdev, vfio_pci_is_device_in_set,
2370 !pci_probe_reset_slot(pdev->slot)))
2375 static int vfio_pci_dev_set_pm_runtime_get(struct vfio_device_set *dev_set)
2377 struct vfio_pci_core_device *cur;
2380 list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) {
2381 ret = pm_runtime_resume_and_get(&cur->pdev->dev);
2389 list_for_each_entry_continue_reverse(cur, &dev_set->device_list,
2391 pm_runtime_put(&cur->pdev->dev);
2397 * We need to get memory_lock for each device, but devices can share mmap_lock,
2398 * therefore we need to zap and hold the vma_lock for each device, and only then
2399 * get each memory_lock.
2401 static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
2402 struct vfio_pci_group_info *groups)
2404 struct vfio_pci_core_device *cur_mem;
2405 struct vfio_pci_core_device *cur_vma;
2406 struct vfio_pci_core_device *cur;
2407 struct pci_dev *pdev;
2411 mutex_lock(&dev_set->lock);
2412 cur_mem = list_first_entry(&dev_set->device_list,
2413 struct vfio_pci_core_device,
2416 pdev = vfio_pci_dev_set_resettable(dev_set);
2423 * Some of the devices in the dev_set can be in the runtime suspended
2424 * state. Increment the usage count for all the devices in the dev_set
2425 * before reset and decrement the same after reset.
2427 ret = vfio_pci_dev_set_pm_runtime_get(dev_set);
2431 list_for_each_entry(cur_vma, &dev_set->device_list, vdev.dev_set_list) {
2433 * Test whether all the affected devices are contained by the
2434 * set of groups provided by the user.
2436 if (!vfio_dev_in_groups(cur_vma, groups)) {
2442 * Locking multiple devices is prone to deadlock; run away and
2443 * unwind if we hit contention.
2445 if (!vfio_pci_zap_and_vma_lock(cur_vma, true)) {
2452 list_for_each_entry(cur_mem, &dev_set->device_list, vdev.dev_set_list) {
2453 if (!down_write_trylock(&cur_mem->memory_lock)) {
2457 mutex_unlock(&cur_mem->vma_lock);
2462 * The pci_reset_bus() will reset all the devices in the bus.
2463 * The power state can be non-D0 for some of the devices in the bus.
2464 * For these devices, the pci_reset_bus() will internally set
2465 * the power state to D0 without vfio driver involvement.
2466 * For the devices which have NoSoftRst-, the reset function can
2467 * cause the PCI config space reset without restoring the original
2468 * state (saved locally in 'vdev->pm_save').
2470 list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list)
2471 vfio_pci_set_power_state(cur, PCI_D0);
2473 ret = pci_reset_bus(pdev);
2476 list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) {
2482 up_write(&cur->memory_lock);
2484 mutex_unlock(&cur->vma_lock);
2487 list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list)
2488 pm_runtime_put(&cur->pdev->dev);
2490 mutex_unlock(&dev_set->lock);
2494 static bool vfio_pci_dev_set_needs_reset(struct vfio_device_set *dev_set)
2496 struct vfio_pci_core_device *cur;
2497 bool needs_reset = false;
2499 /* No other VFIO device in the set can be open. */
2500 if (vfio_device_set_open_count(dev_set) > 1)
2503 list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list)
2504 needs_reset |= cur->needs_reset;
2509 * If a bus or slot reset is available for the provided dev_set and:
2510 * - All of the devices affected by that bus or slot reset are unused
2511 * - At least one of the affected devices is marked dirty via
2512 * needs_reset (such as by lack of FLR support)
2513 * Then attempt to perform that bus or slot reset.
2515 static void vfio_pci_dev_set_try_reset(struct vfio_device_set *dev_set)
2517 struct vfio_pci_core_device *cur;
2518 struct pci_dev *pdev;
2519 bool reset_done = false;
2521 if (!vfio_pci_dev_set_needs_reset(dev_set))
2524 pdev = vfio_pci_dev_set_resettable(dev_set);
2529 * Some of the devices in the bus can be in the runtime suspended
2530 * state. Increment the usage count for all the devices in the dev_set
2531 * before reset and decrement the same after reset.
2533 if (!disable_idle_d3 && vfio_pci_dev_set_pm_runtime_get(dev_set))
2536 if (!pci_reset_bus(pdev))
2539 list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) {
2541 cur->needs_reset = false;
2543 if (!disable_idle_d3)
2544 pm_runtime_put(&cur->pdev->dev);
2548 void vfio_pci_core_set_params(bool is_nointxmask, bool is_disable_vga,
2549 bool is_disable_idle_d3)
2551 nointxmask = is_nointxmask;
2552 disable_vga = is_disable_vga;
2553 disable_idle_d3 = is_disable_idle_d3;
2555 EXPORT_SYMBOL_GPL(vfio_pci_core_set_params);
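/*
 * Usage sketch (illustrative): the plain vfio-pci driver forwards its own
 * module parameters here from its module init, roughly:
 *
 *	vfio_pci_core_set_params(nointxmask, disable_vga, disable_idle_d3);
 *
 * so that this core module and the binding driver agree on nointxmask,
 * disable_vga and disable_idle_d3.
 */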
2557 static void vfio_pci_core_cleanup(void)
2559 vfio_pci_uninit_perm_bits();
2562 static int __init vfio_pci_core_init(void)
2564 /* Allocate shared config space permission data used by all devices */
2565 return vfio_pci_init_perm_bits();
2568 module_init(vfio_pci_core_init);
2569 module_exit(vfio_pci_core_cleanup);
2571 MODULE_LICENSE("GPL v2");
2572 MODULE_AUTHOR(DRIVER_AUTHOR);
2573 MODULE_DESCRIPTION(DRIVER_DESC);