return ret;
}
-static int intel_iommu_enable_iopf(struct device *dev)
+static int intel_iommu_disable_iopf(struct device *dev)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);
+	struct intel_iommu *iommu = info->iommu;

-	if (info && info->pri_supported)
-		return 0;
+	if (!info->pri_enabled)
+		return -EINVAL;

-	return -ENODEV;
+	/*
+	 * PCIe spec states that by clearing PRI enable bit, the Page
+	 * Request Interface will not issue new page requests, but has
+	 * outstanding page requests that have been transmitted or are
+	 * queued for transmission. This is supposed to be called after
+	 * the device driver has stopped DMA, all PASIDs have been
+	 * unbound and the outstanding PRQs have been drained.
+	 */
+	pci_disable_pri(to_pci_dev(dev));
+	info->pri_enabled = 0;
+
+	/*
+	 * With PRI disabled and outstanding PRQs drained, unregistering
+	 * fault handler and removing device from iopf queue should never
+	 * fail.
+	 */
+	WARN_ON(iommu_unregister_device_fault_handler(dev));
+	WARN_ON(iopf_queue_remove_device(iommu->iopf_queue, dev));
+
+	return 0;
}
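For context: the disable path above unwinds its enable-side counterpart in reverse order, which is why the teardown can WARN rather than fail. A minimal sketch of what that reworked enable path plausibly looks like in the same driver, assuming the PRQ_DEPTH queue-depth constant and the iommu_queue_iopf handler used by the SVA code (both assumptions, not shown in this hunk):

static int intel_iommu_enable_iopf(struct device *dev)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);
	struct intel_iommu *iommu = info->iommu;
	int ret;

	if (!info->pri_supported || info->pri_enabled)
		return -ENODEV;

	/* Queue first: page requests may arrive as soon as PRI is on. */
	ret = iopf_queue_add_device(iommu->iopf_queue, dev);
	if (ret)
		return ret;

	/* Then the handler that feeds faults into the iopf queue. */
	ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
	if (ret)
		goto iopf_remove_device;

	/* Only now let the device issue page requests. */
	ret = pci_enable_pri(to_pci_dev(dev), PRQ_DEPTH);
	if (ret)
		goto iopf_unregister_handler;
	info->pri_enabled = 1;

	return 0;

iopf_unregister_handler:
	iommu_unregister_device_fault_handler(dev);
iopf_remove_device:
	iopf_queue_remove_device(iommu->iopf_queue, dev);
	return ret;
}

This mirrors the comment in the disable path: teardown cannot fail precisely because setup enables PRI last.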
-void iommu_release_device(struct device *dev)
+/*
+ * Remove a device from a group's device list and return the group device
+ * if successful.
+ */
+static struct group_device *
+__iommu_group_remove_device(struct iommu_group *group, struct device *dev)
{
+	struct group_device *device;
+
+	lockdep_assert_held(&group->mutex);
+	list_for_each_entry(device, &group->devices, list) {
+		if (device->dev == dev) {
+			list_del(&device->list);
+			return device;
+		}
+	}
+
+	return NULL;
+}
+
+/*
+ * Release a device from its group and decrements the iommu group reference
+ * count.
+ */
+static void __iommu_group_release_device(struct iommu_group *group,
+					 struct group_device *grp_dev)
+{
+	struct device *dev = grp_dev->dev;
+
+	sysfs_remove_link(group->devices_kobj, grp_dev->name);
+	sysfs_remove_link(&dev->kobj, "iommu_group");
+
+	trace_remove_device_from_group(group->id, dev);
+
+	kfree(grp_dev->name);
+	kfree(grp_dev);
+	dev->iommu_group = NULL;
+	kobject_put(group->devices_kobj);
+}
+
+static void iommu_release_device(struct device *dev)
+{
+	struct iommu_group *group = dev->iommu_group;
+	struct group_device *device;
const struct iommu_ops *ops;
-	if (!dev->iommu)
+	if (!dev->iommu || !group)
return;
iommu_device_unlink(dev->iommu->iommu_dev, dev);
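The two helpers added above split list removal (done under group->mutex) from the sysfs and refcount teardown, so a caller can drop the lock before releasing. A sketch of how a caller such as iommu_group_remove_device() would plausibly compose them (the dev_info message is illustrative, not taken from this hunk):

void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct group_device *device;

	if (!group)
		return;

	dev_info(dev, "Removing from iommu group %d\n", group->id);

	/* Unlink from the group's device list under the group mutex... */
	mutex_lock(&group->mutex);
	device = __iommu_group_remove_device(group, dev);
	mutex_unlock(&group->mutex);

	/* ...then tear down sysfs links and drop the group reference. */
	if (device)
		__iommu_group_release_device(group, device);
}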
* Please take a closer look if intended to use for other purposes.
*/
static int iommu_change_dev_def_domain(struct iommu_group *group,
-				       struct device *prev_dev, int type)
+				       struct device *dev, int type)
{
+	struct __group_domain_type gtype = {NULL, 0};
struct iommu_domain *prev_dom;
-	struct group_device *grp_dev;
-	int ret, dev_def_dom;
-	struct device *dev;
-
-	mutex_lock(&group->mutex);
-
-	if (group->default_domain != group->domain) {
-		dev_err_ratelimited(prev_dev, "Group not assigned to default domain\n");
-		ret = -EBUSY;
-		goto out;
-	}
-
-	/*
-	 * iommu group wasn't locked while acquiring device lock in
-	 * iommu_group_store_type(). So, make sure that the device count hasn't
-	 * changed while acquiring device lock.
-	 *
-	 * Changing default domain of an iommu group with two or more devices
-	 * isn't supported because there could be a potential deadlock. Consider
-	 * the following scenario. T1 is trying to acquire device locks of all
-	 * the devices in the group and before it could acquire all of them,
-	 * there could be another thread T2 (from different sub-system and use
-	 * case) that has already acquired some of the device locks and might be
-	 * waiting for T1 to release other device locks.
-	 */
-	if (iommu_group_device_count(group) != 1) {
-		dev_err_ratelimited(prev_dev, "Cannot change default domain: Group has more than one device\n");
-		ret = -EINVAL;
-		goto out;
-	}
-
-	/* Since group has only one device */
-	grp_dev = list_first_entry(&group->devices, struct group_device, list);
-	dev = grp_dev->dev;
-
-	if (prev_dev != dev) {
-		dev_err_ratelimited(prev_dev, "Cannot change default domain: Device has been changed\n");
-		ret = -EBUSY;
-		goto out;
-	}
+	int ret;
+
+	lockdep_assert_held(&group->mutex);

prev_dom = group->default_domain;
-	if (!prev_dom) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	dev_def_dom = iommu_get_def_domain_type(dev);
+	__iommu_group_for_each_dev(group, &gtype,
+				   probe_get_default_domain_type);
if (!type) {
/*
* If the user hasn't requested any specific type of domain and