static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
int amd_iommu_max_glx_val = -1;
+ +static struct dma_map_ops amd_iommu_dma_ops;
+ +
/*
 * general struct to manage commands sent to an IOMMU
*/
return true;
}
+++ ++static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
+++ ++{
+++ ++ pci_dev_put(*from);
+++ ++ *from = to;
+++ ++}
+++ ++
+++ ++#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
+++ ++
static int iommu_init_device(struct device *dev)
{
--- -- struct pci_dev *pdev = to_pci_dev(dev);
+++ ++ struct pci_dev *dma_pdev, *pdev = to_pci_dev(dev);
struct iommu_dev_data *dev_data;
+++ ++ struct iommu_group *group;
u16 alias;
+++ ++ int ret;
if (dev->archdata.iommu)
return 0;
return -ENOTSUPP;
}
dev_data->alias_data = alias_data;
+++ ++
+++ ++ dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff);
+++ ++ } else
+++ ++ dma_pdev = pci_dev_get(pdev);
+++ ++
+++ ++ swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
+++ ++
+++ ++ if (dma_pdev->multifunction &&
+++ ++ !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
+++ ++ swap_pci_ref(&dma_pdev,
+++ ++ pci_get_slot(dma_pdev->bus,
+++ ++ PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
+++ ++ 0)));
+++ ++
+++ ++ while (!pci_is_root_bus(dma_pdev->bus)) {
+++ ++ if (pci_acs_path_enabled(dma_pdev->bus->self,
+++ ++ NULL, REQ_ACS_FLAGS))
+++ ++ break;
+++ ++
+++ ++ swap_pci_ref(&dma_pdev, pci_dev_get(dma_pdev->bus->self));
+++ ++ }
+++ ++
+++ ++ group = iommu_group_get(&dma_pdev->dev);
+++ ++ pci_dev_put(dma_pdev);
+++ ++ if (!group) {
+++ ++ group = iommu_group_alloc();
+++ ++ if (IS_ERR(group))
+++ ++ return PTR_ERR(group);
}
+++ ++ ret = iommu_group_add_device(group, dev);
+++ ++
+++ ++ iommu_group_put(group);
+++ ++
+++ ++ if (ret)
+++ ++ return ret;
+++ ++
if (pci_iommuv2_capable(pdev)) {
struct amd_iommu *iommu;
static void iommu_uninit_device(struct device *dev)
{
+++ ++ iommu_group_remove_device(dev);
+++ ++
/*
* Nothing to do here - we keep dev_data around for unplugged devices
* and reuse it when the device is re-plugged - not doing so would
DECLARE_STATS_COUNTER(invalidate_iotlb_all);
DECLARE_STATS_COUNTER(pri_requests);
-- ---
static struct dentry *stats_dir;
static struct dentry *de_fflush;
return;
de_fflush = debugfs_create_bool("fullflush", 0444, stats_dir,
- - (u32 *)&amd_iommu_unmap_flush);
+ + &amd_iommu_unmap_flush);
amd_iommu_stats_add(&compl_wait);
amd_iommu_stats_add(&cnt_map_single);
/* FIXME: Move this to PCI code */
#define PCI_PRI_TLP_OFF (1 << 15)
-- ---bool pci_pri_tlp_required(struct pci_dev *pdev)
++ +++static bool pci_pri_tlp_required(struct pci_dev *pdev)
{
u16 status;
int pos;
iommu_init_device(dev);
+ ++++ /*
+ ++++	 * dev_data was still NULL here; it has just been
+ ++++	 * initialized by iommu_init_device() above
+ ++++ */
+ ++++ dev_data = get_dev_data(dev);
+ ++++
+ ++++ if (iommu_pass_through || dev_data->iommu_v2) {
+ ++++ dev_data->passthrough = true;
+ ++++ attach_device(dev, pt_domain);
+ ++++ break;
+ ++++ }
+ ++++
domain = domain_for_device(dev);
/* allocate a protection domain if a device is added */
list_add_tail(&dma_domain->list, &iommu_pd_list);
spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
- - - if (!dev_data->passthrough)
- - - dev->archdata.dma_ops = &amd_iommu_dma_ops;
- - - else
- - - dev->archdata.dma_ops = &nommu_dma_ops;
+ + dev_data = get_dev_data(dev);
+ +
+ ++++ dev->archdata.dma_ops = &amd_iommu_dma_ops;
+ +
break;
case BUS_NOTIFY_DEL_DEVICE:
amd_iommu_stats_init();
++ +++ if (amd_iommu_unmap_flush)
++ +++ pr_info("AMD-Vi: IO/TLB flush on unmap enabled\n");
++ +++ else
++ +++ pr_info("AMD-Vi: Lazy IO/TLB flushing enabled\n");
++ +++
return 0;
free_domains:
dom->priv = domain;
+++++ dom->geometry.aperture_start = 0;
+++++ dom->geometry.aperture_end = ~0ULL;
+++++ dom->geometry.force_aperture = true;
+++++
return 0;
out_free:
return 0;
}
--- --static int amd_iommu_device_group(struct device *dev, unsigned int *groupid)
--- --{
--- -- struct iommu_dev_data *dev_data = dev->archdata.iommu;
--- -- struct pci_dev *pdev = to_pci_dev(dev);
--- -- u16 devid;
--- --
--- -- if (!dev_data)
--- -- return -ENODEV;
--- --
--- -- if (pdev->is_virtfn || !iommu_group_mf)
--- -- devid = dev_data->devid;
--- -- else
--- -- devid = calc_devid(pdev->bus->number,
--- -- PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
--- --
--- -- *groupid = amd_iommu_alias_table[devid];
--- --
--- -- return 0;
--- --}
--- --
static struct iommu_ops amd_iommu_ops = {
.domain_init = amd_iommu_domain_init,
.domain_destroy = amd_iommu_domain_destroy,
.unmap = amd_iommu_unmap,
.iova_to_phys = amd_iommu_iova_to_phys,
.domain_has_cap = amd_iommu_domain_has_cap,
--- -- .device_group = amd_iommu_device_group,
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
};
u16 flags;
};
-- ---struct device_state **state_table;
++ +++static struct device_state **state_table;
static spinlock_t state_lock;
/* List and lock for all pasid_states */
atomic_set(&pasid_state->count, 1);
init_waitqueue_head(&pasid_state->wq);
+ ++++ spin_lock_init(&pasid_state->lock);
+ ++++
pasid_state->task = task;
pasid_state->mm = get_task_mm(task);
pasid_state->device_state = dev_state;
domain_update_iommu_cap(dmar_domain);
domain->priv = dmar_domain;
+++++ domain->geometry.aperture_start = 0;
+++++ domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
+++++ domain->geometry.force_aperture = true;
+++++
return 0;
}
return 0;
}
--- --/*
--- -- * Group numbers are arbitrary. Device with the same group number
--- -- * indicate the iommu cannot differentiate between them. To avoid
--- -- * tracking used groups we just use the seg|bus|devfn of the lowest
--- -- * level we're able to differentiate devices
--- -- */
--- --static int intel_iommu_device_group(struct device *dev, unsigned int *groupid)
+++ ++static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
{
--- -- struct pci_dev *pdev = to_pci_dev(dev);
--- -- struct pci_dev *bridge;
--- -- union {
--- -- struct {
--- -- u8 devfn;
--- -- u8 bus;
--- -- u16 segment;
--- -- } pci;
--- -- u32 group;
--- -- } id;
+++ ++ pci_dev_put(*from);
+++ ++ *from = to;
+++ ++}
--- -- if (iommu_no_mapping(dev))
--- -- return -ENODEV;
+++ ++#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
--- -- id.pci.segment = pci_domain_nr(pdev->bus);
--- -- id.pci.bus = pdev->bus->number;
--- -- id.pci.devfn = pdev->devfn;
+++ ++static int intel_iommu_add_device(struct device *dev)
+++ ++{
+++ ++ struct pci_dev *pdev = to_pci_dev(dev);
+++ ++ struct pci_dev *bridge, *dma_pdev;
+++ ++ struct iommu_group *group;
+++ ++ int ret;
--- -- if (!device_to_iommu(id.pci.segment, id.pci.bus, id.pci.devfn))
+++ ++ if (!device_to_iommu(pci_domain_nr(pdev->bus),
+++ ++ pdev->bus->number, pdev->devfn))
return -ENODEV;
bridge = pci_find_upstream_pcie_bridge(pdev);
if (bridge) {
--- -- if (pci_is_pcie(bridge)) {
--- -- id.pci.bus = bridge->subordinate->number;
--- -- id.pci.devfn = 0;
--- -- } else {
--- -- id.pci.bus = bridge->bus->number;
--- -- id.pci.devfn = bridge->devfn;
--- -- }
+++ ++ if (pci_is_pcie(bridge))
+++ ++ dma_pdev = pci_get_domain_bus_and_slot(
+++ ++ pci_domain_nr(pdev->bus),
+++ ++ bridge->subordinate->number, 0);
+++ ++ else
+++ ++ dma_pdev = pci_dev_get(bridge);
+++ ++ } else
+++ ++ dma_pdev = pci_dev_get(pdev);
+++ ++
+++ ++ swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
+++ ++
+++ ++ if (dma_pdev->multifunction &&
+++ ++ !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
+++ ++ swap_pci_ref(&dma_pdev,
+++ ++ pci_get_slot(dma_pdev->bus,
+++ ++ PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
+++ ++ 0)));
+++ ++
+++ ++ while (!pci_is_root_bus(dma_pdev->bus)) {
+++ ++ if (pci_acs_path_enabled(dma_pdev->bus->self,
+++ ++ NULL, REQ_ACS_FLAGS))
+++ ++ break;
+++ ++
+++ ++ swap_pci_ref(&dma_pdev, pci_dev_get(dma_pdev->bus->self));
+++ ++ }
+++ ++
+++ ++ group = iommu_group_get(&dma_pdev->dev);
+++ ++ pci_dev_put(dma_pdev);
+++ ++ if (!group) {
+++ ++ group = iommu_group_alloc();
+++ ++ if (IS_ERR(group))
+++ ++ return PTR_ERR(group);
}
--- -- if (!pdev->is_virtfn && iommu_group_mf)
--- -- id.pci.devfn = PCI_DEVFN(PCI_SLOT(id.pci.devfn), 0);
+++ ++ ret = iommu_group_add_device(group, dev);
--- -- *groupid = id.group;
+++ ++ iommu_group_put(group);
+++ ++ return ret;
+++ ++}
--- -- return 0;
+++ ++static void intel_iommu_remove_device(struct device *dev)
+++ ++{
+++ ++ iommu_group_remove_device(dev);
}
static struct iommu_ops intel_iommu_ops = {
.unmap = intel_iommu_unmap,
.iova_to_phys = intel_iommu_iova_to_phys,
.domain_has_cap = intel_iommu_domain_has_cap,
--- -- .device_group = intel_iommu_device_group,
+++ ++ .add_device = intel_iommu_add_device,
+++ ++ .remove_device = intel_iommu_remove_device,
.pgsize_bitmap = INTEL_IOMMU_PGSIZES,
};
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
+++ ++#include <linux/idr.h>
+++ ++#include <linux/notifier.h>
+++ ++#include <linux/err.h>
+++ ++
+++ ++static struct kset *iommu_group_kset;
+++ ++static struct ida iommu_group_ida;
+++ ++static struct mutex iommu_group_mutex;
+++ ++
+++ ++struct iommu_group {
+++ ++ struct kobject kobj;
+++ ++ struct kobject *devices_kobj;
+++ ++ struct list_head devices;
+++ ++ struct mutex mutex;
+++ ++ struct blocking_notifier_head notifier;
+++ ++ void *iommu_data;
+++ ++ void (*iommu_data_release)(void *iommu_data);
+++ ++ char *name;
+++ ++ int id;
+++ ++};
+++ ++
+++ ++struct iommu_device {
+++ ++ struct list_head list;
+++ ++ struct device *dev;
+++ ++ char *name;
+++ ++};
+++ ++
+++ ++struct iommu_group_attribute {
+++ ++ struct attribute attr;
+++ ++ ssize_t (*show)(struct iommu_group *group, char *buf);
+++ ++ ssize_t (*store)(struct iommu_group *group,
+++ ++ const char *buf, size_t count);
+++ ++};
+++ ++
+++ ++#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
+++ ++struct iommu_group_attribute iommu_group_attr_##_name = \
+++ ++ __ATTR(_name, _mode, _show, _store)
+++ ++
+++ ++#define to_iommu_group_attr(_attr) \
+++ ++ container_of(_attr, struct iommu_group_attribute, attr)
+++ ++#define to_iommu_group(_kobj) \
+++ ++ container_of(_kobj, struct iommu_group, kobj)
--- --static ssize_t show_iommu_group(struct device *dev,
--- -- struct device_attribute *attr, char *buf)
+++ ++static ssize_t iommu_group_attr_show(struct kobject *kobj,
+++ ++ struct attribute *__attr, char *buf)
{
--- -- unsigned int groupid;
+++ ++ struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
+++ ++ struct iommu_group *group = to_iommu_group(kobj);
+++ ++ ssize_t ret = -EIO;
--- -- if (iommu_device_group(dev, &groupid))
--- -- return 0;
+++ ++ if (attr->show)
+++ ++ ret = attr->show(group, buf);
+++ ++ return ret;
+++ ++}
+++ +
- return sprintf(buf, "%u", groupid);
+++ ++static ssize_t iommu_group_attr_store(struct kobject *kobj,
+++ ++ struct attribute *__attr,
+++ ++ const char *buf, size_t count)
+++ ++{
+++ ++ struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
+++ ++ struct iommu_group *group = to_iommu_group(kobj);
+++ ++ ssize_t ret = -EIO;
+++ ++
+++ ++ if (attr->store)
+++ ++ ret = attr->store(group, buf, count);
+++ ++ return ret;
+++ + }
-static DEVICE_ATTR(iommu_group, S_IRUGO, show_iommu_group, NULL);
+++ +
-static int add_iommu_group(struct device *dev, void *data)
+++ ++static const struct sysfs_ops iommu_group_sysfs_ops = {
+++ ++ .show = iommu_group_attr_show,
+++ ++ .store = iommu_group_attr_store,
+++ ++};
+
--- - return sprintf(buf, "%u", groupid);
+++ ++static int iommu_group_create_file(struct iommu_group *group,
+++ ++ struct iommu_group_attribute *attr)
+++ ++{
+++ ++ return sysfs_create_file(&group->kobj, &attr->attr);
+}
--- - static DEVICE_ATTR(iommu_group, S_IRUGO, show_iommu_group, NULL);
+
--- - static int add_iommu_group(struct device *dev, void *data)
+++ ++static void iommu_group_remove_file(struct iommu_group *group,
+++ ++ struct iommu_group_attribute *attr)
+++ + {
- unsigned int groupid;
+++ ++ sysfs_remove_file(&group->kobj, &attr->attr);
+++ ++}
+++ ++
+++ ++static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
+++ ++{
+++ ++ return sprintf(buf, "%s\n", group->name);
+++ ++}
+++ ++
+++ ++static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
+++ +
- if (iommu_device_group(dev, &groupid) == 0)
- return device_create_file(dev, &dev_attr_iommu_group);
+++ ++static void iommu_group_release(struct kobject *kobj)
+++ ++{
+++ ++ struct iommu_group *group = to_iommu_group(kobj);
+++ ++
+++ ++ if (group->iommu_data_release)
+++ ++ group->iommu_data_release(group->iommu_data);
+++ ++
+++ ++ mutex_lock(&iommu_group_mutex);
+++ ++ ida_remove(&iommu_group_ida, group->id);
+++ ++ mutex_unlock(&iommu_group_mutex);
+++ ++
+++ ++ kfree(group->name);
+++ ++ kfree(group);
+++ ++}
+++ ++
+++ ++static struct kobj_type iommu_group_ktype = {
+++ ++ .sysfs_ops = &iommu_group_sysfs_ops,
+++ ++ .release = iommu_group_release,
+++ ++};
+++ ++
+++ ++/**
+++ ++ * iommu_group_alloc - Allocate a new group
+++ ++ *
+++ ++ * This function is called by an iommu driver to allocate a new iommu
+++ ++ * group. The iommu group represents the minimum granularity of the iommu.
+++ ++ * Upon successful return, the caller holds a reference to the returned
+++ ++ * group in order to keep the group alive until devices are added. Use
+++ ++ * iommu_group_put() to release this extra reference count, allowing the
+++ ++ * group to be automatically reclaimed once it has no devices or external
+++ ++ * references.
+++ ++ */
+++ ++struct iommu_group *iommu_group_alloc(void)
+++ ++{
+++ ++ struct iommu_group *group;
+++ ++ int ret;
+++ ++
+++ ++ group = kzalloc(sizeof(*group), GFP_KERNEL);
+++ ++ if (!group)
+++ ++ return ERR_PTR(-ENOMEM);
+++ ++
+++ ++ group->kobj.kset = iommu_group_kset;
+++ ++ mutex_init(&group->mutex);
+++ ++ INIT_LIST_HEAD(&group->devices);
+++ ++ BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
+++ ++
+++ ++ mutex_lock(&iommu_group_mutex);
+++ ++
+++ ++again:
+++ ++ if (unlikely(0 == ida_pre_get(&iommu_group_ida, GFP_KERNEL))) {
+++ ++ kfree(group);
+++ ++ mutex_unlock(&iommu_group_mutex);
+++ ++ return ERR_PTR(-ENOMEM);
+++ ++ }
+++ ++
+++ ++ if (-EAGAIN == ida_get_new(&iommu_group_ida, &group->id))
+++ ++ goto again;
+++ ++
+++ ++ mutex_unlock(&iommu_group_mutex);
+++ ++
+++ ++ ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
+++ ++ NULL, "%d", group->id);
+++ ++ if (ret) {
+++ ++ mutex_lock(&iommu_group_mutex);
+++ ++ ida_remove(&iommu_group_ida, group->id);
+++ ++ mutex_unlock(&iommu_group_mutex);
+++ ++ kfree(group);
+++ ++ return ERR_PTR(ret);
+++ ++ }
+++ ++
+++ ++ group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
+++ ++ if (!group->devices_kobj) {
+++ ++ kobject_put(&group->kobj); /* triggers .release & free */
+++ ++ return ERR_PTR(-ENOMEM);
+++ ++ }
+++ ++
+++ ++ /*
+++ ++ * The devices_kobj holds a reference on the group kobject, so
+++ ++ * as long as that exists so will the group. We can therefore
+++ ++ * use the devices_kobj for reference counting.
+++ ++ */
+++ ++ kobject_put(&group->kobj);
+++ ++
+++ ++ return group;
+++ ++}
+++ ++EXPORT_SYMBOL_GPL(iommu_group_alloc);
+++ ++
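
For illustration, a minimal sketch of how an IOMMU driver consumes this
interface; the example_* names are invented for this note and are not part
of the patch:

	#include <linux/err.h>
	#include <linux/iommu.h>

	static int example_setup_group(struct device *dev)
	{
		struct iommu_group *group;
		int ret;

		group = iommu_group_alloc();	/* returns with one reference held */
		if (IS_ERR(group))
			return PTR_ERR(group);

		ret = iommu_group_add_device(group, dev);

		/* Drop the allocation reference; the device now pins the group. */
		iommu_group_put(group);

		return ret;
	}
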
+++ ++/**
+++ ++ * iommu_group_get_iommudata - retrieve iommu_data registered for a group
+++ ++ * @group: the group
+++ ++ *
+++ ++ * iommu drivers can store data in the group for use when doing iommu
+++ ++ * operations. This function provides a way to retrieve it. Caller
+++ ++ * should hold a group reference.
+++ ++ */
+++ ++void *iommu_group_get_iommudata(struct iommu_group *group)
+++ ++{
+++ ++ return group->iommu_data;
+++ ++}
+++ ++EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);
+++ ++
+++ ++/**
+++ ++ * iommu_group_set_iommudata - set iommu_data for a group
+++ ++ * @group: the group
+++ ++ * @iommu_data: new data
+++ ++ * @release: release function for iommu_data
+++ ++ *
+++ ++ * iommu drivers can store data in the group for use when doing iommu
+++ ++ * operations. This function provides a way to set the data after
+++ ++ * the group has been allocated. Caller should hold a group reference.
+++ ++ */
+++ ++void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
+++ ++ void (*release)(void *iommu_data))
+{
--- - unsigned int groupid;
+++ ++ group->iommu_data = iommu_data;
+++ ++ group->iommu_data_release = release;
+++ ++}
+++ ++EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
+
--- - if (iommu_device_group(dev, &groupid) == 0)
--- - return device_create_file(dev, &dev_attr_iommu_group);
+++ ++/**
+++ ++ * iommu_group_set_name - set name for a group
+++ ++ * @group: the group
+++ ++ * @name: name
+++ ++ *
+++ ++ * Allow an iommu driver to set a name for a group. When set, it will
+++ ++ * appear in a "name" attribute file under the group in sysfs.
+++ ++ */
+++ ++int iommu_group_set_name(struct iommu_group *group, const char *name)
+++ ++{
+++ ++ int ret;
+++ ++
+++ ++ if (group->name) {
+++ ++ iommu_group_remove_file(group, &iommu_group_attr_name);
+++ ++ kfree(group->name);
+++ ++ group->name = NULL;
+++ ++ if (!name)
+++ ++ return 0;
+++ ++ }
+++ ++
+++ ++ group->name = kstrdup(name, GFP_KERNEL);
+++ ++ if (!group->name)
+++ ++ return -ENOMEM;
+++ ++
+++ ++ ret = iommu_group_create_file(group, &iommu_group_attr_name);
+++ ++ if (ret) {
+++ ++ kfree(group->name);
+++ ++ group->name = NULL;
+++ ++ return ret;
+++ ++ }
return 0;
}
-static int remove_iommu_group(struct device *dev)
+++ ++EXPORT_SYMBOL_GPL(iommu_group_set_name);
+++ +
- unsigned int groupid;
+++ ++/**
+++ ++ * iommu_group_add_device - add a device to an iommu group
+++ ++ * @group: the group into which to add the device (reference should be held)
+++ ++ * @dev: the device
+++ ++ *
+++ ++ * This function is called by an iommu driver to add a device into a
+++ ++ * group. Adding a device increments the group reference count.
+++ ++ */
+++ ++int iommu_group_add_device(struct iommu_group *group, struct device *dev)
+++ + {
- if (iommu_device_group(dev, &groupid) == 0)
- device_remove_file(dev, &dev_attr_iommu_group);
+++ ++ int ret, i = 0;
+++ ++ struct iommu_device *device;
+++ ++
+++ ++ device = kzalloc(sizeof(*device), GFP_KERNEL);
+++ ++ if (!device)
+++ ++ return -ENOMEM;
+++ ++
+++ ++ device->dev = dev;
+++ ++
+++ ++ ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
+++ ++ if (ret) {
+++ ++ kfree(device);
+++ ++ return ret;
+++ ++ }
+++ ++
+++ ++ device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
+++ ++rename:
+++ ++ if (!device->name) {
+++ ++ sysfs_remove_link(&dev->kobj, "iommu_group");
+++ ++ kfree(device);
+++ ++ return -ENOMEM;
+++ ++ }
+++ +
-static int iommu_device_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
+++ ++ ret = sysfs_create_link_nowarn(group->devices_kobj,
+++ ++ &dev->kobj, device->name);
+++ ++ if (ret) {
+++ ++ kfree(device->name);
+++ ++ if (ret == -EEXIST && i >= 0) {
+++ ++ /*
+++ ++ * Account for the slim chance of collision
+++ ++ * and append an instance to the name.
+++ ++ */
+++ ++ device->name = kasprintf(GFP_KERNEL, "%s.%d",
+++ ++ kobject_name(&dev->kobj), i++);
+++ ++ goto rename;
+++ ++ }
+++ ++
+++ ++ sysfs_remove_link(&dev->kobj, "iommu_group");
+++ ++ kfree(device);
+++ ++ return ret;
+++ ++ }
+++ ++
+++ ++ kobject_get(group->devices_kobj);
+++ ++
+++ ++ dev->iommu_group = group;
+++ ++
+++ ++ mutex_lock(&group->mutex);
+++ ++ list_add_tail(&device->list, &group->devices);
+++ ++ mutex_unlock(&group->mutex);
+++ +
+++ ++ /* Notify any listeners about change to group. */
+++ ++ blocking_notifier_call_chain(&group->notifier,
+++ ++ IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
+++ + return 0;
+++ + }
+++ ++EXPORT_SYMBOL_GPL(iommu_group_add_device);
+++ +
--- - static int remove_iommu_group(struct device *dev)
+++ ++/**
+++ ++ * iommu_group_remove_device - remove a device from its current group
+++ ++ * @dev: device to be removed
+++ ++ *
+++ ++ * This function is called by an iommu driver to remove the device from
+++ ++ * its current group. This decrements the iommu group reference count.
+++ ++ */
+++ ++void iommu_group_remove_device(struct device *dev)
+++ ++{
+++ ++ struct iommu_group *group = dev->iommu_group;
+++ ++ struct iommu_device *tmp_device, *device = NULL;
+++ ++
+++ ++ /* Pre-notify listeners that a device is being removed. */
+++ ++ blocking_notifier_call_chain(&group->notifier,
+++ ++ IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);
+++ ++
+++ ++ mutex_lock(&group->mutex);
+++ ++ list_for_each_entry(tmp_device, &group->devices, list) {
+++ ++ if (tmp_device->dev == dev) {
+++ ++ device = tmp_device;
+++ ++ list_del(&device->list);
+++ ++ break;
+++ ++ }
+++ ++ }
+++ ++ mutex_unlock(&group->mutex);
+++ ++
+++ ++ if (!device)
+++ ++ return;
+++ ++
+++ ++ sysfs_remove_link(group->devices_kobj, device->name);
+++ ++ sysfs_remove_link(&dev->kobj, "iommu_group");
+++ ++
+++ ++ kfree(device->name);
+++ ++ kfree(device);
+++ ++ dev->iommu_group = NULL;
+++ ++ kobject_put(group->devices_kobj);
+++ ++}
+++ ++EXPORT_SYMBOL_GPL(iommu_group_remove_device);
+++ ++
+++ ++/**
+++ ++ * iommu_group_for_each_dev - iterate over each device in the group
+++ ++ * @group: the group
+++ ++ * @data: caller opaque data to be passed to callback function
+++ ++ * @fn: caller supplied callback function
+++ ++ *
+++ ++ * This function is called by group users to iterate over group devices.
+++ ++ * Callers should hold a reference count to the group during callback.
+++ ++ * The group->mutex is held across callbacks, which will block calls to
+++ ++ * iommu_group_add/remove_device.
+++ ++ */
+++ ++int iommu_group_for_each_dev(struct iommu_group *group, void *data,
+++ ++ int (*fn)(struct device *, void *))
+++ ++{
+++ ++ struct iommu_device *device;
+++ ++ int ret = 0;
+++ ++
+++ ++ mutex_lock(&group->mutex);
+++ ++ list_for_each_entry(device, &group->devices, list) {
+++ ++ ret = fn(device->dev, data);
+++ ++ if (ret)
+++ ++ break;
+++ ++ }
+++ ++ mutex_unlock(&group->mutex);
+++ ++ return ret;
+++ ++}
+++ ++EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
+++ ++
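
As a usage sketch (names invented), the iterator makes per-group queries
trivial, e.g. counting the group's members:

	#include <linux/iommu.h>

	static int example_count_one(struct device *dev, void *data)
	{
		int *count = data;

		(*count)++;
		return 0;	/* a non-zero return would stop the walk */
	}

	static int example_group_size(struct iommu_group *group)
	{
		int count = 0;

		iommu_group_for_each_dev(group, &count, example_count_one);
		return count;
	}
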
+++ ++/**
+++ ++ * iommu_group_get - Return the group for a device and increment reference
+++ ++ * @dev: get the group that this device belongs to
+++ ++ *
+++ ++ * This function is called by iommu drivers and users to get the group
+++ ++ * for the specified device. If found, the group is returned and the group
+++ ++ * reference is incremented, else NULL.
+++ ++ */
+++ ++struct iommu_group *iommu_group_get(struct device *dev)
+++ ++{
+++ ++ struct iommu_group *group = dev->iommu_group;
+++ ++
+++ ++ if (group)
+++ ++ kobject_get(group->devices_kobj);
+++ ++
+++ ++ return group;
+++ ++}
+++ ++EXPORT_SYMBOL_GPL(iommu_group_get);
+++ ++
+++ ++/**
+++ ++ * iommu_group_put - Decrement group reference
+++ ++ * @group: the group to use
+++ ++ *
+++ ++ * This function is called by iommu drivers and users to release the
+++ ++ * iommu group. Once the reference count is zero, the group is released.
+++ ++ */
+++ ++void iommu_group_put(struct iommu_group *group)
+++ ++{
+++ ++ if (group)
+++ ++ kobject_put(group->devices_kobj);
+++ ++}
+++ ++EXPORT_SYMBOL_GPL(iommu_group_put);
+++ ++
+++ ++/**
+++ ++ * iommu_group_register_notifier - Register a notifier for group changes
+++ ++ * @group: the group to watch
+++ ++ * @nb: notifier block to signal
+++ ++ *
+++ ++ * This function allows iommu group users to track changes in a group.
+++ ++ * See include/linux/iommu.h for actions sent via this notifier. Caller
+++ ++ * should hold a reference to the group throughout notifier registration.
+++ ++ */
+++ ++int iommu_group_register_notifier(struct iommu_group *group,
+++ ++ struct notifier_block *nb)
+++ ++{
+++ ++ return blocking_notifier_chain_register(&group->notifier, nb);
+++ ++}
+++ ++EXPORT_SYMBOL_GPL(iommu_group_register_notifier);
+++ ++
+++ ++/**
+++ ++ * iommu_group_unregister_notifier - Unregister a notifier
+++ ++ * @group: the group to watch
+++ ++ * @nb: notifier block to signal
+++ ++ *
+++ ++ * Unregister a previously registered group notifier block.
+++ ++ */
+++ ++int iommu_group_unregister_notifier(struct iommu_group *group,
+++ ++ struct notifier_block *nb)
+++ ++{
+++ ++ return blocking_notifier_chain_unregister(&group->notifier, nb);
+++ ++}
+++ ++EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
+++ ++
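
A sketch of a group user tracking hotplug through this notifier; the
example_* names are invented, and the IOMMU_GROUP_NOTIFY_* actions are the
ones defined in include/linux/iommu.h below:

	#include <linux/iommu.h>
	#include <linux/notifier.h>

	static int example_group_event(struct notifier_block *nb,
				       unsigned long action, void *data)
	{
		struct device *dev = data;

		switch (action) {
		case IOMMU_GROUP_NOTIFY_ADD_DEVICE:
			dev_info(dev, "added to iommu group\n");
			break;
		case IOMMU_GROUP_NOTIFY_DEL_DEVICE:
			dev_info(dev, "about to leave iommu group\n");
			break;
		}

		return NOTIFY_OK;
	}

	static struct notifier_block example_nb = {
		.notifier_call = example_group_event,
	};

	/* registration: iommu_group_register_notifier(group, &example_nb); */
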
+++ ++/**
+++ ++ * iommu_group_id - Return ID for a group
+++ ++ * @group: the group to ID
+++ ++ *
+++ ++ * Return the unique ID for the group matching the sysfs group number.
+++ ++ */
+++ ++int iommu_group_id(struct iommu_group *group)
+++ ++{
+++ ++ return group->id;
+++ ++}
+++ ++EXPORT_SYMBOL_GPL(iommu_group_id);
+
--- - unsigned int groupid;
+++ ++static int add_iommu_group(struct device *dev, void *data)
+{
--- - if (iommu_device_group(dev, &groupid) == 0)
--- - device_remove_file(dev, &dev_attr_iommu_group);
+++ ++ struct iommu_ops *ops = data;
+++ ++
+++ ++ if (!ops->add_device)
+++ ++ return -ENODEV;
+
--- - static int iommu_device_notifier(struct notifier_block *nb,
--- - unsigned long action, void *data)
+++ ++ WARN_ON(dev->iommu_group);
+++ ++
+++ ++ ops->add_device(dev);
+
+ return 0;
+}
+
+++ ++static int iommu_bus_notifier(struct notifier_block *nb,
+++ ++ unsigned long action, void *data)
{
struct device *dev = data;
+++ ++ struct iommu_ops *ops = dev->bus->iommu_ops;
+++ ++ struct iommu_group *group;
+++ ++ unsigned long group_action = 0;
+++ ++
+++ ++ /*
+++ ++ * ADD/DEL call into iommu driver ops if provided, which may
+++ ++ * result in ADD/DEL notifiers to group->notifier
+++ ++ */
+++ ++ if (action == BUS_NOTIFY_ADD_DEVICE) {
+++ ++ if (ops->add_device)
+++ ++ return ops->add_device(dev);
+++ ++ } else if (action == BUS_NOTIFY_DEL_DEVICE) {
+++ ++ if (ops->remove_device && dev->iommu_group) {
+++ ++ ops->remove_device(dev);
+++ ++ return 0;
+++ ++ }
+++ ++ }
+++ ++
+++ ++ /*
+++ ++ * Remaining BUS_NOTIFYs get filtered and republished to the
+++ ++ * group, if anyone is listening
+++ ++ */
+++ ++ group = iommu_group_get(dev);
+++ ++ if (!group)
+++ ++ return 0;
+++ ++
+++ ++ switch (action) {
+++ ++ case BUS_NOTIFY_BIND_DRIVER:
+++ ++ group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
+++ ++ break;
+++ ++ case BUS_NOTIFY_BOUND_DRIVER:
+++ ++ group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
+++ ++ break;
+++ ++ case BUS_NOTIFY_UNBIND_DRIVER:
+++ ++ group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
+++ ++ break;
+++ ++ case BUS_NOTIFY_UNBOUND_DRIVER:
+++ ++ group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
+++ ++ break;
+++ ++ }
--- -- if (action == BUS_NOTIFY_ADD_DEVICE)
--- -- return add_iommu_group(dev, NULL);
--- -- else if (action == BUS_NOTIFY_DEL_DEVICE)
--- -- return remove_iommu_group(dev);
+++ ++ if (group_action)
+++ ++ blocking_notifier_call_chain(&group->notifier,
+++ ++ group_action, dev);
+++ ++ iommu_group_put(group);
return 0;
}
--- --static struct notifier_block iommu_device_nb = {
--- -- .notifier_call = iommu_device_notifier,
+++ ++static struct notifier_block iommu_bus_nb = {
+++ ++ .notifier_call = iommu_bus_notifier,
};
static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
{
--- -- bus_register_notifier(bus, &iommu_device_nb);
--- -- bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
+++ ++ bus_register_notifier(bus, &iommu_bus_nb);
+++ ++ bus_for_each_dev(bus, NULL, ops, add_iommu_group);
}
/**
}
EXPORT_SYMBOL_GPL(iommu_detach_device);
+++ ++/*
+++ ++ * IOMMU groups are really the natural working unit of the IOMMU, but
+++ ++ * the IOMMU API works on domains and devices. Bridge that gap by
+++ ++ * iterating over the devices in a group. Ideally we'd have a single
+++ ++ * device which represents the requestor ID of the group, but we also
+++ ++ * allow IOMMU drivers to create policy defined minimum sets, where
+++ ++ * the physical hardware may be able to distinguish members, but we
+++ ++ * wish to group them at a higher level (e.g. untrusted multi-function
+++ ++ * PCI devices). Thus we attach each device.
+++ ++ */
+++ ++static int iommu_group_do_attach_device(struct device *dev, void *data)
+++ ++{
+++ ++ struct iommu_domain *domain = data;
+++ ++
+++ ++ return iommu_attach_device(domain, dev);
+++ ++}
+++ ++
+++ ++int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
+++ ++{
+++ ++ return iommu_group_for_each_dev(group, domain,
+++ ++ iommu_group_do_attach_device);
+++ ++}
+++ ++EXPORT_SYMBOL_GPL(iommu_attach_group);
+++ ++
+++ ++static int iommu_group_do_detach_device(struct device *dev, void *data)
+++ ++{
+++ ++ struct iommu_domain *domain = data;
+++ ++
+++ ++ iommu_detach_device(domain, dev);
+++ ++
+++ ++ return 0;
+++ ++}
+++ ++
+++ ++void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
+++ ++{
+++ ++ iommu_group_for_each_dev(group, domain, iommu_group_do_detach_device);
+++ ++}
+++ ++EXPORT_SYMBOL_GPL(iommu_detach_group);
+++ ++
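
A sketch of the intended caller, e.g. a device-assignment driver that must
capture every device the IOMMU cannot isolate from the one being assigned
(example_attach is an invented name):

	#include <linux/iommu.h>

	static int example_attach(struct iommu_domain *domain, struct device *dev)
	{
		struct iommu_group *group = iommu_group_get(dev);
		int ret;

		if (!group)
			return -ENODEV;	/* device has no iommu group */

		ret = iommu_attach_group(domain, group);
		iommu_group_put(group);

		return ret;
	}
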
phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
unsigned long iova)
{
}
EXPORT_SYMBOL_GPL(iommu_unmap);
--- --int iommu_device_group(struct device *dev, unsigned int *groupid)
+++ ++static int __init iommu_init(void)
{
--- -- if (iommu_present(dev->bus) && dev->bus->iommu_ops->device_group)
--- -- return dev->bus->iommu_ops->device_group(dev, groupid);
+++ ++ iommu_group_kset = kset_create_and_add("iommu_groups",
+++ ++ NULL, kernel_kobj);
+++ ++ ida_init(&iommu_group_ida);
+++ ++ mutex_init(&iommu_group_mutex);
+++ ++
+++ ++ BUG_ON(!iommu_group_kset);
+++ +
- return -ENODEV;
+++ ++ return 0;
+++ + }
-EXPORT_SYMBOL_GPL(iommu_device_group);
+++ ++subsys_initcall(iommu_init);
+++++
+++++ int iommu_domain_get_attr(struct iommu_domain *domain,
+++++ enum iommu_attr attr, void *data)
+++++ {
+++++ struct iommu_domain_geometry *geometry;
+++++ int ret = 0;
+++++
+++++ switch (attr) {
+++++ case DOMAIN_ATTR_GEOMETRY:
+++++ geometry = data;
+++++ *geometry = domain->geometry;
+++++
+++++ break;
+++++ default:
+++++ if (!domain->ops->domain_get_attr)
+++++ return -EINVAL;
+++++
+++++ ret = domain->ops->domain_get_attr(domain, attr, data);
+++++ }
+++++
+++++ return ret;
+++++ }
+++++ EXPORT_SYMBOL_GPL(iommu_domain_get_attr);
+++++
+++++ int iommu_domain_set_attr(struct iommu_domain *domain,
+++++ enum iommu_attr attr, void *data)
+++++ {
+++++ if (!domain->ops->domain_set_attr)
+++++ return -EINVAL;
+
--- - return -ENODEV;
+++++ return domain->ops->domain_set_attr(domain, attr, data);
+ }
--- - EXPORT_SYMBOL_GPL(iommu_device_group);
+++++ EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
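
A sketch of a caller using the new attribute interface to discover the
mappable IOVA range that the geometry additions above publish
(example_show_geometry is an invented name):

	#include <linux/iommu.h>

	static void example_show_geometry(struct iommu_domain *domain)
	{
		struct iommu_domain_geometry geo;

		if (iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo))
			return;

		pr_info("aperture 0x%llx-0x%llx%s\n",
			(unsigned long long)geo.aperture_start,
			(unsigned long long)geo.aperture_end,
			geo.force_aperture ? " (enforced)" : "");
	}
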
#include <linux/sched.h>
#include <linux/iommu.h>
#include <linux/io.h>
++++ +#include <linux/of.h>
++++ +#include <linux/of_iommu.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <mach/iomap.h>
#include <mach/smmu.h>
++++ +#include <mach/tegra-ahb.h>
/* bitmap of the page sizes currently supported */
#define SMMU_IOMMU_PGSIZES (SZ_4K)
#define SMMU_PDE_NEXT_SHIFT 28
---- -/* AHB Arbiter Registers */
---- -#define AHB_XBAR_CTRL 0xe0
---- -#define AHB_XBAR_CTRL_SMMU_INIT_DONE_DONE 1
---- -#define AHB_XBAR_CTRL_SMMU_INIT_DONE_SHIFT 17
---- -
---- -#define SMMU_NUM_ASIDS 4
#define SMMU_TLB_FLUSH_VA_SECTION__MASK 0xffc00000
#define SMMU_TLB_FLUSH_VA_SECTION__SHIFT 12 /* right shift */
#define SMMU_TLB_FLUSH_VA_GROUP__MASK 0xffffc000
#define SMMU_PAGE_SHIFT 12
#define SMMU_PAGE_SIZE (1 << SMMU_PAGE_SHIFT)
++++ +#define SMMU_PAGE_MASK ((1 << SMMU_PAGE_SHIFT) - 1)
#define SMMU_PDIR_COUNT 1024
#define SMMU_PDIR_SIZE (sizeof(unsigned long) * SMMU_PDIR_COUNT)
#define SMMU_ASID_DISABLE 0
#define SMMU_ASID_ASID(n) ((n) & ~SMMU_ASID_ENABLE(0))
++++ +#define NUM_SMMU_REG_BANKS 3
++++ +
#define smmu_client_enable_hwgrp(c, m) smmu_client_set_hwgrp(c, m, 1)
#define smmu_client_disable_hwgrp(c) smmu_client_set_hwgrp(c, 0, 0)
#define __smmu_client_enable_hwgrp(c, m) __smmu_client_set_hwgrp(c, m, 1)
* Per SMMU device - IOMMU device
*/
struct smmu_device {
---- - void __iomem *regs, *regs_ahbarb;
++++ + void __iomem *regs[NUM_SMMU_REG_BANKS];
unsigned long iovmm_base; /* remappable base address */
unsigned long page_count; /* total remappable size */
spinlock_t lock;
char *name;
struct device *dev;
---- - int num_as;
---- - struct smmu_as *as; /* Run-time allocated array */
struct page *avp_vector_page; /* dummy page shared by all AS's */
/*
unsigned long translation_enable_1;
unsigned long translation_enable_2;
unsigned long asid_security;
++++ +
++++ + struct device_node *ahb;
++++ +
++++ + int num_as;
++++ + struct smmu_as as[0]; /* Run-time allocated array */
};
static struct smmu_device *smmu_handle; /* unique for a system */
/*
---- - * SMMU/AHB register accessors
++++ + * SMMU register accessors
*/
static inline u32 smmu_read(struct smmu_device *smmu, size_t offs)
{
---- - return readl(smmu->regs + offs);
---- -}
---- -static inline void smmu_write(struct smmu_device *smmu, u32 val, size_t offs)
---- -{
---- - writel(val, smmu->regs + offs);
++++ + BUG_ON(offs < 0x10);
++++ + if (offs < 0x3c)
++++ + return readl(smmu->regs[0] + offs - 0x10);
++++ + BUG_ON(offs < 0x1f0);
++++ + if (offs < 0x200)
++++ + return readl(smmu->regs[1] + offs - 0x1f0);
++++ + BUG_ON(offs < 0x228);
++++ + if (offs < 0x284)
++++ + return readl(smmu->regs[2] + offs - 0x228);
++++ + BUG();
}
---- -static inline u32 ahb_read(struct smmu_device *smmu, size_t offs)
---- -{
---- - return readl(smmu->regs_ahbarb + offs);
---- -}
---- -static inline void ahb_write(struct smmu_device *smmu, u32 val, size_t offs)
++++ +static inline void smmu_write(struct smmu_device *smmu, u32 val, size_t offs)
{
---- - writel(val, smmu->regs_ahbarb + offs);
++++ + BUG_ON(offs < 0x10);
++++ + if (offs < 0x3c) {
++++ + writel(val, smmu->regs[0] + offs - 0x10);
++++ + return;
++++ + }
++++ + BUG_ON(offs < 0x1f0);
++++ + if (offs < 0x200) {
++++ + writel(val, smmu->regs[1] + offs - 0x1f0);
++++ + return;
++++ + }
++++ + BUG_ON(offs < 0x228);
++++ + if (offs < 0x284) {
++++ + writel(val, smmu->regs[2] + offs - 0x228);
++++ + return;
++++ + }
++++ + BUG();
}
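
The two accessors above fold the architectural SMMU offsets into three
discontiguous register banks: offsets 0x10-0x3b, 0x1f0-0x1ff and 0x228-0x283
land in regs[0], regs[1] and regs[2], each rebased to the start of its bank.
Purely as a restatement of that mapping (not part of the patch), the shared
translation could be factored as:

	static inline void __iomem *example_smmu_addr(struct smmu_device *smmu,
						      size_t offs)
	{
		BUG_ON(offs < 0x10);
		if (offs < 0x3c)
			return smmu->regs[0] + offs - 0x10;
		BUG_ON(offs < 0x1f0);
		if (offs < 0x200)
			return smmu->regs[1] + offs - 0x1f0;
		BUG_ON(offs < 0x228);
		BUG_ON(offs >= 0x284);
		return smmu->regs[2] + offs - 0x228;
	}

	/* smmu_read() then reduces to readl(example_smmu_addr(smmu, offs)). */
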
#define VA_PAGE_TO_PA(va, page) \
FLUSH_SMMU_REGS(smmu);
}
---- -static void smmu_setup_regs(struct smmu_device *smmu)
++++ +static int smmu_setup_regs(struct smmu_device *smmu)
{
int i;
u32 val;
smmu_flush_regs(smmu, 1);
---- - val = ahb_read(smmu, AHB_XBAR_CTRL);
---- - val |= AHB_XBAR_CTRL_SMMU_INIT_DONE_DONE <<
---- - AHB_XBAR_CTRL_SMMU_INIT_DONE_SHIFT;
---- - ahb_write(smmu, val, AHB_XBAR_CTRL);
++++ + return tegra_ahb_enable_smmu(smmu->ahb);
}
static void flush_ptc_and_tlb(struct smmu_device *smmu,
#endif
/*
---- - * Caller must lock/unlock as
++++ + * Caller must not hold as->lock
*/
static int alloc_pdir(struct smmu_as *as)
{
---- - unsigned long *pdir;
---- - int pdn;
++++ + unsigned long *pdir, flags;
++++ + int pdn, err = 0;
u32 val;
struct smmu_device *smmu = as->smmu;
- if (as->pdir_page)
- return 0;
++++ + struct page *page;
++++ + unsigned int *cnt;
++++
++++ + /*
++++ + * do the allocation, then grab as->lock
++++ + */
++++ + cnt = devm_kzalloc(smmu->dev,
++++ + sizeof(cnt[0]) * SMMU_PDIR_COUNT,
++++ + GFP_KERNEL);
++++ + page = alloc_page(GFP_KERNEL | __GFP_DMA);
---- if (as->pdir_page)
---- return 0;
- as->pte_count = devm_kzalloc(smmu->dev,
- sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT, GFP_KERNEL);
- if (!as->pte_count) {
- dev_err(smmu->dev,
- "failed to allocate smmu_device PTE cunters\n");
- return -ENOMEM;
++++ + spin_lock_irqsave(&as->lock, flags);
+
---- as->pte_count = devm_kzalloc(smmu->dev,
--- sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT, GFP_ATOMIC);
- sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT, GFP_KERNEL);
---- if (!as->pte_count) {
---- dev_err(smmu->dev,
---- "failed to allocate smmu_device PTE cunters\n");
---- return -ENOMEM;
++++ + if (as->pdir_page) {
++++ +		/* We raced; free the redundant allocation */
++++ + err = -EAGAIN;
++++ + goto err_out;
}
--- as->pdir_page = alloc_page(GFP_ATOMIC | __GFP_DMA);
- - as->pdir_page = alloc_page(GFP_KERNEL | __GFP_DMA);
---- - if (!as->pdir_page) {
---- - dev_err(smmu->dev,
---- - "failed to allocate smmu_device page directory\n");
---- - devm_kfree(smmu->dev, as->pte_count);
---- - as->pte_count = NULL;
---- - return -ENOMEM;
++++ +
++++ + if (!page || !cnt) {
++++ + dev_err(smmu->dev, "failed to allocate at %s\n", __func__);
++++ + err = -ENOMEM;
++++ + goto err_out;
}
++++ +
++++ + as->pdir_page = page;
++++ + as->pte_count = cnt;
++++ +
SetPageReserved(as->pdir_page);
pdir = page_address(as->pdir_page);
smmu_write(smmu, val, SMMU_TLB_FLUSH);
FLUSH_SMMU_REGS(as->smmu);
++++ + spin_unlock_irqrestore(&as->lock, flags);
++++ +
return 0;
++++ +
++++ +err_out:
++++ + spin_unlock_irqrestore(&as->lock, flags);
++++ +
++++ + devm_kfree(smmu->dev, cnt);
++++ + if (page)
++++ + __free_page(page);
++++ + return err;
}
static void __smmu_iommu_unmap(struct smmu_as *as, dma_addr_t iova)
static int smmu_iommu_domain_init(struct iommu_domain *domain)
{
---- - int i;
++++ + int i, err = -ENODEV;
unsigned long flags;
struct smmu_as *as;
struct smmu_device *smmu = smmu_handle;
/* Look for a free AS with lock held */
for (i = 0; i < smmu->num_as; i++) {
---- - struct smmu_as *tmp = &smmu->as[i];
---- -
---- - spin_lock_irqsave(&tmp->lock, flags);
---- - if (!tmp->pdir_page) {
---- - as = tmp;
---- - goto found;
++++ + as = &smmu->as[i];
++++ + if (!as->pdir_page) {
++++ + err = alloc_pdir(as);
++++ + if (!err)
++++ + goto found;
}
---- - spin_unlock_irqrestore(&tmp->lock, flags);
++++ + if (err != -EAGAIN)
++++ + break;
}
---- - dev_err(smmu->dev, "no free AS\n");
---- - return -ENODEV;
++++ + if (i == smmu->num_as)
++++ + dev_err(smmu->dev, "no free AS\n");
++++ + return err;
found:
---- - if (alloc_pdir(as) < 0)
---- - goto err_alloc_pdir;
---- -
---- - spin_lock(&smmu->lock);
++++ + spin_lock_irqsave(&smmu->lock, flags);
/* Update PDIR register */
smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
SMMU_MK_PDIR(as->pdir_page, as->pdir_attr), SMMU_PTB_DATA);
FLUSH_SMMU_REGS(smmu);
---- - spin_unlock(&smmu->lock);
++++ + spin_unlock_irqrestore(&smmu->lock, flags);
---- - spin_unlock_irqrestore(&as->lock, flags);
domain->priv = as;
+++++ domain->geometry.aperture_start = smmu->iovmm_base;
+++++ domain->geometry.aperture_end = smmu->iovmm_base +
+++++ smmu->page_count * SMMU_PAGE_SIZE - 1;
+++++ domain->geometry.force_aperture = true;
+++++
dev_dbg(smmu->dev, "smmu_as@%p\n", as);
---- - return 0;
---- -err_alloc_pdir:
---- - spin_unlock_irqrestore(&as->lock, flags);
---- - return -ENODEV;
++++ + return 0;
}
static void smmu_iommu_domain_destroy(struct iommu_domain *domain)
{
struct smmu_device *smmu = dev_get_drvdata(dev);
unsigned long flags;
++++ + int err;
spin_lock_irqsave(&smmu->lock, flags);
---- - smmu_setup_regs(smmu);
++++ + err = smmu_setup_regs(smmu);
spin_unlock_irqrestore(&smmu->lock, flags);
---- - return 0;
++++ + return err;
}
static int tegra_smmu_probe(struct platform_device *pdev)
{
struct smmu_device *smmu;
---- - struct resource *regs, *regs2, *window;
struct device *dev = &pdev->dev;
---- - int i, err = 0;
++++ + int i, asids, err = 0;
++++ + dma_addr_t uninitialized_var(base);
++++ + size_t bytes, uninitialized_var(size);
if (smmu_handle)
return -EIO;
BUILD_BUG_ON(PAGE_SHIFT != SMMU_PAGE_SHIFT);
---- - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
---- - regs2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
---- - window = platform_get_resource(pdev, IORESOURCE_MEM, 2);
---- - if (!regs || !regs2 || !window) {
---- - dev_err(dev, "No SMMU resources\n");
++++ + if (of_property_read_u32(dev->of_node, "nvidia,#asids", &asids))
return -ENODEV;
---- - }
---- - smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
++++ + bytes = sizeof(*smmu) + asids * sizeof(*smmu->as);
++++ + smmu = devm_kzalloc(dev, bytes, GFP_KERNEL);
if (!smmu) {
dev_err(dev, "failed to allocate smmu_device\n");
return -ENOMEM;
}
---- - smmu->dev = dev;
---- - smmu->num_as = SMMU_NUM_ASIDS;
---- - smmu->iovmm_base = (unsigned long)window->start;
---- - smmu->page_count = resource_size(window) >> SMMU_PAGE_SHIFT;
---- - smmu->regs = devm_ioremap(dev, regs->start, resource_size(regs));
---- - smmu->regs_ahbarb = devm_ioremap(dev, regs2->start,
---- - resource_size(regs2));
---- - if (!smmu->regs || !smmu->regs_ahbarb) {
---- - dev_err(dev, "failed to remap SMMU registers\n");
---- - err = -ENXIO;
---- - goto fail;
++++ + for (i = 0; i < ARRAY_SIZE(smmu->regs); i++) {
++++ + struct resource *res;
++++ +
++++ + res = platform_get_resource(pdev, IORESOURCE_MEM, i);
++++ + if (!res)
++++ + return -ENODEV;
++++ + smmu->regs[i] = devm_request_and_ioremap(&pdev->dev, res);
++++ + if (!smmu->regs[i])
++++ + return -EBUSY;
}
++++ + err = of_get_dma_window(dev->of_node, NULL, 0, NULL, &base, &size);
++++ + if (err)
++++ + return -ENODEV;
++++ +
++++ + if (size & SMMU_PAGE_MASK)
++++ + return -EINVAL;
++++ +
++++ + size >>= SMMU_PAGE_SHIFT;
++++ + if (!size)
++++ + return -EINVAL;
++++ +
++++ + smmu->ahb = of_parse_phandle(dev->of_node, "nvidia,ahb", 0);
++++ + if (!smmu->ahb)
++++ + return -ENODEV;
++++ +
++++ + smmu->dev = dev;
++++ + smmu->num_as = asids;
++++ + smmu->iovmm_base = base;
++++ + smmu->page_count = size;
++++ +
smmu->translation_enable_0 = ~0;
smmu->translation_enable_1 = ~0;
smmu->translation_enable_2 = ~0;
smmu->asid_security = 0;
---- - smmu->as = devm_kzalloc(dev,
---- - sizeof(smmu->as[0]) * smmu->num_as, GFP_KERNEL);
---- - if (!smmu->as) {
---- - dev_err(dev, "failed to allocate smmu_as\n");
---- - err = -ENOMEM;
---- - goto fail;
---- - }
---- -
for (i = 0; i < smmu->num_as; i++) {
struct smmu_as *as = &smmu->as[i];
INIT_LIST_HEAD(&as->client);
}
spin_lock_init(&smmu->lock);
---- - smmu_setup_regs(smmu);
++++ + err = smmu_setup_regs(smmu);
++++ + if (err)
++++ + return err;
platform_set_drvdata(pdev, smmu);
smmu->avp_vector_page = alloc_page(GFP_KERNEL);
if (!smmu->avp_vector_page)
---- - goto fail;
++++ + return -ENOMEM;
smmu_handle = smmu;
return 0;
---- -
---- -fail:
---- - if (smmu->avp_vector_page)
---- - __free_page(smmu->avp_vector_page);
---- - if (smmu->regs)
---- - devm_iounmap(dev, smmu->regs);
---- - if (smmu->regs_ahbarb)
---- - devm_iounmap(dev, smmu->regs_ahbarb);
---- - if (smmu && smmu->as) {
---- - for (i = 0; i < smmu->num_as; i++) {
---- - if (smmu->as[i].pdir_page) {
---- - ClearPageReserved(smmu->as[i].pdir_page);
---- - __free_page(smmu->as[i].pdir_page);
---- - }
---- - }
---- - devm_kfree(dev, smmu->as);
---- - }
---- - devm_kfree(dev, smmu);
---- - return err;
}
static int tegra_smmu_remove(struct platform_device *pdev)
{
struct smmu_device *smmu = platform_get_drvdata(pdev);
---- - struct device *dev = smmu->dev;
++++ + int i;
smmu_write(smmu, SMMU_CONFIG_DISABLE, SMMU_CONFIG);
---- - platform_set_drvdata(pdev, NULL);
---- - if (smmu->as) {
---- - int i;
---- -
---- - for (i = 0; i < smmu->num_as; i++)
---- - free_pdir(&smmu->as[i]);
---- - devm_kfree(dev, smmu->as);
---- - }
---- - if (smmu->avp_vector_page)
---- - __free_page(smmu->avp_vector_page);
---- - if (smmu->regs)
---- - devm_iounmap(dev, smmu->regs);
---- - if (smmu->regs_ahbarb)
---- - devm_iounmap(dev, smmu->regs_ahbarb);
---- - devm_kfree(dev, smmu);
++++ + for (i = 0; i < smmu->num_as; i++)
++++ + free_pdir(&smmu->as[i]);
++++ + __free_page(smmu->avp_vector_page);
smmu_handle = NULL;
return 0;
}
.resume = tegra_smmu_resume,
};
++++ +#ifdef CONFIG_OF
++++ +static struct of_device_id tegra_smmu_of_match[] __devinitdata = {
++++ + { .compatible = "nvidia,tegra30-smmu", },
++++ + { },
++++ +};
++++ +MODULE_DEVICE_TABLE(of, tegra_smmu_of_match);
++++ +#endif
++++ +
static struct platform_driver tegra_smmu_driver = {
.probe = tegra_smmu_probe,
.remove = tegra_smmu_remove,
.owner = THIS_MODULE,
.name = "tegra-smmu",
.pm = &tegra_smmu_pm_ops,
++++ + .of_match_table = of_match_ptr(tegra_smmu_of_match),
},
};
MODULE_DESCRIPTION("IOMMU API for SMMU in Tegra30");
MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
++++ +MODULE_ALIAS("platform:tegra-smmu");
MODULE_LICENSE("GPL v2");
if (target_state == PCI_POWER_ERROR)
return -EIO;
- - /* Some devices mustn't be in D3 during system sleep */
- - if (target_state == PCI_D3hot &&
- - (dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP))
- - return 0;
- -
pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
error = pci_set_power_state(dev, target_state);
}
/**
+++ ++ * pci_acs_enabled - test ACS against required flags for a given device
+++ ++ * @pdev: device to test
+++ ++ * @acs_flags: required PCI ACS flags
+++ ++ *
+++ ++ * Return true if the device supports the provided flags. Automatically
+++ ++ * filters out flags that are not implemented on multifunction devices.
+++ ++ */
+++ ++bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
+++ ++{
+++ ++ int pos, ret;
+++ ++ u16 ctrl;
+++ ++
+++ ++ ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
+++ ++ if (ret >= 0)
+++ ++ return ret > 0;
+++ ++
+++ ++ if (!pci_is_pcie(pdev))
+++ ++ return false;
+++ ++
+++ ++ /* Filter out flags not applicable to multifunction */
+++ ++ if (pdev->multifunction)
+++ ++ acs_flags &= (PCI_ACS_RR | PCI_ACS_CR |
+++ ++ PCI_ACS_EC | PCI_ACS_DT);
+++ ++
+++ ++ if (pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM ||
+++ ++ pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
+++ ++ pdev->multifunction) {
+++ ++ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
+++ ++ if (!pos)
+++ ++ return false;
+++ ++
+++ ++ pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
+++ ++ if ((ctrl & acs_flags) != acs_flags)
+++ ++ return false;
+++ ++ }
+++ ++
+++ ++ return true;
+++ ++}
+++ ++
+++ ++/**
+++ ++ * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
+++ ++ * @start: starting downstream device
+++ ++ * @end: ending upstream device or NULL to search to the root bus
+++ ++ * @acs_flags: required flags
+++ ++ *
+++ ++ * Walk up a device tree from start to end testing PCI ACS support. If
+++ ++ * any step along the way does not support the required flags, return false.
+++ ++ */
+++ ++bool pci_acs_path_enabled(struct pci_dev *start,
+++ ++ struct pci_dev *end, u16 acs_flags)
+++ ++{
+++ ++ struct pci_dev *pdev, *parent = start;
+++ ++
+++ ++ do {
+++ ++ pdev = parent;
+++ ++
+++ ++ if (!pci_acs_enabled(pdev, acs_flags))
+++ ++ return false;
+++ ++
+++ ++ if (pci_is_root_bus(pdev->bus))
+++ ++ return (end == NULL);
+++ ++
+++ ++ parent = pdev->bus->self;
+++ ++ } while (pdev != end);
+++ ++
+++ ++ return true;
+++ ++}
+++ ++
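
As a usage sketch (example_dma_isolated is an invented name), this is the
same isolation test the IOMMU drivers above perform before sharing a group:

	#include <linux/pci.h>

	static bool example_dma_isolated(struct pci_dev *pdev)
	{
		u16 flags = PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF;

		if (pci_is_root_bus(pdev->bus))
			return true;	/* no upstream device to peer with */

		return pci_acs_path_enabled(pdev->bus->self, NULL, flags);
	}
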
+++ ++/**
* pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
* @dev: the PCI device
 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
- -/*
- - * The Intel 6 Series/C200 Series chipset's EHCI controllers on many
- - * ASUS motherboards will cause memory corruption or a system crash
- - * if they are in D3 while the system is put into S3 sleep.
- - */
- -static void __devinit asus_ehci_no_d3(struct pci_dev *dev)
- -{
- - const char *sys_info;
- - static const char good_Asus_board[] = "P8Z68-V";
- -
- - if (dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP)
- - return;
- - if (dev->subsystem_vendor != PCI_VENDOR_ID_ASUSTEK)
- - return;
- - sys_info = dmi_get_system_info(DMI_BOARD_NAME);
- - if (sys_info && memcmp(sys_info, good_Asus_board,
- - sizeof(good_Asus_board) - 1) == 0)
- - return;
- -
- - dev_info(&dev->dev, "broken D3 during system sleep on ASUS\n");
- - dev->dev_flags |= PCI_DEV_FLAGS_NO_D3_DURING_SLEEP;
- - device_set_wakeup_capable(&dev->dev, false);
- -}
- -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c26, asus_ehci_no_d3);
- -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c2d, asus_ehci_no_d3);
- -
static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
struct pci_fixup *end)
{
return -ENOTTY;
}
+++ ++
+++ ++static struct pci_dev *pci_func_0_dma_source(struct pci_dev *dev)
+++ ++{
+++ ++ if (!PCI_FUNC(dev->devfn))
+++ ++ return pci_dev_get(dev);
+++ ++
+++ ++ return pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+++ ++}
+++ ++
+++ ++static const struct pci_dev_dma_source {
+++ ++ u16 vendor;
+++ ++ u16 device;
+++ ++ struct pci_dev *(*dma_source)(struct pci_dev *dev);
+++ ++} pci_dev_dma_source[] = {
+++ ++ /*
+++ ++ * https://bugzilla.redhat.com/show_bug.cgi?id=605888
+++ ++ *
+++ ++ * Some Ricoh devices use the function 0 source ID for DMA on
+++ ++ * other functions of a multifunction device. The DMA source
+++ ++ * is therefore function 0, which has implications for the
+++ ++ * iommu grouping of these devices.
+++ ++ */
+++ ++ { PCI_VENDOR_ID_RICOH, 0xe822, pci_func_0_dma_source },
+++ ++ { PCI_VENDOR_ID_RICOH, 0xe230, pci_func_0_dma_source },
+++ ++ { PCI_VENDOR_ID_RICOH, 0xe832, pci_func_0_dma_source },
+++ ++ { PCI_VENDOR_ID_RICOH, 0xe476, pci_func_0_dma_source },
+++ ++ { 0 }
+++ ++};
+++ ++
+++ ++/*
+++ ++ * IOMMUs with isolation capabilities need to be programmed with the
+++ ++ * correct source ID of a device. In most cases, the source ID matches
+++ ++ * the device doing the DMA, but sometimes hardware is broken and will
+++ ++ * tag the DMA as being sourced from a different device. This function
+++ ++ * allows that translation. Note that the reference count of the
+++ ++ * returned device is incremented on all paths.
+++ ++ */
+++ ++struct pci_dev *pci_get_dma_source(struct pci_dev *dev)
+++ ++{
+++ ++ const struct pci_dev_dma_source *i;
+++ ++
+++ ++ for (i = pci_dev_dma_source; i->dma_source; i++) {
+++ ++ if ((i->vendor == dev->vendor ||
+++ ++ i->vendor == (u16)PCI_ANY_ID) &&
+++ ++ (i->device == dev->device ||
+++ ++ i->device == (u16)PCI_ANY_ID))
+++ ++ return i->dma_source(dev);
+++ ++ }
+++ ++
+++ ++ return pci_dev_get(dev);
+++ ++}
+++ ++
+++ ++static const struct pci_dev_acs_enabled {
+++ ++ u16 vendor;
+++ ++ u16 device;
+++ ++ int (*acs_enabled)(struct pci_dev *dev, u16 acs_flags);
+++ ++} pci_dev_acs_enabled[] = {
+++ ++ { 0 }
+++ ++};
+++ ++
+++ ++int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags)
+++ ++{
+++ ++ const struct pci_dev_acs_enabled *i;
+++ ++ int ret;
+++ ++
+++ ++ /*
+++ ++ * Allow devices that do not expose standard PCIe ACS capabilities
+++ ++ * or control to indicate their support here. Multi-function express
+++ ++ * devices which do not allow internal peer-to-peer between functions,
+++ ++ * but do not implement PCIe ACS, may wish to return true here.
+++ ++ */
+++ ++ for (i = pci_dev_acs_enabled; i->acs_enabled; i++) {
+++ ++ if ((i->vendor == dev->vendor ||
+++ ++ i->vendor == (u16)PCI_ANY_ID) &&
+++ ++ (i->device == dev->device ||
+++ ++ i->device == (u16)PCI_ANY_ID)) {
+++ ++ ret = i->acs_enabled(dev, acs_flags);
+++ ++ if (ret >= 0)
+++ ++ return ret;
+++ ++ }
+++ ++ }
+++ ++
+++ ++ return -ENOTTY;
+++ ++}
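
A sketch of what a future entry might look like; the vendor/device IDs and
the claim of isolation below are entirely hypothetical:

	#include <linux/pci.h>

	static int example_acs_enabled(struct pci_dev *dev, u16 acs_flags)
	{
		/* Device never routes peer-to-peer between its functions. */
		return 1;
	}

	/* entry preceding the { 0 } terminator in pci_dev_acs_enabled[]:
	 *	{ 0x1234, 0x5678, example_acs_enabled },
	 */
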
struct bus_type;
struct device_node;
struct iommu_ops;
+++ ++struct iommu_group;
struct bus_attribute {
struct attribute attr;
const struct attribute_group **groups; /* optional groups */
void (*release)(struct device *dev);
+++ ++ struct iommu_group *iommu_group;
};
/* Get the wakeup routines, which depend on struct device */
extern struct device *get_device(struct device *dev);
extern void put_device(struct device *dev);
-----extern void wait_for_device_probe(void);
-----
#ifdef CONFIG_DEVTMPFS
extern int devtmpfs_create_node(struct device *dev);
extern int devtmpfs_delete_node(struct device *dev);
#define IOMMU_CACHE (4) /* DMA cache coherency */
struct iommu_ops;
+++ ++struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
struct device *, unsigned long, int, void *);
+++++ struct iommu_domain_geometry {
+++++ dma_addr_t aperture_start; /* First address that can be mapped */
+++++ dma_addr_t aperture_end; /* Last address that can be mapped */
+++++ bool force_aperture; /* DMA only allowed in mappable range? */
+++++ };
+++++
struct iommu_domain {
struct iommu_ops *ops;
void *priv;
iommu_fault_handler_t handler;
void *handler_token;
+++++ struct iommu_domain_geometry geometry;
};
#define IOMMU_CAP_CACHE_COHERENCY 0x1
#define IOMMU_CAP_INTR_REMAP 0x2 /* isolates device intrs */
+++++ enum iommu_attr {
+++++ DOMAIN_ATTR_MAX,
+++++ DOMAIN_ATTR_GEOMETRY,
+++++ };
+++++
#ifdef CONFIG_IOMMU_API
/**
* @unmap: unmap a physically contiguous memory region from an iommu domain
* @iova_to_phys: translate iova to physical address
* @domain_has_cap: domain capabilities query
----- * @commit: commit iommu domain
+++ ++ * @add_device: add device to iommu grouping
+++ ++ * @remove_device: remove device from iommu grouping
+++++ * @domain_get_attr: Query domain attributes
+++++ * @domain_set_attr: Change domain attributes
* @pgsize_bitmap: bitmap of supported page sizes
*/
struct iommu_ops {
unsigned long iova);
int (*domain_has_cap)(struct iommu_domain *domain,
unsigned long cap);
+++ ++ int (*add_device)(struct device *dev);
+++ ++ void (*remove_device)(struct device *dev);
+ int (*device_group)(struct device *dev, unsigned int *groupid);
+++++ int (*domain_get_attr)(struct iommu_domain *domain,
+++++ enum iommu_attr attr, void *data);
+++++ int (*domain_set_attr)(struct iommu_domain *domain,
+++++ enum iommu_attr attr, void *data);
unsigned long pgsize_bitmap;
};
+++ ++#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
+++ ++#define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
+++ ++#define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */
+++ ++#define IOMMU_GROUP_NOTIFY_BOUND_DRIVER 4 /* Post Driver bind */
+++ ++#define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER 5 /* Pre Driver unbind */
+++ ++#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER 6 /* Post Driver unbind */
+++ ++
extern int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops);
extern bool iommu_present(struct bus_type *bus);
extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
unsigned long cap);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token);
--- --extern int iommu_device_group(struct device *dev, unsigned int *groupid);
+++ ++
+++ ++extern int iommu_attach_group(struct iommu_domain *domain,
+++ ++ struct iommu_group *group);
+++ ++extern void iommu_detach_group(struct iommu_domain *domain,
+++ ++ struct iommu_group *group);
+++ ++extern struct iommu_group *iommu_group_alloc(void);
+++ ++extern void *iommu_group_get_iommudata(struct iommu_group *group);
+++ ++extern void iommu_group_set_iommudata(struct iommu_group *group,
+++ ++ void *iommu_data,
+++ ++ void (*release)(void *iommu_data));
+++ ++extern int iommu_group_set_name(struct iommu_group *group, const char *name);
+++ ++extern int iommu_group_add_device(struct iommu_group *group,
+++ ++ struct device *dev);
+++ ++extern void iommu_group_remove_device(struct device *dev);
+++ ++extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
+++ ++ int (*fn)(struct device *, void *));
+++ ++extern struct iommu_group *iommu_group_get(struct device *dev);
+++ ++extern void iommu_group_put(struct iommu_group *group);
+++ ++extern int iommu_group_register_notifier(struct iommu_group *group,
+++ ++ struct notifier_block *nb);
+++ ++extern int iommu_group_unregister_notifier(struct iommu_group *group,
+++ ++ struct notifier_block *nb);
+++ ++extern int iommu_group_id(struct iommu_group *group);
+++ +
+++++ extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
+++++ void *data);
+++++ extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
+++++ void *data);
+
/**
* report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
* @domain: the iommu domain where the fault has happened
#else /* CONFIG_IOMMU_API */
struct iommu_ops {};
+++ ++struct iommu_group {};
static inline bool iommu_present(struct bus_type *bus)
{
{
}
--- --static inline int iommu_device_group(struct device *dev, unsigned int *groupid)
+++ ++int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
+++ ++{
+++ ++ return -ENODEV;
+++ ++}
+++ ++
+++ ++void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
+++ ++{
+++ ++}
+++ ++
+++ ++struct iommu_group *iommu_group_alloc(void)
+++ ++{
+++ ++ return ERR_PTR(-ENODEV);
+++ ++}
+++ ++
+++ ++void *iommu_group_get_iommudata(struct iommu_group *group)
+++ ++{
+++ ++ return NULL;
+++ ++}
+++ ++
+++ ++void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
+++ ++ void (*release)(void *iommu_data))
+++ ++{
+++ ++}
+++ ++
+++ ++int iommu_group_set_name(struct iommu_group *group, const char *name)
+++ ++{
+++ ++ return -ENODEV;
+++ ++}
+++ ++
+++ ++int iommu_group_add_device(struct iommu_group *group, struct device *dev)
+++ ++{
+++ ++ return -ENODEV;
+++ ++}
+++ ++
+++ ++void iommu_group_remove_device(struct device *dev)
+++ ++{
+++ ++}
+++ ++
+++ ++int iommu_group_for_each_dev(struct iommu_group *group, void *data,
+++ ++ int (*fn)(struct device *, void *))
+++ ++{
+++ ++ return -ENODEV;
+++ ++}
+++ ++
+++ ++struct iommu_group *iommu_group_get(struct device *dev)
+++ ++{
+++ ++ return NULL;
+++ ++}
+++ ++
+++ ++void iommu_group_put(struct iommu_group *group)
+++ ++{
+++ ++}
+++ ++
+++ ++int iommu_group_register_notifier(struct iommu_group *group,
+++ ++ struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+
+++ ++int iommu_group_unregister_notifier(struct iommu_group *group,
+++ ++ struct notifier_block *nb)
+++ ++{
+++ ++ return 0;
+++ ++}
+++ ++
+++ ++int iommu_group_id(struct iommu_group *group)
+++ + {
+++ + return -ENODEV;
+++ + }
+++++
+++++ static inline int iommu_domain_get_attr(struct iommu_domain *domain,
+++++ enum iommu_attr attr, void *data)
+++++ {
+++++ return -EINVAL;
+++++ }
+++++
+++++ static inline int iommu_domain_set_attr(struct iommu_domain *domain,
+++++ enum iommu_attr attr, void *data)
+++++ {
+++++ return -EINVAL;
+++++ }
+++++
#endif /* CONFIG_IOMMU_API */
#endif /* __LINUX_IOMMU_H */
PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2,
/* Provide indication device is assigned by a Virtual Machine Manager */
PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) 4,
- - /* Device causes system crash if in D3 during S3 sleep */
- - PCI_DEV_FLAGS_NO_D3_DURING_SLEEP = (__force pci_dev_flags_t) 8,
};
enum pci_irq_reroute_variant {
#define PCIBIOS_SET_FAILED 0x88
#define PCIBIOS_BUFFER_TOO_SMALL 0x89
+++ ++/*
+++ ++ * Translate the PCIBIOS_* error values above into generic errnos for
+++ ++ * passing back through non-PCI code.
+++ ++ */
+++ ++static inline int pcibios_err_to_errno(int err)
+++ ++{
+++ ++ if (err <= PCIBIOS_SUCCESSFUL)
+++ ++ return err; /* Assume already errno */
+++ ++
+++ ++ switch (err) {
+++ ++ case PCIBIOS_FUNC_NOT_SUPPORTED:
+++ ++ return -ENOENT;
+++ ++ case PCIBIOS_BAD_VENDOR_ID:
+++ ++ return -EINVAL;
+++ ++ case PCIBIOS_DEVICE_NOT_FOUND:
+++ ++ return -ENODEV;
+++ ++ case PCIBIOS_BAD_REGISTER_NUMBER:
+++ ++ return -EFAULT;
+++ ++ case PCIBIOS_SET_FAILED:
+++ ++ return -EIO;
+++ ++ case PCIBIOS_BUFFER_TOO_SMALL:
+++ ++ return -ENOSPC;
+++ ++ }
+++ ++
+++ ++ return -ENOTTY;
+++ ++}
+++ ++
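A sketch of the intended pairing with the pci_user_* accessors declared
later in this header, assuming (as with the raw config accessors) that they
hand back PCIBIOS_* codes; example_read_vendor is an invented name:

	#include <linux/pci.h>

	static int example_read_vendor(struct pci_dev *dev, u16 *vendor)
	{
		int err = pci_user_read_config_word(dev, PCI_VENDOR_ID, vendor);

		return pcibios_err_to_errno(err);
	}
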
/* Low-level architecture-dependent routines */
struct pci_ops {
return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
}
+++ ++/* user-space driven config access */
+++ ++int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
+++ ++int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
+++ ++int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
+++ ++int pci_user_write_config_byte(struct pci_dev *dev, int where, u8 val);
+++ ++int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val);
+++ ++int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val);
+++ ++
int __must_check pci_enable_device(struct pci_dev *dev);
int __must_check pci_enable_device_io(struct pci_dev *dev);
int __must_check pci_enable_device_mem(struct pci_dev *dev);
static inline int pci_domain_nr(struct pci_bus *bus)
{ return 0; }
+++ ++static inline struct pci_dev *pci_dev_get(struct pci_dev *dev)
+++ ++{ return NULL; }
+++ ++
#define dev_is_pci(d) (false)
#define dev_is_pf(d) (false)
#define dev_num_vf(d) (0)
#ifdef CONFIG_PCI_QUIRKS
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
+++ ++struct pci_dev *pci_get_dma_source(struct pci_dev *dev);
+++ ++int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags);
#else
static inline void pci_fixup_device(enum pci_fixup_pass pass,
struct pci_dev *dev) {}
+++ ++static inline struct pci_dev *pci_get_dma_source(struct pci_dev *dev)
+++ ++{
+++ ++ return pci_dev_get(dev);
+++ ++}
+++ ++static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev,
+++ ++ u16 acs_flags)
+++ ++{
+++ ++ return -ENOTTY;
+++ ++}
#endif
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
}
void pci_request_acs(void);
--- --
+++ ++bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
+++ ++bool pci_acs_path_enabled(struct pci_dev *start,
+++ ++ struct pci_dev *end, u16 acs_flags);
#define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */
#define PCI_VPD_LRDT_ID(x) (x | PCI_VPD_LRDT)