else if (cells != 1)
dev_err(dev, "invalid #iommu-cells value (%d)\n", cells);
else
- bypass = false;
+ ret = 0;
+
+ parse_driver_options(smmu);
+
+ if (of_dma_is_coherent(dev->of_node))
+ smmu->features |= ARM_SMMU_FEAT_COHERENCY;
+
+ return ret;
+ }
+
+ static int arm_smmu_device_probe(struct platform_device *pdev)
+ {
+ int irq, ret;
+ struct resource *res;
+++++++ resource_size_t ioaddr;
+ struct arm_smmu_device *smmu;
+ struct device *dev = &pdev->dev;
+ bool bypass;
smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
if (!smmu) {
return ret;
/* And we're up. Go go go! */
- ----- iommu_register_instance(dev->fwnode, &arm_smmu_ops);
- of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
- #ifdef CONFIG_PCI
- pci_request_acs();
- ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
+++++++ ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL,
+++++++ "smmu3.%pa", &ioaddr);
+ +++++ if (ret)
+ +++++ return ret;
+++++++
+++++++ iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
+++++++ iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
+++++++
+++++++ ret = iommu_device_register(&smmu->iommu);
+
+ #ifdef CONFIG_PCI
+ if (pci_bus_type.iommu_ops != &arm_smmu_ops) {
+ pci_request_acs();
+ ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
+ if (ret)
+ return ret;
+ }
#endif
#ifdef CONFIG_ARM_AMBA
- ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops);
- if (ret)
- return ret;
+ if (amba_bustype.iommu_ops != &arm_smmu_ops) {
+ ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops);
+ if (ret)
+ return ret;
+ }
#endif
- return bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
+ if (platform_bus_type.iommu_ops != &arm_smmu_ops) {
+ ret = bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
+ if (ret)
+ return ret;
+ }
+ return 0;
}
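
The hunks above replace the old of_iommu_set_ops()/iommu_register_instance() pair with the new iommu_device API, and guard each bus_set_iommu() call behind a bus->iommu_ops check so probing a second SMMU instance does not re-register the bus ops. A minimal sketch of the resulting registration sequence for a hypothetical driver (my_smmu_iommu, my_smmu_ops and the "mysmmu" sysfs name are illustrative, not part of the patch):

	static const struct iommu_ops my_smmu_ops;	/* driver callbacks, elided */
	static struct iommu_device my_smmu_iommu;	/* hypothetical instance */

	static int my_smmu_register(struct platform_device *pdev,
				    resource_size_t ioaddr)
	{
		int ret;

		/* Expose the instance in sysfs before registering it. */
		ret = iommu_device_sysfs_add(&my_smmu_iommu, &pdev->dev, NULL,
					     "mysmmu.%pa", &ioaddr);
		if (ret)
			return ret;

		/* Attach ops and the firmware node that fwnode lookups match. */
		iommu_device_set_ops(&my_smmu_iommu, &my_smmu_ops);
		iommu_device_set_fwnode(&my_smmu_iommu, pdev->dev.fwnode);

		/* Adds the instance to the list that iommu_ops_from_fwnode()
		 * walks, replacing the old private instance list. */
		return iommu_device_register(&my_smmu_iommu);
	}
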
static int arm_smmu_device_remove(struct platform_device *pdev)
* Fortunately, this also opens up a workaround for systems where the
* ID register value has ended up configured incorrectly.
*/
- cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
cttw_reg = !!(id & ID0_CTTW);
- if (cttw_dt)
- smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
- if (cttw_dt || cttw_reg)
+ if (cttw_fw || cttw_reg)
dev_notice(smmu->dev, "\t%scoherent table walk\n",
- cttw_dt ? "" : "non-");
- if (cttw_dt != cttw_reg)
+ cttw_fw ? "" : "non-");
+ if (cttw_fw != cttw_reg)
dev_notice(smmu->dev,
- "\t(IDR0.CTTW overridden by dma-coherent property)\n");
+ "\t(IDR0.CTTW overridden by FW configuration)\n");
/* Max. number of entries we have for stream matching/indexing */
--- ---- size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
+++ ++++ if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
+++ ++++ smmu->features |= ARM_SMMU_FEAT_EXIDS;
+++ ++++ size = 1 << 16;
+++ ++++ } else {
+++ ++++ size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
+++ ++++ }
smmu->streamid_mask = size - 1;
if (id & ID0_SMS) {
--- ---- u32 smr;
--- ----
smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
if (size == 0) {
return -ENODEV;
}
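
A worked example for the stream ID sizing above: when an ARM_SMMU_V2 implementation reports EXIDS, stream IDs are a full 16 bits, so size = 1 << 16 = 65536 and streamid_mask becomes 0xffff; otherwise a NUMSIDB field of, say, 9 would give size = 1 << 9 = 512 and a mask of 0x1ff.
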
+ if (of_dma_is_coherent(dev->of_node))
+ smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
+
+ return 0;
+ }
+
+ static int arm_smmu_device_probe(struct platform_device *pdev)
+ {
+ struct resource *res;
+++++++ resource_size_t ioaddr;
+ struct arm_smmu_device *smmu;
+ struct device *dev = &pdev->dev;
+ int num_irqs, i, err;
+
smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
if (!smmu) {
dev_err(dev, "failed to allocate arm_smmu_device\n");
}
smmu->dev = dev;
- data = of_device_get_match_data(dev);
- smmu->version = data->version;
- smmu->model = data->model;
+ if (dev->of_node)
+ err = arm_smmu_device_dt_probe(pdev, smmu);
+ else
+ err = arm_smmu_device_acpi_probe(pdev, smmu);
+
+ if (err)
+ return err;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+++++++ ioaddr = res->start;
smmu->base = devm_ioremap_resource(dev, res);
if (IS_ERR(smmu->base))
return PTR_ERR(smmu->base);
}
}
- ----- iommu_register_instance(dev->fwnode, &arm_smmu_ops);
- of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
+++++++ err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
+++++++ "smmu.%pa", &ioaddr);
+++++++ if (err) {
+++++++ dev_err(dev, "Failed to register iommu in sysfs\n");
+++++++ return err;
+++++++ }
+++++++
+++++++ iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
+++++++ iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
+++++++
+++++++ err = iommu_device_register(&smmu->iommu);
+++++++ if (err) {
+++++++ dev_err(dev, "Failed to register iommu\n");
+++++++ return err;
+++++++ }
+++++++
platform_set_drvdata(pdev, smmu);
arm_smmu_device_reset(smmu);
+++ ++++ arm_smmu_test_smr_masks(smmu);
/* Oh, for a proper bus abstraction */
if (!iommu_present(&platform_bus_type))
x86_init.iommu.iommu_init = intel_iommu_init;
#endif
- - acpi_put_table(dmar_tbl);
- early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
-- - dmar_tbl = NULL;
++ + if (dmar_tbl) {
++ + acpi_put_table(dmar_tbl);
++ + dmar_tbl = NULL;
++ + }
up_write(&dmar_global_lock);
return ret ? 1 : -ENODEV;
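
The guard above reflects that the detect path can finish with dmar_tbl never having been set (no DMAR table present), so the put is skipped in that case, and the pointer is cleared after a successful put to prevent reuse.
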
pm_runtime_enable(dev);
--- --- of_iommu_set_ops(dev->of_node, &exynos_iommu_ops);
- iommu_register_instance(dev->fwnode, &exynos_iommu_ops);
-------
return 0;
}
iommu_group_remove_device(dev);
------- iommu_device_unlink(iommu->iommu_dev, dev);
+++++++ iommu_device_unlink(&iommu->iommu, dev);
+++++ + }
+++++ +
+++++ ++static void intel_iommu_get_resv_regions(struct device *device,
+++++ ++ struct list_head *head)
+++++ ++{
+++++ ++ struct iommu_resv_region *reg;
+++++ ++ struct dmar_rmrr_unit *rmrr;
+++++ ++ struct device *i_dev;
+++++ ++ int i;
+++++ ++
+++++ ++ rcu_read_lock();
+++++ ++ for_each_rmrr_units(rmrr) {
+++++ ++ for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
+++++ ++ i, i_dev) {
+++++ ++ if (i_dev != device)
+++++ ++ continue;
+++++ ++
+++++ ++ list_add_tail(&rmrr->resv->list, head);
+++++ ++ }
+++++ ++ }
+++++ ++ rcu_read_unlock();
+++++ ++
+++++ ++ reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
+++++ ++ IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
+++++ ++ 0, IOMMU_RESV_RESERVED);
+++++ ++ if (!reg)
+++++ ++ return;
+++++ ++ list_add_tail(&reg->list, head);
+++++ ++}
+++++ ++
+++++ ++static void intel_iommu_put_resv_regions(struct device *dev,
+++++ ++ struct list_head *head)
+++++ ++{
+++++ ++ struct iommu_resv_region *entry, *next;
+++++ ++
+++++ ++ list_for_each_entry_safe(entry, next, head, list) {
+++++ ++ if (entry->type == IOMMU_RESV_RESERVED)
+++++ ++ kfree(entry);
+++++ ++ }
+}
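
The pair above splits ownership: get_resv_regions() links the RMRR entries, which stay owned by the DMAR code, and allocates a fresh region for the IOAPIC window, which is why put_resv_regions() frees only entries of type IOMMU_RESV_RESERVED. A sketch of a caller, using the core iommu_get_resv_regions()/iommu_put_resv_regions() wrappers that appear later in this series (the pr_info() is illustrative):

	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list)
		pr_info("resv [%pa + 0x%zx] type %d\n",
			&region->start, region->length, region->type);
	/* Frees only what get_resv_regions() allocated (the RESERVED entry). */
	iommu_put_resv_regions(dev, &resv_regions);
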
+
#ifdef CONFIG_INTEL_IOMMU_SVM
+ +#define MAX_NR_PASID_BITS (20)
+ +static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
+ +{
+ + /*
+ + * Convert ecap_pss to the extended context entry pts encoding, also
+ + * respect the soft pasid_max value set by the iommu.
+ + * - number of PASID bits = ecap_pss + 1
+ + * - number of PASID table entries = 2^(pts + 5)
+ + * Therefore, pts = ecap_pss - 4
+ + * e.g. KBL ecap_pss = 0x13, PASID has 20 bits, pts = 15
+ + */
+ + if (ecap_pss(iommu->ecap) < 5)
+ + return 0;
+ +
+ + /* pasid_max is encoded as actual number of entries not the bits */
+ + return find_first_bit((unsigned long *)&iommu->pasid_max,
+ + MAX_NR_PASID_BITS) - 5;
+ +}
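
Checking the helper above against its own comment: with pasid_max = 1 << 20 (the 20 PASID bits of the KBL example), find_first_bit() returns 20, so the function yields pts = 20 - 5 = 15, giving 2^(15 + 5) = 2^20 table entries as expected.
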
+ +
int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
{
struct device_domain_info *info;
}
#endif /* CONFIG_INTEL_IOMMU_SVM */
------- static const struct iommu_ops intel_iommu_ops = {
----- - .capable = intel_iommu_capable,
----- - .domain_alloc = intel_iommu_domain_alloc,
----- - .domain_free = intel_iommu_domain_free,
----- - .attach_dev = intel_iommu_attach_device,
----- - .detach_dev = intel_iommu_detach_device,
----- - .map = intel_iommu_map,
----- - .unmap = intel_iommu_unmap,
----- - .map_sg = default_iommu_map_sg,
----- - .iova_to_phys = intel_iommu_iova_to_phys,
----- - .add_device = intel_iommu_add_device,
----- - .remove_device = intel_iommu_remove_device,
----- - .device_group = pci_device_group,
----- - .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
+++++++ const struct iommu_ops intel_iommu_ops = {
- .capable = intel_iommu_capable,
- .domain_alloc = intel_iommu_domain_alloc,
- .domain_free = intel_iommu_domain_free,
- .attach_dev = intel_iommu_attach_device,
- .detach_dev = intel_iommu_detach_device,
- .map = intel_iommu_map,
- .unmap = intel_iommu_unmap,
- .map_sg = default_iommu_map_sg,
- .iova_to_phys = intel_iommu_iova_to_phys,
- .add_device = intel_iommu_add_device,
- .remove_device = intel_iommu_remove_device,
- .device_group = pci_device_group,
- .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
+++++ ++ .capable = intel_iommu_capable,
+++++ ++ .domain_alloc = intel_iommu_domain_alloc,
+++++ ++ .domain_free = intel_iommu_domain_free,
+++++ ++ .attach_dev = intel_iommu_attach_device,
+++++ ++ .detach_dev = intel_iommu_detach_device,
+++++ ++ .map = intel_iommu_map,
+++++ ++ .unmap = intel_iommu_unmap,
+++++ ++ .map_sg = default_iommu_map_sg,
+++++ ++ .iova_to_phys = intel_iommu_iova_to_phys,
+++++ ++ .add_device = intel_iommu_add_device,
+++++ ++ .remove_device = intel_iommu_remove_device,
+++++ ++ .get_resv_regions = intel_iommu_get_resv_regions,
+++++ ++ .put_resv_regions = intel_iommu_put_resv_regions,
+++++ ++ .device_group = pci_device_group,
+++++ ++ .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
};
static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
return sprintf(buf, "%s\n", group->name);
}
- struct iommu_device *device;
+++++ ++/**
+++++ ++ * iommu_insert_resv_region - Insert a new region in the
+++++ ++ * list of reserved regions.
+++++ ++ * @new: new region to insert
+++++ ++ * @regions: list of regions
+++++ ++ *
+++++ ++ * The new element is sorted by address with respect to the other
+++++ ++ * regions of the same type. If it overlaps a region of the same
+++++ ++ * type, the two are merged; if it overlaps a region of a
+++++ ++ * different type, they are left separate.
+++++ ++ */
+++++ ++static int iommu_insert_resv_region(struct iommu_resv_region *new,
+++++ ++ struct list_head *regions)
+++++ ++{
+++++ ++ struct iommu_resv_region *region;
+++++ ++ phys_addr_t start = new->start;
+++++ ++ phys_addr_t end = new->start + new->length - 1;
+++++ ++ struct list_head *pos = regions->next;
+++++ ++
+++++ ++ while (pos != regions) {
+++++ ++ struct iommu_resv_region *entry =
+++++ ++ list_entry(pos, struct iommu_resv_region, list);
+++++ ++ phys_addr_t a = entry->start;
+++++ ++ phys_addr_t b = entry->start + entry->length - 1;
+++++ ++ int type = entry->type;
+++++ ++
+++++ ++ if (end < a) {
+++++ ++ goto insert;
+++++ ++ } else if (start > b) {
+++++ ++ pos = pos->next;
+++++ ++ } else if ((start >= a) && (end <= b)) {
+++++ ++ if (new->type == type)
+++++ ++ goto done;
+++++ ++ else
+++++ ++ pos = pos->next;
+++++ ++ } else {
+++++ ++ if (new->type == type) {
+++++ ++ phys_addr_t new_start = min(a, start);
+++++ ++ phys_addr_t new_end = max(b, end);
+++++ ++
+++++ ++ list_del(&entry->list);
+++++ ++ entry->start = new_start;
+++++ ++ entry->length = new_end - new_start + 1;
+++++ ++ iommu_insert_resv_region(entry, regions);
+++++ ++ } else {
+++++ ++ pos = pos->next;
+++++ ++ }
+++++ ++ }
+++++ ++ }
+++++ ++insert:
+++++ ++ region = iommu_alloc_resv_region(new->start, new->length,
+++++ ++ new->prot, new->type);
+++++ ++ if (!region)
+++++ ++ return -ENOMEM;
+++++ ++
+++++ ++ list_add_tail(&region->list, pos);
+++++ ++done:
+++++ ++ return 0;
+++++ ++}
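
To make the kernel-doc's merge rule concrete, a sketch that only works inside iommu.c (the function is static), with made-up addresses and error checks elided:

	struct iommu_resv_region *a, *b;
	LIST_HEAD(regions);

	a = iommu_alloc_resv_region(0x1000, 0x1000, 0, IOMMU_RESV_RESERVED);
	b = iommu_alloc_resv_region(0x1800, 0x1800, 0, IOMMU_RESV_RESERVED);
	iommu_insert_resv_region(a, &regions);	/* list: [0x1000, 0x1fff] */
	iommu_insert_resv_region(b, &regions);	/* overlaps, same type */
	/*
	 * The list now holds a single entry covering [0x1000, 0x2fff].
	 * Both inserts copy their argument, so a and b remain owned
	 * (and must be freed) by the caller.
	 */
	kfree(a);
	kfree(b);
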
+++++ ++
+++++ ++static int
+++++ ++iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
+++++ ++ struct list_head *group_resv_regions)
+++++ ++{
+++++ ++ struct iommu_resv_region *entry;
+++++ ++ int ret = 0;
+++++ ++
+++++ ++ list_for_each_entry(entry, dev_resv_regions, list) {
+++++ ++ ret = iommu_insert_resv_region(entry, group_resv_regions);
+++++ ++ if (ret)
+++++ ++ break;
+++++ ++ }
+++++ ++ return ret;
+++++ ++}
+++++ ++
+++++ ++int iommu_get_group_resv_regions(struct iommu_group *group,
+++++ ++ struct list_head *head)
+++++ ++{
++++++++ struct group_device *device;
+++++ ++ int ret = 0;
+++++ ++
+++++ ++ mutex_lock(&group->mutex);
+++++ ++ list_for_each_entry(device, &group->devices, list) {
+++++ ++ struct list_head dev_resv_regions;
+++++ ++
+++++ ++ INIT_LIST_HEAD(&dev_resv_regions);
+++++ ++ iommu_get_resv_regions(device->dev, &dev_resv_regions);
+++++ ++ ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
+++++ ++ iommu_put_resv_regions(device->dev, &dev_resv_regions);
+++++ ++ if (ret)
+++++ ++ break;
+++++ ++ }
+++++ ++ mutex_unlock(&group->mutex);
+++++ ++ return ret;
+++++ ++}
+++++ ++EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
+++++ ++
+++++ ++static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
+++++ ++ char *buf)
+++++ ++{
+++++ ++ struct iommu_resv_region *region, *next;
+++++ ++ struct list_head group_resv_regions;
+++++ ++ char *str = buf;
+++++ ++
+++++ ++ INIT_LIST_HEAD(&group_resv_regions);
+++++ ++ iommu_get_group_resv_regions(group, &group_resv_regions);
+++++ ++
+++++ ++ list_for_each_entry_safe(region, next, &group_resv_regions, list) {
+++++ ++ str += sprintf(str, "0x%016llx 0x%016llx %s\n",
+++++ ++ (long long int)region->start,
+++++ ++ (long long int)(region->start +
+++++ ++ region->length - 1),
+++++ ++ iommu_group_resv_type_string[region->type]);
+++++ ++ kfree(region);
+++++ ++ }
+++++ ++
+++++ ++ return (str - buf);
+++++ ++}
+++++ ++
static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
+++++ ++static IOMMU_GROUP_ATTR(reserved_regions, 0444,
+++++ ++ iommu_group_show_resv_regions, NULL);
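
With the attribute wired up above, every group directory gains a read-only reserved_regions file. On an Intel system, the IOAPIC window added by intel_iommu_get_resv_regions() would appear in /sys/kernel/iommu_groups/<n>/reserved_regions as a line like the following (assuming "reserved" is the type string for IOMMU_RESV_RESERVED):

	0x00000000fee00000 0x00000000feefffff reserved
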
+++++ ++
static void iommu_group_release(struct kobject *kobj)
{
struct iommu_group *group = to_iommu_group(kobj);
return ret;
}
- ----- struct iommu_instance {
- ----- struct list_head list;
- ----- struct fwnode_handle *fwnode;
- ----- const struct iommu_ops *ops;
- ----- };
- ----- static LIST_HEAD(iommu_instance_list);
- ----- static DEFINE_SPINLOCK(iommu_instance_lock);
- -----
- ----- void iommu_register_instance(struct fwnode_handle *fwnode,
- ----- const struct iommu_ops *ops)
- {
- struct iommu_instance *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
-
- if (WARN_ON(!iommu))
- return;
-
- of_node_get(to_of_node(fwnode));
- INIT_LIST_HEAD(&iommu->list);
- iommu->fwnode = fwnode;
- iommu->ops = ops;
- spin_lock(&iommu_instance_lock);
- list_add_tail(&iommu->list, &iommu_instance_list);
- spin_unlock(&iommu_instance_lock);
- }
-
- const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode)
+++++++ const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
+ {
- --- - struct iommu_instance *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
- --- -
- --- - if (WARN_ON(!iommu))
- --- - return;
- --- -
- --- - of_node_get(to_of_node(fwnode));
- --- - INIT_LIST_HEAD(&iommu->list);
- --- - iommu->fwnode = fwnode;
- --- - iommu->ops = ops;
- --- - spin_lock(&iommu_instance_lock);
- --- - list_add_tail(&iommu->list, &iommu_instance_list);
- --- - spin_unlock(&iommu_instance_lock);
- --- - }
- --- -
- --- - const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode)
- --- - {
- ----- struct iommu_instance *instance;
+ const struct iommu_ops *ops = NULL;
+++++++ struct iommu_device *iommu;
+
- ----- spin_lock(&iommu_instance_lock);
- ----- list_for_each_entry(instance, &iommu_instance_list, list)
- ----- if (instance->fwnode == fwnode) {
- ----- ops = instance->ops;
+++++++ spin_lock(&iommu_device_lock);
+++++++ list_for_each_entry(iommu, &iommu_device_list, list)
+++++++ if (iommu->fwnode == fwnode) {
+++++++ ops = iommu->ops;
+ break;
+ }
- ----- spin_unlock(&iommu_instance_lock);
+++++++ spin_unlock(&iommu_device_lock);
+ return ops;
+ }
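
Since the registered struct iommu_device now carries both the ops and the fwnode, it doubles as the lookup key that the deleted iommu_instance list used to provide. A fragment sketching the consumer side inside a hypothetical resolver (np is an OF node whose IOMMU driver may not have probed yet; the deferral policy is illustrative):

	const struct iommu_ops *ops = iommu_ops_from_fwnode(&np->fwnode);

	if (!ops)	/* the IOMMU driver has not registered yet */
		return -EPROBE_DEFER;	/* hypothetical: defer and retry */

	return iommu_fwspec_init(dev, &np->fwnode, ops);
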
+
int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
const struct iommu_ops *ops)
{
"iommu-map-mask", &iommu_spec.np, iommu_spec.args))
return NULL;
--- ---- ops = of_iommu_get_ops(iommu_spec.np);
- ops = iommu_get_instance(&iommu_spec.np->fwnode);
++++++++ ops = iommu_ops_from_fwnode(&iommu_spec.np->fwnode);
if (!ops || !ops->of_xlate ||
iommu_fwspec_init(&pdev->dev, &iommu_spec.np->fwnode, ops) ||
ops->of_xlate(&pdev->dev, &iommu_spec))
"#iommu-cells", idx,
&iommu_spec)) {
np = iommu_spec.np;
--- ---- ops = of_iommu_get_ops(np);
- ops = iommu_get_instance(&np->fwnode);
++++++++ ops = iommu_ops_from_fwnode(&np->fwnode);
if (!ops || !ops->of_xlate ||
iommu_fwspec_init(dev, &np->fwnode, ops) ||
const struct iommu_ops *ops);
void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
- ----- void iommu_register_instance(struct fwnode_handle *fwnode,
- ----- const struct iommu_ops *ops);
- ----- const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode);
+++++++ const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);
#else /* CONFIG_IOMMU_API */
return -EINVAL;
}
------- static inline struct device *iommu_device_create(struct device *parent,
------- void *drvdata,
------- const struct attribute_group **groups,
------- const char *fmt, ...)
+++++++ static inline int iommu_device_register(struct iommu_device *iommu)
+++++++ {
+++++++ return -ENODEV;
+++++++ }
+++++++
+++++++ static inline void iommu_device_set_ops(struct iommu_device *iommu,
+++++++ const struct iommu_ops *ops)
+ +++++ {
- return ERR_PTR(-ENODEV);
+ +++++ }
+ +++++
- static inline void iommu_device_destroy(struct device *dev)
+++++++ static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
+++++++ struct fwnode_handle *fwnode)
+++++ + {
- return ERR_PTR(-ENODEV);
+++++ + }
+++++ +
- static inline void iommu_device_destroy(struct device *dev)
+++++++ static inline void iommu_device_unregister(struct iommu_device *iommu)
+ + {
- --- - return ERR_PTR(-ENODEV);
+ + }
+ +
- --- - static inline void iommu_device_destroy(struct device *dev)
+++++++ static inline int iommu_device_sysfs_add(struct iommu_device *iommu,
+++++++ struct device *parent,
+++++++ const struct attribute_group **groups,
+++++++ const char *fmt, ...)
+++++++ {
+++++++ return -ENODEV;
+++++++ }
+++++++
+++++++ static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
{
}
return -ENODEV;
}
- ----- static inline void iommu_register_instance(struct fwnode_handle *fwnode,
- ----- const struct iommu_ops *ops)
- ----- {
- ----- }
- -----
+ static inline
- ----- const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode)
+++++++ const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
+ {
+ return NULL;
+ }
+
#endif /* CONFIG_IOMMU_API */
#endif /* __LINUX_IOMMU_H */