From: Joerg Roedel Date: Fri, 10 Feb 2017 14:13:10 +0000 (+0100) Subject: Merge branches 'iommu/fixes', 'arm/exynos', 'arm/renesas', 'arm/smmu', 'arm/mediatek... X-Git-Tag: v4.11-rc1~158^2 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=8d2932dd0634ebeb0a42df896976772bdb569bfe;p=platform%2Fkernel%2Flinux-exynos.git Merge branches 'iommu/fixes', 'arm/exynos', 'arm/renesas', 'arm/smmu', 'arm/mediatek', 'arm/core', 'x86/vt-d' and 'core' into next --- 8d2932dd0634ebeb0a42df896976772bdb569bfe diff --cc drivers/iommu/arm-smmu-v3.c index 4d6ec44,15c01c3,4d6ec44,947807c,4d6ec44,6cdd501,4d6ec44,5375137..5806a6a --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@@@@@@@@ -2599,23 -2576,7 -2599,23 -2592,23 -2599,23 -2620,23 -2599,23 -2607,24 +2621,24 @@@@@@@@@ static int arm_smmu_device_dt_probe(str else if (cells != 1) dev_err(dev, "invalid #iommu-cells value (%d)\n", cells); else - bypass = false; + ret = 0; + + parse_driver_options(smmu); + + if (of_dma_is_coherent(dev->of_node)) + smmu->features |= ARM_SMMU_FEAT_COHERENCY; + + return ret; + } + + static int arm_smmu_device_probe(struct platform_device *pdev) + { + int irq, ret; + struct resource *res; +++++++ resource_size_t ioaddr; + struct arm_smmu_device *smmu; + struct device *dev = &pdev->dev; + bool bypass; smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL); if (!smmu) { @@@@@@@@@ -2682,29 -2634,19 -2682,29 -2675,29 -2682,29 -2703,29 -2682,29 -2692,37 +2706,37 @@@@@@@@@ return ret; /* And we're up. Go go go! */ - ----- iommu_register_instance(dev->fwnode, &arm_smmu_ops); - of_iommu_set_ops(dev->of_node, &arm_smmu_ops); - #ifdef CONFIG_PCI - pci_request_acs(); - ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops); +++++++ ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, +++++++ "smmu3.%pa", &ioaddr); + +++++ if (ret) + +++++ return ret; +++++++ +++++++ iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops); +++++++ iommu_device_set_fwnode(&smmu->iommu, dev->fwnode); +++++++ +++++++ ret = iommu_device_register(&smmu->iommu); + + #ifdef CONFIG_PCI + if (pci_bus_type.iommu_ops != &arm_smmu_ops) { + pci_request_acs(); + ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops); + if (ret) + return ret; + } #endif #ifdef CONFIG_ARM_AMBA - ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops); - if (ret) - return ret; + if (amba_bustype.iommu_ops != &arm_smmu_ops) { + ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops); + if (ret) + return ret; + } #endif - return bus_set_iommu(&platform_bus_type, &arm_smmu_ops); + if (platform_bus_type.iommu_ops != &arm_smmu_ops) { + ret = bus_set_iommu(&platform_bus_type, &arm_smmu_ops); + if (ret) + return ret; + } + return 0; } static int arm_smmu_device_remove(struct platform_device *pdev) diff --cc drivers/iommu/arm-smmu.c index a60cded,c841eb7,a60cded,d01802e,a60cded,54368f5,a60cded,8fb4af2..abf6496 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@@@@@@@@ -1726,20 -1704,23 -1726,20 -1768,23 -1726,20 -1752,20 -1726,20 -1738,20 +1806,23 @@@@@@@@@ static int arm_smmu_device_cfg_probe(st * Fortunately, this also opens up a workaround for systems where the * ID register value has ended up configured incorrectly. */ - cttw_dt = of_dma_is_coherent(smmu->dev->of_node); cttw_reg = !!(id & ID0_CTTW); - if (cttw_dt) - smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK; - if (cttw_dt || cttw_reg) + if (cttw_fw || cttw_reg) dev_notice(smmu->dev, "\t%scoherent table walk\n", - cttw_dt ? "" : "non-"); - if (cttw_dt != cttw_reg) + cttw_fw ? 
"" : "non-"); + if (cttw_fw != cttw_reg) dev_notice(smmu->dev, - "\t(IDR0.CTTW overridden by dma-coherent property)\n"); + "\t(IDR0.CTTW overridden by FW configuration)\n"); /* Max. number of entries we have for stream matching/indexing */ --- ---- size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK); +++ ++++ if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) { +++ ++++ smmu->features |= ARM_SMMU_FEAT_EXIDS; +++ ++++ size = 1 << 16; +++ ++++ } else { +++ ++++ size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK); +++ ++++ } smmu->streamid_mask = size - 1; if (id & ID0_SMS) { --- ---- u32 smr; --- ---- smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH; size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK; if (size == 0) { @@@@@@@@@ -2002,19 -1915,6 -2002,19 -2031,19 -2002,19 -2028,19 -2002,19 -2014,20 +2069,20 @@@@@@@@@ static int arm_smmu_device_dt_probe(str return -ENODEV; } + if (of_dma_is_coherent(dev->of_node)) + smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK; + + return 0; + } + + static int arm_smmu_device_probe(struct platform_device *pdev) + { + struct resource *res; +++++++ resource_size_t ioaddr; + struct arm_smmu_device *smmu; + struct device *dev = &pdev->dev; + int num_irqs, i, err; + smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL); if (!smmu) { dev_err(dev, "failed to allocate arm_smmu_device\n"); @@@@@@@@@ -2022,15 -1922,11 -2022,15 -2051,15 -2022,15 -2048,15 -2022,15 -2035,16 +2090,16 @@@@@@@@@ } smmu->dev = dev; - data = of_device_get_match_data(dev); - smmu->version = data->version; - smmu->model = data->model; + if (dev->of_node) + err = arm_smmu_device_dt_probe(pdev, smmu); + else + err = arm_smmu_device_acpi_probe(pdev, smmu); + + if (err) + return err; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +++++++ ioaddr = res->start; smmu->base = devm_ioremap_resource(dev, res); if (IS_ERR(smmu->base)) return PTR_ERR(smmu->base); @@@@@@@@@ -2091,9 -1995,9 -2091,9 -2120,10 -2091,9 -2117,9 -2091,9 -2105,24 +2160,25 @@@@@@@@@ } } - ----- iommu_register_instance(dev->fwnode, &arm_smmu_ops); - of_iommu_set_ops(dev->of_node, &arm_smmu_ops); +++++++ err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL, +++++++ "smmu.%pa", &ioaddr); +++++++ if (err) { +++++++ dev_err(dev, "Failed to register iommu in sysfs\n"); +++++++ return err; +++++++ } +++++++ +++++++ iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops); +++++++ iommu_device_set_fwnode(&smmu->iommu, dev->fwnode); +++++++ +++++++ err = iommu_device_register(&smmu->iommu); +++++++ if (err) { +++++++ dev_err(dev, "Failed to register iommu\n"); +++++++ return err; +++++++ } +++++++ platform_set_drvdata(pdev, smmu); arm_smmu_device_reset(smmu); +++ ++++ arm_smmu_test_smr_masks(smmu); /* Oh, for a proper bus abstraction */ if (!iommu_present(&platform_bus_type)) diff --cc drivers/iommu/dmar.c index a88576d,58470f5,8ccbd70,8ccbd70,8ccbd70,8ccbd70,8ccbd70,fc13146..d9c0dec --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@@@@@@@@ -903,8 -904,8 -903,10 -903,10 -903,10 -903,10 -903,10 -905,8 +905,10 @@@@@@@@@ int __init detect_intel_iommu(void x86_init.iommu.iommu_init = intel_iommu_init; #endif - - acpi_put_table(dmar_tbl); - early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size); -- - dmar_tbl = NULL; ++ + if (dmar_tbl) { ++ + acpi_put_table(dmar_tbl); ++ + dmar_tbl = NULL; ++ + } up_write(&dmar_global_lock); return ret ? 
1 : -ENODEV; diff --cc drivers/iommu/exynos-iommu.c index 57ba0d3,fa529c2,57ba0d3,b79e4c4,57ba0d3,57ba0d3,57ba0d3,778eccc..a7e0821 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@@@@@@@@ -628,8 -629,8 -628,8 -628,8 -628,8 -628,8 -628,8 -642,6 +643,6 @@@@@@@@@ static int __init exynos_sysmmu_probe(s pm_runtime_enable(dev); --- --- of_iommu_set_ops(dev->of_node, &exynos_iommu_ops); - iommu_register_instance(dev->fwnode, &exynos_iommu_ops); ------- return 0; } diff --cc drivers/iommu/intel-iommu.c index 8a18525,a4407ea,8a18525,8a18525,8a18525,bce59a5,5d179c8,cbe7c49..f5e02f8 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@@@@@@@@ -5200,29 -5170,10 -5200,29 -5200,29 -5200,29 -5215,68 -5201,29 -5184,10 +5219,68 @@@@@@@@@ static void intel_iommu_remove_device(s iommu_group_remove_device(dev); ------- iommu_device_unlink(iommu->iommu_dev, dev); +++++++ iommu_device_unlink(&iommu->iommu, dev); +++++ + } +++++ + +++++ ++static void intel_iommu_get_resv_regions(struct device *device, +++++ ++ struct list_head *head) +++++ ++{ +++++ ++ struct iommu_resv_region *reg; +++++ ++ struct dmar_rmrr_unit *rmrr; +++++ ++ struct device *i_dev; +++++ ++ int i; +++++ ++ +++++ ++ rcu_read_lock(); +++++ ++ for_each_rmrr_units(rmrr) { +++++ ++ for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt, +++++ ++ i, i_dev) { +++++ ++ if (i_dev != device) +++++ ++ continue; +++++ ++ +++++ ++ list_add_tail(&rmrr->resv->list, head); +++++ ++ } +++++ ++ } +++++ ++ rcu_read_unlock(); +++++ ++ +++++ ++ reg = iommu_alloc_resv_region(IOAPIC_RANGE_START, +++++ ++ IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1, +++++ ++ 0, IOMMU_RESV_RESERVED); +++++ ++ if (!reg) +++++ ++ return; +++++ ++ list_add_tail(®->list, head); +++++ ++} +++++ ++ +++++ ++static void intel_iommu_put_resv_regions(struct device *dev, +++++ ++ struct list_head *head) +++++ ++{ +++++ ++ struct iommu_resv_region *entry, *next; +++++ ++ +++++ ++ list_for_each_entry_safe(entry, next, head, list) { +++++ ++ if (entry->type == IOMMU_RESV_RESERVED) +++++ ++ kfree(entry); +++++ ++ } +} + #ifdef CONFIG_INTEL_IOMMU_SVM + +#define MAX_NR_PASID_BITS (20) + +static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu) + +{ + + /* + + * Convert ecap_pss to extend context entry pts encoding, also + + * respect the soft pasid_max value set by the iommu. + + * - number of PASID bits = ecap_pss + 1 + + * - number of PASID table entries = 2^(pts + 5) + + * Therefore, pts = ecap_pss - 4 + + * e.g. 
KBL ecap_pss = 0x13, PASID has 20 bits, pts = 15 + + */ + + if (ecap_pss(iommu->ecap) < 5) + + return 0; + + + + /* pasid_max is encoded as actual number of entries not the bits */ + + return find_first_bit((unsigned long *)&iommu->pasid_max, + + MAX_NR_PASID_BITS) - 5; + +} + + int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev) { struct device_domain_info *info; @@@@@@@@@ -5332,20 -5281,20 -5332,20 -5332,20 -5332,20 -5386,22 -5333,20 -5295,20 +5390,22 @@@@@@@@@ struct intel_iommu *intel_svm_device_to } #endif /* CONFIG_INTEL_IOMMU_SVM */ ------- static const struct iommu_ops intel_iommu_ops = { ----- - .capable = intel_iommu_capable, ----- - .domain_alloc = intel_iommu_domain_alloc, ----- - .domain_free = intel_iommu_domain_free, ----- - .attach_dev = intel_iommu_attach_device, ----- - .detach_dev = intel_iommu_detach_device, ----- - .map = intel_iommu_map, ----- - .unmap = intel_iommu_unmap, ----- - .map_sg = default_iommu_map_sg, ----- - .iova_to_phys = intel_iommu_iova_to_phys, ----- - .add_device = intel_iommu_add_device, ----- - .remove_device = intel_iommu_remove_device, ----- - .device_group = pci_device_group, ----- - .pgsize_bitmap = INTEL_IOMMU_PGSIZES, +++++++ const struct iommu_ops intel_iommu_ops = { - .capable = intel_iommu_capable, - .domain_alloc = intel_iommu_domain_alloc, - .domain_free = intel_iommu_domain_free, - .attach_dev = intel_iommu_attach_device, - .detach_dev = intel_iommu_detach_device, - .map = intel_iommu_map, - .unmap = intel_iommu_unmap, - .map_sg = default_iommu_map_sg, - .iova_to_phys = intel_iommu_iova_to_phys, - .add_device = intel_iommu_add_device, - .remove_device = intel_iommu_remove_device, - .device_group = pci_device_group, - .pgsize_bitmap = INTEL_IOMMU_PGSIZES, +++++ ++ .capable = intel_iommu_capable, +++++ ++ .domain_alloc = intel_iommu_domain_alloc, +++++ ++ .domain_free = intel_iommu_domain_free, +++++ ++ .attach_dev = intel_iommu_attach_device, +++++ ++ .detach_dev = intel_iommu_detach_device, +++++ ++ .map = intel_iommu_map, +++++ ++ .unmap = intel_iommu_unmap, +++++ ++ .map_sg = default_iommu_map_sg, +++++ ++ .iova_to_phys = intel_iommu_iova_to_phys, +++++ ++ .add_device = intel_iommu_add_device, +++++ ++ .remove_device = intel_iommu_remove_device, +++++ ++ .get_resv_regions = intel_iommu_get_resv_regions, +++++ ++ .put_resv_regions = intel_iommu_put_resv_regions, +++++ ++ .device_group = pci_device_group, +++++ ++ .pgsize_bitmap = INTEL_IOMMU_PGSIZES, }; static void quirk_iommu_g4x_gfx(struct pci_dev *dev) diff --cc drivers/iommu/iommu.c index dbe7f65,9a2f196,dbe7f65,dbe7f65,dbe7f65,c37d701,dbe7f65,162d865..8ea14f4 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@@@@@@@@ -133,8 -133,8 -133,8 -133,8 -133,8 -139,131 -133,8 -152,8 +158,131 @@@@@@@@@ static ssize_t iommu_group_show_name(st return sprintf(buf, "%s\n", group->name); } +++++ ++/** +++++ ++ * iommu_insert_resv_region - Insert a new region in the +++++ ++ * list of reserved regions. +++++ ++ * @new: new region to insert +++++ ++ * @regions: list of regions +++++ ++ * +++++ ++ * The new element is sorted by address with respect to the other +++++ ++ * regions of the same type. In case it overlaps with another +++++ ++ * region of the same type, regions are merged. In case it +++++ ++ * overlaps with another region of different type, regions are +++++ ++ * not merged. 
+++++ ++ */ +++++ ++static int iommu_insert_resv_region(struct iommu_resv_region *new, +++++ ++ struct list_head *regions) +++++ ++{ +++++ ++ struct iommu_resv_region *region; +++++ ++ phys_addr_t start = new->start; +++++ ++ phys_addr_t end = new->start + new->length - 1; +++++ ++ struct list_head *pos = regions->next; +++++ ++ +++++ ++ while (pos != regions) { +++++ ++ struct iommu_resv_region *entry = +++++ ++ list_entry(pos, struct iommu_resv_region, list); +++++ ++ phys_addr_t a = entry->start; +++++ ++ phys_addr_t b = entry->start + entry->length - 1; +++++ ++ int type = entry->type; +++++ ++ +++++ ++ if (end < a) { +++++ ++ goto insert; +++++ ++ } else if (start > b) { +++++ ++ pos = pos->next; +++++ ++ } else if ((start >= a) && (end <= b)) { +++++ ++ if (new->type == type) +++++ ++ goto done; +++++ ++ else +++++ ++ pos = pos->next; +++++ ++ } else { +++++ ++ if (new->type == type) { +++++ ++ phys_addr_t new_start = min(a, start); +++++ ++ phys_addr_t new_end = max(b, end); +++++ ++ +++++ ++ list_del(&entry->list); +++++ ++ entry->start = new_start; +++++ ++ entry->length = new_end - new_start + 1; +++++ ++ iommu_insert_resv_region(entry, regions); +++++ ++ } else { +++++ ++ pos = pos->next; +++++ ++ } +++++ ++ } +++++ ++ } +++++ ++insert: +++++ ++ region = iommu_alloc_resv_region(new->start, new->length, +++++ ++ new->prot, new->type); +++++ ++ if (!region) +++++ ++ return -ENOMEM; +++++ ++ +++++ ++ list_add_tail(®ion->list, pos); +++++ ++done: +++++ ++ return 0; +++++ ++} +++++ ++ +++++ ++static int +++++ ++iommu_insert_device_resv_regions(struct list_head *dev_resv_regions, +++++ ++ struct list_head *group_resv_regions) +++++ ++{ +++++ ++ struct iommu_resv_region *entry; +++++ ++ int ret = 0; +++++ ++ +++++ ++ list_for_each_entry(entry, dev_resv_regions, list) { +++++ ++ ret = iommu_insert_resv_region(entry, group_resv_regions); +++++ ++ if (ret) +++++ ++ break; +++++ ++ } +++++ ++ return ret; +++++ ++} +++++ ++ +++++ ++int iommu_get_group_resv_regions(struct iommu_group *group, +++++ ++ struct list_head *head) +++++ ++{ - struct iommu_device *device; ++++++++ struct group_device *device; +++++ ++ int ret = 0; +++++ ++ +++++ ++ mutex_lock(&group->mutex); +++++ ++ list_for_each_entry(device, &group->devices, list) { +++++ ++ struct list_head dev_resv_regions; +++++ ++ +++++ ++ INIT_LIST_HEAD(&dev_resv_regions); +++++ ++ iommu_get_resv_regions(device->dev, &dev_resv_regions); +++++ ++ ret = iommu_insert_device_resv_regions(&dev_resv_regions, head); +++++ ++ iommu_put_resv_regions(device->dev, &dev_resv_regions); +++++ ++ if (ret) +++++ ++ break; +++++ ++ } +++++ ++ mutex_unlock(&group->mutex); +++++ ++ return ret; +++++ ++} +++++ ++EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions); +++++ ++ +++++ ++static ssize_t iommu_group_show_resv_regions(struct iommu_group *group, +++++ ++ char *buf) +++++ ++{ +++++ ++ struct iommu_resv_region *region, *next; +++++ ++ struct list_head group_resv_regions; +++++ ++ char *str = buf; +++++ ++ +++++ ++ INIT_LIST_HEAD(&group_resv_regions); +++++ ++ iommu_get_group_resv_regions(group, &group_resv_regions); +++++ ++ +++++ ++ list_for_each_entry_safe(region, next, &group_resv_regions, list) { +++++ ++ str += sprintf(str, "0x%016llx 0x%016llx %s\n", +++++ ++ (long long int)region->start, +++++ ++ (long long int)(region->start + +++++ ++ region->length - 1), +++++ ++ iommu_group_resv_type_string[region->type]); +++++ ++ kfree(region); +++++ ++ } +++++ ++ +++++ ++ return (str - buf); +++++ ++} +++++ ++ static IOMMU_GROUP_ATTR(name, S_IRUGO, 
iommu_group_show_name, NULL); +++++ ++static IOMMU_GROUP_ATTR(reserved_regions, 0444, +++++ ++ iommu_group_show_resv_regions, NULL); +++++ ++ static void iommu_group_release(struct kobject *kobj) { struct iommu_group *group = to_iommu_group(kobj); @@@@@@@@@ -1628,46 -1615,6 -1628,46 -1628,46 -1628,46 -1783,46 -1628,46 -1658,21 +1813,21 @@@@@@@@@ out return ret; } - ----- struct iommu_instance { - ----- struct list_head list; - ----- struct fwnode_handle *fwnode; - ----- const struct iommu_ops *ops; - ----- }; - ----- static LIST_HEAD(iommu_instance_list); - ----- static DEFINE_SPINLOCK(iommu_instance_lock); - ----- - ----- void iommu_register_instance(struct fwnode_handle *fwnode, - ----- const struct iommu_ops *ops) - { - struct iommu_instance *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); - - if (WARN_ON(!iommu)) - return; - - of_node_get(to_of_node(fwnode)); - INIT_LIST_HEAD(&iommu->list); - iommu->fwnode = fwnode; - iommu->ops = ops; - spin_lock(&iommu_instance_lock); - list_add_tail(&iommu->list, &iommu_instance_list); - spin_unlock(&iommu_instance_lock); - } - - const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode) +++++++ const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode) + { - --- - struct iommu_instance *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); - --- - - --- - if (WARN_ON(!iommu)) - --- - return; - --- - - --- - of_node_get(to_of_node(fwnode)); - --- - INIT_LIST_HEAD(&iommu->list); - --- - iommu->fwnode = fwnode; - --- - iommu->ops = ops; - --- - spin_lock(&iommu_instance_lock); - --- - list_add_tail(&iommu->list, &iommu_instance_list); - --- - spin_unlock(&iommu_instance_lock); - --- - } - --- - - --- - const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode) - --- - { - ----- struct iommu_instance *instance; + const struct iommu_ops *ops = NULL; +++++++ struct iommu_device *iommu; + - ----- spin_lock(&iommu_instance_lock); - ----- list_for_each_entry(instance, &iommu_instance_list, list) - ----- if (instance->fwnode == fwnode) { - ----- ops = instance->ops; +++++++ spin_lock(&iommu_device_lock); +++++++ list_for_each_entry(iommu, &iommu_device_list, list) +++++++ if (iommu->fwnode == fwnode) { +++++++ ops = iommu->ops; + break; + } - ----- spin_unlock(&iommu_instance_lock); +++++++ spin_unlock(&iommu_device_lock); + return ops; + } + int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, const struct iommu_ops *ops) { diff --cc drivers/iommu/of_iommu.c index 0f57ddc,5b82862,0f57ddc,d7f480a,0f57ddc,0f57ddc,0f57ddc,0f57ddc..2683e9f --- a/drivers/iommu/of_iommu.c +++ b/drivers/iommu/of_iommu.c @@@@@@@@@ -127,7 -166,7 -127,7 -127,7 -127,7 -127,7 -127,7 -127,7 +127,7 @@@@@@@@@ static const struct iommu_op "iommu-map-mask", &iommu_spec.np, iommu_spec.args)) return NULL; --- ---- ops = of_iommu_get_ops(iommu_spec.np); - ops = iommu_get_instance(&iommu_spec.np->fwnode); ++++++++ ops = iommu_ops_from_fwnode(&iommu_spec.np->fwnode); if (!ops || !ops->of_xlate || iommu_fwspec_init(&pdev->dev, &iommu_spec.np->fwnode, ops) || ops->of_xlate(&pdev->dev, &iommu_spec)) @@@@@@@@@ -157,7 -196,7 -157,7 -157,7 -157,7 -157,7 -157,7 -157,7 +157,7 @@@@@@@@@ const struct iommu_ops *of_iommu_config "#iommu-cells", idx, &iommu_spec)) { np = iommu_spec.np; --- ---- ops = of_iommu_get_ops(np); - ops = iommu_get_instance(&np->fwnode); ++++++++ ops = iommu_ops_from_fwnode(&np->fwnode); if (!ops || !ops->of_xlate || iommu_fwspec_init(dev, &np->fwnode, ops) || diff --cc include/linux/iommu.h index 
0ff5111,436dc21,0ff5111,0ff5111,0ff5111,add30c3,0ff5111,9e82fc8..6a6de18 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@@@@@@@@ -352,9 -351,6 -352,9 -352,9 -352,9 -371,9 -352,9 -382,7 +401,7 @@@@@@@@@ int iommu_fwspec_init(struct device *de const struct iommu_ops *ops); void iommu_fwspec_free(struct device *dev); int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids); - ----- void iommu_register_instance(struct fwnode_handle *fwnode, - ----- const struct iommu_ops *ops); - ----- const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode); +++++++ const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode); #else /* CONFIG_IOMMU_API */ @@@@@@@@@ -546,15 -542,15 -546,15 -546,15 -546,15 -571,15 -546,15 -575,34 +600,34 @@@@@@@@@ static inline int iommu_domain_set_attr return -EINVAL; } ------- static inline struct device *iommu_device_create(struct device *parent, ------- void *drvdata, ------- const struct attribute_group **groups, ------- const char *fmt, ...) +++++++ static inline int iommu_device_register(struct iommu_device *iommu) +++++++ { +++++++ return -ENODEV; +++++++ } +++++++ +++++++ static inline void iommu_device_set_ops(struct iommu_device *iommu, +++++++ const struct iommu_ops *ops) + +++++ { - return ERR_PTR(-ENODEV); + +++++ } + +++++ - static inline void iommu_device_destroy(struct device *dev) +++++++ static inline void iommu_device_set_fwnode(struct iommu_device *iommu, +++++++ struct fwnode_handle *fwnode) +++++ + { - return ERR_PTR(-ENODEV); +++++ + } +++++ + - static inline void iommu_device_destroy(struct device *dev) +++++++ static inline void iommu_device_unregister(struct iommu_device *iommu) + + { - --- - return ERR_PTR(-ENODEV); + + } + + - --- - static inline void iommu_device_destroy(struct device *dev) +++++++ static inline int iommu_device_sysfs_add(struct iommu_device *iommu, +++++++ struct device *parent, +++++++ const struct attribute_group **groups, +++++++ const char *fmt, ...) +++++++ { +++++++ return -ENODEV; +++++++ } +++++++ +++++++ static inline void iommu_device_sysfs_remove(struct iommu_device *iommu) { } @@@@@@@@@ -584,17 -580,6 -584,17 -584,17 -584,17 -609,17 -584,17 -632,12 +657,12 @@@@@@@@@ static inline int iommu_fwspec_add_ids( return -ENODEV; } - ----- static inline void iommu_register_instance(struct fwnode_handle *fwnode, - ----- const struct iommu_ops *ops) - ----- { - ----- } - ----- + static inline - ----- const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode) +++++++ const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode) + { + return NULL; + } + #endif /* CONFIG_IOMMU_API */ #endif /* __LINUX_IOMMU_H */
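
Note on the registration pattern: the drivers touched by this merge (arm-smmu, arm-smmu-v3, exynos, intel) move from iommu_register_instance()/of_iommu_set_ops() to the new struct iommu_device interface whose stubs appear in the include/linux/iommu.h hunks above. A minimal probe/remove sketch of that sequence follows; it is not code from this series — "struct my_smmu" and "my_iommu_ops" are invented names, and a real driver would of course fill in its callbacks and hardware setup.

	#include <linux/iommu.h>
	#include <linux/platform_device.h>
	#include <linux/slab.h>

	/* Driver-private state with the core handle embedded in it. */
	struct my_smmu {
		struct iommu_device	iommu;
	};

	/* Callback table omitted for brevity; a real driver fills this in. */
	static const struct iommu_ops my_iommu_ops = { };

	static int my_smmu_probe(struct platform_device *pdev)
	{
		struct my_smmu *smmu;
		int ret;

		smmu = devm_kzalloc(&pdev->dev, sizeof(*smmu), GFP_KERNEL);
		if (!smmu)
			return -ENOMEM;

		/* Expose the instance in sysfs, then register it with the core. */
		ret = iommu_device_sysfs_add(&smmu->iommu, &pdev->dev, NULL,
					     "my-smmu.%s", dev_name(&pdev->dev));
		if (ret)
			return ret;

		iommu_device_set_ops(&smmu->iommu, &my_iommu_ops);
		iommu_device_set_fwnode(&smmu->iommu, pdev->dev.fwnode);

		ret = iommu_device_register(&smmu->iommu);
		if (ret) {
			iommu_device_sysfs_remove(&smmu->iommu);
			return ret;
		}

		platform_set_drvdata(pdev, smmu);
		return 0;
	}

	static int my_smmu_remove(struct platform_device *pdev)
	{
		struct my_smmu *smmu = platform_get_drvdata(pdev);

		/* Tear down in the reverse order of probe. */
		iommu_device_unregister(&smmu->iommu);
		iommu_device_sysfs_remove(&smmu->iommu);
		return 0;
	}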
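
The iommu_insert_resv_region() comment above describes how per-device reserved regions are folded into the group-level list exported by the new reserved_regions sysfs attribute: regions of the same type are kept sorted by address and merged when they overlap, while regions of different types are never merged. A standalone illustration of that merge rule, in plain userspace C with invented struct and helper names (it is not the kernel code, only a sketch of the semantics):

	#include <stdio.h>

	struct resv { unsigned long long start, length; int type; };

	/* Merge r2 into r1 if both have the same type and the ranges overlap. */
	static int try_merge(struct resv *r1, const struct resv *r2)
	{
		unsigned long long a1 = r1->start, b1 = r1->start + r1->length - 1;
		unsigned long long a2 = r2->start, b2 = r2->start + r2->length - 1;

		if (r1->type != r2->type || b1 < a2 || b2 < a1)
			return 0;	/* different type or disjoint: keep both */

		r1->start  = a1 < a2 ? a1 : a2;
		r1->length = (b1 > b2 ? b1 : b2) - r1->start + 1;
		return 1;
	}

	int main(void)
	{
		/* An existing region and a new, overlapping one of the same type. */
		struct resv existing = { 0x8800, 0x1800, 0 };
		struct resv incoming = { 0x8000, 0x1000, 0 };

		if (try_merge(&existing, &incoming))
			printf("merged: 0x%016llx 0x%016llx\n", existing.start,
			       existing.start + existing.length - 1);
		/* Prints: merged: 0x0000000000008000 0x0000000000009fff */
		return 0;
	}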
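
The intel_iommu_get_pts() comment above carries a small derivation: the number of PASID bits is ecap_pss + 1, the PASID table holds 2^(pts + 5) entries, hence pts = ecap_pss - 4. A worked check of the Kaby Lake figure quoted there (ecap_pss = 0x13), using plain userspace arithmetic in place of the kernel's find_first_bit():

	#include <stdio.h>

	int main(void)
	{
		unsigned int ecap_pss = 0x13;			/* value from the comment */
		unsigned int pasid_bits = ecap_pss + 1;		/* 20 PASID bits */
		unsigned long pasid_max = 1UL << pasid_bits;	/* 2^20 table entries */

		/* For a power of two, the lowest set bit is the bit index itself. */
		unsigned int pts = __builtin_ctzl(pasid_max) - 5;

		printf("pasid bits: %u, pts: %u, entries: %lu\n",
		       pasid_bits, pts, 1UL << (pts + 5));	/* 20, 15, 1048576 */
		return 0;
	}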