struct kmem_cache *amd_iommu_irq_cache;
static void update_domain(struct protection_domain *domain);
------- static int protection_domain_init(struct protection_domain *domain);
static void detach_device(struct device *dev);
++++++ static void update_and_flush_device_table(struct protection_domain *domain,
++++++ struct domain_pgtable *pgtable);
/****************************************************************************
*
static int iommu_init_device(struct device *dev)
{
struct iommu_dev_data *dev_data;
-- - struct amd_iommu *iommu;
int devid;
------- if (dev->archdata.iommu)
+++++++ if (dev_iommu_priv_get(dev))
return 0;
devid = get_device_id(dev);
dev_data->iommu_v2 = iommu->is_iommu_v2;
}
------- dev->archdata.iommu = dev_data;
-- -
-- - iommu_device_link(&iommu->iommu, dev);
+++++++ dev_iommu_priv_set(dev, dev_data);
return 0;
}
setup_aliases(dev);
}
-- - static void iommu_uninit_device(struct device *dev)
++ + static void amd_iommu_uninit_device(struct device *dev)
{
struct iommu_dev_data *dev_data;
-- - - struct amd_iommu *iommu;
------- int devid;
------
------ devid = get_device_id(dev);
------ if (devid < 0)
------ return;
- - -
- - - iommu = amd_iommu_rlookup_table[devid];
------- dev_data = search_dev_data(devid);
+++++++ dev_data = dev_iommu_priv_get(dev);
if (!dev_data)
return;
if (dev_data->domain)
detach_device(dev);
-- - iommu_device_unlink(&iommu->iommu, dev);
-- -
-- - iommu_group_remove_device(dev);
-- -
-- - /* Remove dma-ops */
-- - dev->dma_ops = NULL;
+++++++ dev_iommu_priv_set(dev, NULL);
+++ +
/*
* We keep dev_data around for unplugged devices and reuse it when the
* device is re-plugged - not doing so would introduce a ton of races.
return freelist;
}
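Note: the hunks above convert the driver from the old dev->archdata.iommu pointer to the generic dev_iommu_priv_get()/dev_iommu_priv_set() helpers. As a rough sketch (assuming the core keeps the pointer in dev->iommu->priv, as introduced by the dev_iommu rework; the real accessors live in include/linux/iommu.h), they amount to:

static inline void *dev_iommu_priv_get(struct device *dev)
{
	/* per-device private pointer owned by the IOMMU driver */
	return dev->iommu->priv;
}

static inline void dev_iommu_priv_set(struct device *dev, void *priv)
{
	dev->iommu->priv = priv;
}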
------- static void free_pagetable(struct protection_domain *domain)
+++++++ static void free_pagetable(struct domain_pgtable *pgtable)
{
- struct domain_pgtable pgtable;
------ unsigned long root = (unsigned long)domain->pt_root;
struct page *freelist = NULL;
- amd_iommu_domain_get_pgtable(domain, &pgtable);
- atomic64_set(&domain->pt_root, 0);
--- - BUG_ON(domain->mode < PAGE_MODE_NONE ||
--- - domain->mode > PAGE_MODE_6_LEVEL);
++++++ unsigned long root;
+ +
- BUG_ON(pgtable.mode < PAGE_MODE_NONE ||
- pgtable.mode > PAGE_MODE_6_LEVEL);
- - - freelist = free_sub_pt(root, domain->mode, freelist);
+++++++ if (pgtable->mode == PAGE_MODE_NONE)
+++++++ return;
+
- root = (unsigned long)pgtable.root;
- freelist = free_sub_pt(root, pgtable.mode, freelist);
+++++++ BUG_ON(pgtable->mode < PAGE_MODE_NONE ||
+++++++ pgtable->mode > PAGE_MODE_6_LEVEL);
+ + +
+++++++ root = (unsigned long)pgtable->root;
+++++++ freelist = free_sub_pt(root, pgtable->mode, freelist);
free_page_list(freelist);
}
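Note: free_pagetable() now works on a struct domain_pgtable snapshot rather than the live domain, because domain->pt_root has become a single atomic64 that encodes both the root pointer and the paging mode (see the amd_iommu_domain_encode_pgtable()/atomic64_set() pairs removed above). A hedged sketch of the decode side, assuming the mode sits in the low bits of the otherwise page-aligned root:

static void amd_iommu_domain_get_pgtable(struct protection_domain *domain,
					 struct domain_pgtable *pgtable)
{
	u64 pt_root = atomic64_read(&domain->pt_root);

	pgtable->root = (u64 *)(pt_root & PAGE_MASK); /* page-aligned root pointer */
	pgtable->mode = pt_root & 7;                  /* assumed: low 3 bits carry the mode */
}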
BUG_ON(!is_power_of_2(page_size));
------ while (address > PM_LEVEL_SIZE(domain->mode))
------ *updated = increase_address_space(domain, address, gfp) || *updated;
++++++ amd_iommu_domain_get_pgtable(domain, &pgtable);
+ + +
- - - level = domain->mode - 1;
- - - pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
++++++ while (address > PM_LEVEL_SIZE(pgtable.mode)) {
++++++ /*
++++++ * Return an error if there is no memory to update the
++++++ * page-table.
++++++ */
++++++ if (!increase_address_space(domain, address, gfp))
++++++ return NULL;
+ + +
++++++ /* Read new values to check if update was successful */
++++++ amd_iommu_domain_get_pgtable(domain, &pgtable);
++++++ }
++++++
++++++
++++++ level = pgtable.mode - 1;
++++++ pte = &pgtable.root[PM_LEVEL_INDEX(level, address)];
address = PAGE_SIZE_ALIGN(address, page_size);
end_lvl = PAGE_SIZE_LEVEL(page_size);
free_page((unsigned long)domain->gcr3_tbl);
}
------- /*
------- * Free a domain, only used if something went wrong in the
------- * allocation path and we need to free an already allocated page table
------- */
------- static void dma_ops_domain_free(struct protection_domain *domain)
------- {
------- if (!domain)
------- return;
-------
------- iommu_put_dma_cookie(&domain->domain);
-------
------- free_pagetable(domain);
-------
------- if (domain->id)
------- domain_id_free(domain->id);
-------
------- kfree(domain);
------- }
-------
------- /*
------- * Allocates a new protection domain usable for the dma_ops functions.
------- * It also initializes the page table and the address allocator data
------- * structures required for the dma_ops interface
------- */
------- static struct protection_domain *dma_ops_domain_alloc(void)
------- {
------- struct protection_domain *domain;
- u64 *pt_root, root;
-------
------- domain = kzalloc(sizeof(struct protection_domain), GFP_KERNEL);
------- if (!domain)
------- return NULL;
-------
------- if (protection_domain_init(domain))
- goto free_domain;
-
- pt_root = (void *)get_zeroed_page(GFP_KERNEL);
- if (!pt_root)
------- goto free_domain;
-------
- root = amd_iommu_domain_encode_pgtable(pt_root, PAGE_MODE_3_LEVEL);
- atomic64_set(&domain->pt_root, root);
------ domain->mode = PAGE_MODE_3_LEVEL;
------ domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
------- domain->flags = PD_DMA_OPS_MASK;
------ if (!domain->pt_root)
------ goto free_domain;
-------
------- if (iommu_get_dma_cookie(&domain->domain) == -ENOMEM)
------- goto free_domain;
-------
------- return domain;
-------
------- free_domain:
------- dma_ops_domain_free(domain);
-------
------- return NULL;
------- }
-------
------- /*
------- * little helper function to check whether a given protection domain is a
------- * dma_ops domain
------- */
------- static bool dma_ops_domain(struct protection_domain *domain)
------- {
------- return domain->flags & PD_DMA_OPS_MASK;
------- }
-------
static void set_dte_entry(u16 devid, struct protection_domain *domain,
++++++ struct domain_pgtable *pgtable,
bool ats, bool ppr)
{
u64 pte_root = 0;
iommu = amd_iommu_rlookup_table[devid];
--- - if (get_dev_data(dev))
+++++++ if (dev_iommu_priv_get(dev))
++ + return &iommu->iommu;
++ +
ret = iommu_init_device(dev);
if (ret) {
if (ret != -ENOTSUPP)
/* Domains are initialized for this device - have a look what we ended up with */
domain = iommu_get_domain_for_dev(dev);
-- - if (domain->type == IOMMU_DOMAIN_IDENTITY)
-- - dev_data->passthrough = true;
-- - else if (domain->type == IOMMU_DOMAIN_DMA)
++ + if (domain->type == IOMMU_DOMAIN_DMA)
iommu_setup_dma_ops(dev, IOVA_START_PFN << PAGE_SHIFT, 0);
-- -
-- - out:
-- - iommu_completion_wait(iommu);
-- -
-- - return 0;
}
-- - static void amd_iommu_remove_device(struct device *dev)
++ + static void amd_iommu_release_device(struct device *dev)
{
+++++++ int devid = get_device_id(dev);
struct amd_iommu *iommu;
------- int devid;
if (!check_device(dev))
return;
------- devid = get_device_id(dev);
------- if (devid < 0)
------- return;
-------
iommu = amd_iommu_rlookup_table[devid];
-- - iommu_uninit_device(dev);
++ + amd_iommu_uninit_device(dev);
iommu_completion_wait(iommu);
}
static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
{
------- struct protection_domain *pdomain;
- u64 *pt_root, root;
+++++++ struct protection_domain *domain;
+++++++ int mode = DEFAULT_PGTABLE_LEVEL;
------- switch (type) {
------- case IOMMU_DOMAIN_UNMANAGED:
------- pdomain = protection_domain_alloc();
------- if (!pdomain)
------- return NULL;
+++++++ if (type == IOMMU_DOMAIN_IDENTITY)
+++++++ mode = PAGE_MODE_NONE;
- pt_root = (void *)get_zeroed_page(GFP_KERNEL);
- if (!pt_root) {
------ pdomain->mode = PAGE_MODE_3_LEVEL;
------ pdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
------ if (!pdomain->pt_root) {
------- protection_domain_free(pdomain);
------- return NULL;
------- }
+++++++ domain = protection_domain_alloc(mode);
+++++++ if (!domain)
+++++++ return NULL;
- root = amd_iommu_domain_encode_pgtable(pt_root, PAGE_MODE_3_LEVEL);
- atomic64_set(&pdomain->pt_root, root);
------ pdomain->domain.geometry.aperture_start = 0;
------ pdomain->domain.geometry.aperture_end = ~0ULL;
------ pdomain->domain.geometry.force_aperture = true;
+++++++ domain->domain.geometry.aperture_start = 0;
+++++++ domain->domain.geometry.aperture_end = ~0ULL;
+++++++ domain->domain.geometry.force_aperture = true;
------ break;
------ case IOMMU_DOMAIN_DMA:
------ pdomain = dma_ops_domain_alloc();
------ if (!pdomain) {
------ pr_err("Failed to allocate\n");
------ return NULL;
------ }
------ break;
------ case IOMMU_DOMAIN_IDENTITY:
------ pdomain = protection_domain_alloc();
------ if (!pdomain)
------ return NULL;
+++++++ if (type == IOMMU_DOMAIN_DMA &&
+++++++ iommu_get_dma_cookie(&domain->domain) == -ENOMEM)
+++++++ goto free_domain;
------ pdomain->mode = PAGE_MODE_NONE;
------- break;
------ default:
------ return NULL;
------ }
+++++++ return &domain->domain;
- atomic64_set(&pdomain->pt_root, PAGE_MODE_NONE);
- break;
------ return &pdomain->domain;
+++++++ free_domain:
+++++++ protection_domain_free(domain);
++++++
+++++++ return NULL;
}
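Reading only the added lines, the resulting allocator comes out roughly as follows (exact whitespace aside); all page-table and domain-id setup is now delegated to protection_domain_alloc(mode):

static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
{
	struct protection_domain *domain;
	int mode = DEFAULT_PGTABLE_LEVEL;

	if (type == IOMMU_DOMAIN_IDENTITY)
		mode = PAGE_MODE_NONE;

	domain = protection_domain_alloc(mode);
	if (!domain)
		return NULL;

	domain->domain.geometry.aperture_start = 0;
	domain->domain.geometry.aperture_end   = ~0ULL;
	domain->domain.geometry.force_aperture = true;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&domain->domain) == -ENOMEM)
		goto free_domain;

	return &domain->domain;

free_domain:
	protection_domain_free(domain);

	return NULL;
}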
static void amd_iommu_domain_free(struct iommu_domain *dom)
if (!dom)
return;
------- switch (dom->type) {
------- case IOMMU_DOMAIN_DMA:
------- /* Now release the domain */
------- dma_ops_domain_free(domain);
------- break;
------- default:
- amd_iommu_domain_get_pgtable(domain, &pgtable);
-
- if (pgtable.mode != PAGE_MODE_NONE)
------ if (domain->mode != PAGE_MODE_NONE)
------- free_pagetable(domain);
+++++++ if (dom->type == IOMMU_DOMAIN_DMA)
+++++++ iommu_put_dma_cookie(&domain->domain);
------- if (domain->flags & PD_IOMMUV2_MASK)
------- free_gcr3_table(domain);
+++++++ if (domain->flags & PD_IOMMUV2_MASK)
+++++++ free_gcr3_table(domain);
------- protection_domain_free(domain);
------- break;
------- }
+++++++ protection_domain_free(domain);
}
static void amd_iommu_detach_device(struct iommu_domain *dom,
amd_iommu_flush_iotlb_all(domain);
}
--- - dev_data = get_dev_data(dev);
++ + static int amd_iommu_def_domain_type(struct device *dev)
++ + {
++ + struct iommu_dev_data *dev_data;
++ +
+++++++ dev_data = dev_iommu_priv_get(dev);
++ + if (!dev_data)
++ + return 0;
++ +
++ + if (dev_data->iommu_v2)
++ + return IOMMU_DOMAIN_IDENTITY;
++ +
++ + return 0;
++ + }
++ +
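The new .def_domain_type callback lets a driver override the global default domain type for individual devices; returning 0 means "no preference". A hypothetical driver hook following the same pattern (dev_needs_passthrough() is an illustrative placeholder, not a real helper):

static int foo_def_domain_type(struct device *dev)
{
	/* Force an identity (passthrough) default domain where translation
	 * would break the device; otherwise defer to the core's default. */
	if (dev_needs_passthrough(dev))	/* hypothetical helper */
		return IOMMU_DOMAIN_IDENTITY;

	return 0;
}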
const struct iommu_ops amd_iommu_ops = {
.capable = amd_iommu_capable,
.domain_alloc = amd_iommu_domain_alloc,
void amd_iommu_domain_direct_map(struct iommu_domain *dom)
{
struct protection_domain *domain = to_pdomain(dom);
++++++ struct domain_pgtable pgtable;
unsigned long flags;
- u64 pt_root;
spin_lock_irqsave(&domain->lock, flags);
++++++ /* First save pgtable configuration */
++++++ amd_iommu_domain_get_pgtable(domain, &pgtable);
++++++
/* Update data structure */
- pt_root = amd_iommu_domain_encode_pgtable(NULL, PAGE_MODE_NONE);
- atomic64_set(&domain->pt_root, pt_root);
------ domain->mode = PAGE_MODE_NONE;
+++++++ atomic64_set(&domain->pt_root, 0);
/* Make changes visible to IOMMUs */
update_domain(domain);
- /* Restore old pgtable in domain->ptroot to free page-table */
- pt_root = amd_iommu_domain_encode_pgtable(pgtable.root, pgtable.mode);
- atomic64_set(&domain->pt_root, pt_root);
-
/* Page-table is not visible to IOMMU anymore, so free it */
------- free_pagetable(domain);
+++++++ free_pagetable(&pgtable);
spin_unlock_irqrestore(&domain->lock, flags);
}
return;
master = dev_iommu_priv_get(dev);
--- ---- smmu = master->smmu;
arm_smmu_detach_dev(master);
-- - iommu_group_remove_device(dev);
-- - iommu_device_unlink(&smmu->iommu, dev);
arm_smmu_disable_pasid(master);
kfree(master);
iommu_fwspec_free(dev);
if (translation_pre_enabled(iommu))
dev->archdata.iommu = DEFER_DEVICE_DOMAIN_INFO;
-- - group = iommu_group_get_for_dev(dev);
-- -
-- - if (IS_ERR(group)) {
-- - ret = PTR_ERR(group);
-- - goto unlink;
-- - }
-- -
-- - iommu_group_put(group);
-- -
-- - domain = iommu_get_domain_for_dev(dev);
-- - dmar_domain = to_dmar_domain(domain);
-- - if (domain->type == IOMMU_DOMAIN_DMA) {
-- - if (device_def_domain_type(dev) == IOMMU_DOMAIN_IDENTITY) {
-- - ret = iommu_request_dm_for_dev(dev);
-- - if (ret) {
-- - dmar_remove_one_dev_info(dev);
-- - dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
-- - domain_add_dev_info(si_domain, dev);
-- - dev_info(dev,
-- - "Device uses a private identity domain.\n");
-- - }
-- - }
-- - } else {
-- - if (device_def_domain_type(dev) == IOMMU_DOMAIN_DMA) {
-- - ret = iommu_request_dma_domain_for_dev(dev);
-- - if (ret) {
-- - dmar_remove_one_dev_info(dev);
-- - dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
-- - if (!get_private_domain_for_dev(dev)) {
-- - dev_warn(dev,
-- - "Failed to get a private domain.\n");
-- - ret = -ENOMEM;
-- - goto unlink;
-- - }
-- -
-- - dev_info(dev,
-- - "Device uses a private dma domain.\n");
-- - }
-- - }
-- - }
-- -
---- --- if (device_needs_bounce(dev)) {
---- --- dev_info(dev, "Use Intel IOMMU bounce page dma_ops\n");
---- --- set_dma_ops(dev, &bounce_dma_ops);
---- --- }
-- -
-- - return 0;
---- ---
-- - unlink:
-- - iommu_device_unlink(&iommu->iommu, dev);
-- - return ret;
++ + return &iommu->iommu;
}
-- - static void intel_iommu_remove_device(struct device *dev)
++ + static void intel_iommu_release_device(struct device *dev)
{
struct intel_iommu *iommu;
u8 bus, devfn;
dmar_remove_one_dev_info(dev);
-- - iommu_group_remove_device(dev);
++++ +++ set_dma_ops(dev, NULL);
++++ +++}
++ ++
-- - iommu_device_unlink(&iommu->iommu, dev);
++++ +++static void intel_iommu_probe_finalize(struct device *dev)
++++ +++{
++++ +++ struct iommu_domain *domain;
++ ++
++++ +++ domain = iommu_get_domain_for_dev(dev);
if (device_needs_bounce(dev))
++++ +++ set_dma_ops(dev, &bounce_dma_ops);
++++ +++ else if (domain && domain->type == IOMMU_DOMAIN_DMA)
++++ +++ set_dma_ops(dev, &intel_dma_ops);
++++ +++ else
set_dma_ops(dev, NULL);
}
.map = intel_iommu_map,
.unmap = intel_iommu_unmap,
.iova_to_phys = intel_iommu_iova_to_phys,
-- - .add_device = intel_iommu_add_device,
-- - .remove_device = intel_iommu_remove_device,
++ + .probe_device = intel_iommu_probe_device,
++++ +++ .probe_finalize = intel_iommu_probe_finalize,
++ + .release_device = intel_iommu_release_device,
.get_resv_regions = intel_iommu_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
.apply_resv_region = intel_iommu_apply_resv_region,
.dev_enable_feat = intel_iommu_dev_enable_feat,
.dev_disable_feat = intel_iommu_dev_disable_feat,
.is_attach_deferred = intel_iommu_is_attach_deferred,
++ + .def_domain_type = device_def_domain_type,
.pgsize_bitmap = INTEL_IOMMU_PGSIZES,
++++ +++#ifdef CONFIG_INTEL_IOMMU_SVM
++++ +++ .cache_invalidate = intel_iommu_sva_invalidate,
++++ +++ .sva_bind_gpasid = intel_svm_bind_gpasid,
++++ +++ .sva_unbind_gpasid = intel_svm_unbind_gpasid,
++++ +++ .sva_bind = intel_svm_bind,
++++ +++ .sva_unbind = intel_svm_unbind,
++++ +++ .sva_get_pasid = intel_svm_get_pasid,
++++ +++#endif
};
static void quirk_iommu_igfx(struct pci_dev *dev)
return !!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API);
}
--- -static int iommu_alloc_default_domain(struct device *dev);
++++++ +static int iommu_alloc_default_domain(struct iommu_group *group,
++++++ + struct device *dev);
++ + static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
++ + unsigned type);
++ + static int __iommu_attach_device(struct iommu_domain *domain,
++ + struct device *dev);
++ + static int __iommu_attach_group(struct iommu_domain *domain,
++ + struct iommu_group *group);
++ + static void __iommu_detach_group(struct iommu_domain *domain,
++ + struct iommu_group *group);
++ + static int iommu_create_device_direct_mappings(struct iommu_group *group,
++ + struct device *dev);
++ + static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
++ +
#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
struct iommu_group_attribute iommu_group_attr_##_name = \
__ATTR(_name, _mode, _show, _store)
dev->iommu = NULL;
}
-- - int iommu_probe_device(struct device *dev)
++ + static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
++ + struct iommu_device *iommu_dev;
++ + struct iommu_group *group;
int ret;
-- - WARN_ON(dev->iommu_group);
+ if (!ops)
-- - return -EINVAL;
++ + + return -ENODEV;
+
if (!dev_iommu_get(dev))
return -ENOMEM;
return ret;
}
-- - void iommu_release_device(struct device *dev)
++ + int iommu_probe_device(struct device *dev)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
++ + struct iommu_group *group;
++ + int ret;
-- - if (dev->iommu_group)
-- - ops->remove_device(dev);
++ + ret = __iommu_probe_device(dev, NULL);
++ + if (ret)
++ + goto err_out;
++ +
++++++ + group = iommu_group_get(dev);
++++++ + if (!group)
++++++ + goto err_release;
++++++ +
++ + /*
++ + * Try to allocate a default domain - needs support from the
++ + * IOMMU driver. There are still some drivers which don't
++ + * support default domains, so the return value is not yet
++ + * checked.
++ + */
--- - iommu_alloc_default_domain(dev);
--- -
--- - group = iommu_group_get(dev);
--- - if (!group)
--- - goto err_release;
++++++ + iommu_alloc_default_domain(group, dev);
++ +
++ + if (group->default_domain)
++ + ret = __iommu_attach_device(group->default_domain, dev);
++ +
++ + iommu_create_device_direct_mappings(group, dev);
++ +
++ + iommu_group_put(group);
++ +
++ + if (ret)
++ + goto err_release;
++ +
++ + if (ops->probe_finalize)
++ + ops->probe_finalize(dev);
++ +
++ + return 0;
++ +
++ + err_release:
++ + iommu_release_device(dev);
++ +
++ + err_out:
++ + return ret;
-- - if (dev->iommu) {
-- - module_put(ops->owner);
-- - dev_iommu_free(dev);
-- - }
}
-- - static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
-- - unsigned type);
-- - static int __iommu_attach_device(struct iommu_domain *domain,
-- - struct device *dev);
-- - static int __iommu_attach_group(struct iommu_domain *domain,
-- - struct iommu_group *group);
-- - static void __iommu_detach_group(struct iommu_domain *domain,
-- - struct iommu_group *group);
++ + void iommu_release_device(struct device *dev)
++ + {
++ + const struct iommu_ops *ops = dev->bus->iommu_ops;
++ +
++ + if (!dev->iommu)
++ + return;
++ +
++ + iommu_device_unlink(dev->iommu->iommu_dev, dev);
++ + iommu_group_remove_device(dev);
++ +
++ + ops->release_device(dev);
++ +
++ + module_put(ops->owner);
++ + dev_iommu_free(dev);
++ + }
static int __init iommu_set_def_domain_type(char *str)
{
dev->iommu_group = group;
-- - iommu_group_create_direct_mappings(group, dev);
-- -
mutex_lock(&group->mutex);
list_add_tail(&device->list, &group->devices);
------ if (group->domain)
++++++ if (group->domain && !iommu_is_attach_deferred(group->domain, dev))
ret = __iommu_attach_device(group->domain, dev);
mutex_unlock(&group->mutex);
if (ret)
}
EXPORT_SYMBOL_GPL(fsl_mc_device_group);
--- -static int iommu_alloc_default_domain(struct device *dev)
++ + static int iommu_get_def_domain_type(struct device *dev)
++ + {
++ + const struct iommu_ops *ops = dev->bus->iommu_ops;
++ + unsigned int type = 0;
++ +
++ + if (ops->def_domain_type)
++ + type = ops->def_domain_type(dev);
++ +
++ + return (type == 0) ? iommu_def_domain_type : type;
++ + }
++ +
++ + static int iommu_group_alloc_default_domain(struct bus_type *bus,
++ + struct iommu_group *group,
++ + unsigned int type)
++ + {
++ + struct iommu_domain *dom;
++ +
++ + dom = __iommu_domain_alloc(bus, type);
++ + if (!dom && type != IOMMU_DOMAIN_DMA) {
++ + dom = __iommu_domain_alloc(bus, IOMMU_DOMAIN_DMA);
++ + if (dom)
++ + pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA",
++ + type, group->name);
++ + }
++ +
++ + if (!dom)
++ + return -ENOMEM;
++ +
++ + group->default_domain = dom;
++ + if (!group->domain)
++ + group->domain = dom;
++ +
++ + if (!iommu_dma_strict) {
++ + int attr = 1;
++ + iommu_domain_set_attr(dom,
++ + DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
++ + &attr);
++ + }
++ +
++ + return 0;
++ + }
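For context: the iommu_def_domain_type and iommu_dma_strict globals consulted here are normally driven from the kernel command line (see iommu_set_def_domain_type() below); hedged examples of the relevant parameters:

iommu.passthrough=1    # default domains become IOMMU_DOMAIN_IDENTITY
iommu.strict=0         # non-strict TLB invalidation, matching the
                       # DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE attribute set above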
++ +
--- - struct iommu_group *group;
++++++ +static int iommu_alloc_default_domain(struct iommu_group *group,
++++++ + struct device *dev)
++ + {
--- - group = iommu_group_get(dev);
--- - if (!group)
--- - return -ENODEV;
--- -
++ + unsigned int type;
++ +
++ + if (group->default_domain)
++ + return 0;
++ +
++ + type = iommu_get_def_domain_type(dev);
++ +
++ + return iommu_group_alloc_default_domain(dev->bus, group, type);
++ + }
++ +
/**
* iommu_group_get_for_dev - Find or create the IOMMU group for a device
* @dev: target device
if (IS_ERR(group))
return group;
-- - /*
-- - * Try to allocate a default domain - needs support from the
-- - * IOMMU driver.
-- - */
-- - if (!group->default_domain) {
-- - struct iommu_domain *dom;
-- -
-- - dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
-- - if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
-- - dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
-- - if (dom) {
-- - dev_warn(dev,
-- - "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
-- - iommu_def_domain_type);
-- - }
-- - }
-- -
-- - group->default_domain = dom;
-- - if (!group->domain)
-- - group->domain = dom;
-- -
-- - if (dom && !iommu_dma_strict) {
-- - int attr = 1;
-- - iommu_domain_set_attr(dom,
-- - DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
-- - &attr);
-- - }
-- - }
-- -
ret = iommu_group_add_device(group, dev);
-- - if (ret) {
-- - iommu_group_put(group);
-- - return ERR_PTR(ret);
-- - }
++ + if (ret)
++ + goto out_put_group;
return group;
++ +
++ + out_put_group:
++ + iommu_group_put(group);
++ +
++ + return ERR_PTR(ret);
}
- EXPORT_SYMBOL_GPL(iommu_group_get_for_dev);
struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
return 0;
}
--- - const struct iommu_ops *ops;
--- - int ret;
++ + struct __group_domain_type {
++ + struct device *dev;
++ + unsigned int type;
++ + };
++ +
++ + static int probe_get_default_domain_type(struct device *dev, void *data)
++ + {
++ + const struct iommu_ops *ops = dev->bus->iommu_ops;
++ + struct __group_domain_type *gtype = data;
++ + unsigned int type = 0;
++ +
++ + if (ops->def_domain_type)
++ + type = ops->def_domain_type(dev);
++ +
++ + if (type) {
++ + if (gtype->type && gtype->type != type) {
++ + dev_warn(dev, "Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n",
++ + iommu_domain_type_str(type),
++ + dev_name(gtype->dev),
++ + iommu_domain_type_str(gtype->type));
++ + gtype->type = 0;
++ + }
++ +
++ + if (!gtype->dev) {
++ + gtype->dev = dev;
++ + gtype->type = type;
++ + }
++ + }
++ +
++ + return 0;
++ + }
++ +
++ + static void probe_alloc_default_domain(struct bus_type *bus,
++ + struct iommu_group *group)
++ + {
++ + struct __group_domain_type gtype;
++ +
++ + memset(&gtype, 0, sizeof(gtype));
++ +
++ + /* Ask for default domain requirements of all devices in the group */
++ + __iommu_group_for_each_dev(group, &gtype,
++ + probe_get_default_domain_type);
++ +
++ + if (!gtype.type)
++ + gtype.type = iommu_def_domain_type;
++ +
++ + iommu_group_alloc_default_domain(bus, group, gtype.type);
++ +
++ + }
++ +
++ + static int iommu_group_do_dma_attach(struct device *dev, void *data)
++ + {
++ + struct iommu_domain *domain = data;
- ret = __iommu_attach_device(domain, dev);
-
- - ops = domain->ops;
- -
--- - if (ret == 0 && ops->probe_finalize)
--- - ops->probe_finalize(dev);
--- -
--- - return ret;
++ +
++++++ + return __iommu_attach_device(domain, dev);
++ + }
++ +
++ + static int __iommu_group_dma_attach(struct iommu_group *group)
++ + {
++ + return __iommu_group_for_each_dev(group, group->default_domain,
++ + iommu_group_do_dma_attach);
++ + }
++ +
++++++ +static int iommu_group_do_probe_finalize(struct device *dev, void *data)
++++++ +{
++++++ + struct iommu_domain *domain = data;
++++++ +
++++++ + if (domain->ops->probe_finalize)
++++++ + domain->ops->probe_finalize(dev);
++++++ +
++++++ + return 0;
++++++ +}
++++++ +
++++++ +static void __iommu_group_dma_finalize(struct iommu_group *group)
++++++ +{
++++++ + __iommu_group_for_each_dev(group, group->default_domain,
++++++ + iommu_group_do_probe_finalize);
++++++ +}
++++++++
++ + static int iommu_do_create_direct_mappings(struct device *dev, void *data)
++ + {
++ + struct iommu_group *group = data;
++ +
++ + iommu_create_device_direct_mappings(group, dev);
++ +
++ + return 0;
++ + }
++ +
++ + static int iommu_group_create_direct_mappings(struct iommu_group *group)
++ + {
++ + return __iommu_group_for_each_dev(group, group,
++ + iommu_do_create_direct_mappings);
++ + }
++ +
++ + int bus_iommu_probe(struct bus_type *bus)
++ + {
++ + struct iommu_group *group, *next;
++ + LIST_HEAD(group_list);
++ + int ret;
++ +
++ + /*
++ + * This code-path does not allocate the default domain when
++ + * creating the iommu group, so do it after the groups are
++ + * created.
++ + */
++ + ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);
++ + if (ret)
++ + return ret;
++ +
++ + list_for_each_entry_safe(group, next, &group_list, entry) {
++ + /* Remove item from the list */
++ + list_del_init(&group->entry);
++ +
++ + mutex_lock(&group->mutex);
++ +
++ + /* Try to allocate default domain */
++ + probe_alloc_default_domain(bus, group);
++ +
++ + if (!group->default_domain) {
++ + mutex_unlock(&group->mutex);
++ + continue;
++ + }
++ +
++ + iommu_group_create_direct_mappings(group);
++ +
++ + ret = __iommu_group_dma_attach(group);
++ +
++ + mutex_unlock(&group->mutex);
++ +
++ + if (ret)
++ + break;
++++++ +
++++++ + __iommu_group_dma_finalize(group);
++ + }
++ +
++ + return ret;
++ + }
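bus_iommu_probe() is presumably invoked from iommu_bus_init() below, after the bus notifier is registered (the call itself is outside this fragment); a hedged sketch of the assumed call site:

	/* in iommu_bus_init(), assumed: probe all existing devices on the bus */
	err = bus_iommu_probe(bus);
	if (err)
		goto out_err;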
++ +
static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
{
-- - int err;
struct notifier_block *nb;
++ + int err;
nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
if (!nb)
}
EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);
---- ---static int
---- ---request_default_domain_for_dev(struct device *dev, unsigned long type)
---- ---{
---- --- struct iommu_domain *domain;
---- --- struct iommu_group *group;
---- --- int ret;
---- ---
---- --- /* Device must already be in a group before calling this function */
---- --- group = iommu_group_get(dev);
---- --- if (!group)
---- --- return -EINVAL;
---- ---
---- --- mutex_lock(&group->mutex);
---- ---
---- --- ret = 0;
---- --- if (group->default_domain && group->default_domain->type == type)
---- --- goto out;
---- ---
---- --- /* Don't change mappings of existing devices */
---- --- ret = -EBUSY;
---- --- if (iommu_group_device_count(group) != 1)
---- --- goto out;
---- ---
---- --- ret = -ENOMEM;
---- --- domain = __iommu_domain_alloc(dev->bus, type);
---- --- if (!domain)
---- --- goto out;
---- ---
---- --- /* Attach the device to the domain */
---- --- ret = __iommu_attach_group(domain, group);
---- --- if (ret) {
---- --- iommu_domain_free(domain);
---- --- goto out;
---- --- }
---- ---
---- --- /* Make the domain the default for this group */
---- --- if (group->default_domain)
---- --- iommu_domain_free(group->default_domain);
---- --- group->default_domain = domain;
---- ---
-- - iommu_group_create_direct_mappings(group, dev);
-- -- iommu_create_device_direct_mappings(group, dev);
---- ---
---- --- dev_info(dev, "Using iommu %s mapping\n",
---- --- type == IOMMU_DOMAIN_DMA ? "dma" : "direct");
---- ---
---- --- ret = 0;
---- ---out:
---- --- mutex_unlock(&group->mutex);
---- --- iommu_group_put(group);
---- ---
---- --- return ret;
---- ---}
---- ---
---- ---/* Request that a device is direct mapped by the IOMMU */
---- ---int iommu_request_dm_for_dev(struct device *dev)
---- ---{
---- --- return request_default_domain_for_dev(dev, IOMMU_DOMAIN_IDENTITY);
---- ---}
---- ---
---- ---/* Request that a device can't be direct mapped by the IOMMU */
---- ---int iommu_request_dma_domain_for_dev(struct device *dev)
---- ---{
---- --- return request_default_domain_for_dev(dev, IOMMU_DOMAIN_DMA);
---- ---}
---- ---
void iommu_set_default_passthrough(bool cmd_line)
{
if (cmd_line)