* devices attached when it is switched into IOMMUv2 mode.
*/
ret = -EBUSY;
-	if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK)
+	if (pdom->dev_cnt > 0 || pdom->flags & PD_IOMMUV2_MASK)
		goto out;

-	ret = -ENOMEM;
-	domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
-	if (domain->gcr3_tbl == NULL)
-		goto out;
-
-	domain->glx = levels;
-	domain->flags |= PD_IOMMUV2_MASK;
-
-	amd_iommu_domain_update(domain);
-
-	ret = 0;
+	if (!pdom->gcr3_tbl)
+		ret = domain_enable_v2(pdom, pasids);
+
out:
-	spin_unlock_irqrestore(&domain->lock, flags);
-
+	spin_unlock_irqrestore(&pdom->lock, flags);
return ret;
}
EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
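The open-coded GCR3 table setup removed above is folded into a new domain_enable_v2() helper, called only when the table has not been allocated yet. A plausible reconstruction of that helper from the deleted lines (the pasids-to-levels loop and the amd_iommu_max_glx_val bound are assumptions, since the helper body is not part of this hunk):

	static int domain_enable_v2(struct protection_domain *pdom, int pasids)
	{
		int levels;

		/* Assumed: each GCR3 table level covers 9 bits of PASID */
		for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
			levels += 1;

		if (levels > amd_iommu_max_glx_val)
			return -EINVAL;

		/* The remainder mirrors the code removed above */
		pdom->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
		if (pdom->gcr3_tbl == NULL)
			return -ENOMEM;

		pdom->glx    = levels;
		pdom->flags |= PD_IOMMUV2_MASK;

		amd_iommu_domain_update(pdom);

		return 0;
	}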
return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}
-	fl_sagaw = BIT(2) | (cap_fl1gp_support(iommu->cap) ? BIT(3) : 0);
+/*
+ * Calculate the Supported Adjusted Guest Address Widths of an IOMMU.
+ * Refer to 11.4.2 of the VT-d spec for the encoding of each bit of
+ * the returned SAGAW.
+ */
+static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu)
+{
+	unsigned long fl_sagaw, sl_sagaw;
+
+	fl_sagaw = BIT(2) | (cap_fl5lp_support(iommu->cap) ? BIT(3) : 0);
+	sl_sagaw = cap_sagaw(iommu->cap);
+
+	/* Second level only. */
+	if (!sm_supported(iommu) || !ecap_flts(iommu->ecap))
+		return sl_sagaw;
+
+	/* First level only. */
+	if (!ecap_slts(iommu->ecap))
+		return fl_sagaw;
+
+	return fl_sagaw & sl_sagaw;
+}
+
static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
unsigned long sagaw;
}
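Per section 11.4.2 of the VT-d specification, BIT(1) of SAGAW encodes 3-level (39-bit) paging, BIT(2) 4-level (48-bit) and BIT(3) 5-level (57-bit); a first-level (scalable-mode) page table always supports 4-level translation, which is why BIT(2) is set unconditionally and BIT(3) only when CAP.FL5LP is present. The hunk elides how __iommu_calculate_agaw() consumes the combined mask; a sketch under the assumption that it picks the widest AGAW not exceeding max_gaw (width_to_agaw() is the driver's existing conversion helper):

	static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
	{
		unsigned long sagaw;
		int agaw;

		sagaw = __iommu_calculate_sagaw(iommu);

		/* Walk down from the requested width to a supported one */
		for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--) {
			if (test_bit(agaw, &sagaw))
				break;
		}

		return agaw;
	}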
static struct device_domain_info *
-iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
-			u8 bus, u8 devfn)
+domain_lookup_dev_info(struct dmar_domain *domain,
+		       struct intel_iommu *iommu, u8 bus, u8 devfn)
{
struct device_domain_info *info;
+	unsigned long flags;

-	if (!iommu->qi)
-		return NULL;
-
-	spin_lock(&domain->lock);
+	spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry(info, &domain->devices, link) {
if (info->iommu == iommu && info->bus == bus &&
info->devfn == devfn) {
-			spin_unlock(&domain->lock);
-			return info->ats_supported ? info : NULL;
+			spin_unlock_irqrestore(&domain->lock, flags);
+			return info;
}
}
-	spin_unlock(&domain->lock);
+	spin_unlock_irqrestore(&domain->lock, flags);
return NULL;
}
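Note the semantic shift hidden in the rename: domain_lookup_dev_info() is now a pure lookup. It no longer bails out when the IOMMU lacks a queued-invalidation interface and no longer filters on ats_supported, so those checks move to the call site, roughly along these lines (a sketch; the actual caller is not part of this hunk):

	info = domain_lookup_dev_info(domain, iommu, bus, devfn);
	if (info && info->ats_supported && iommu->qi)
		iommu_enable_pci_caps(info);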
}
}
domain->has_iotlb_device = has_iotlb_device;
-	spin_unlock(&domain->lock);
+	spin_unlock_irqrestore(&domain->lock, flags);
}
-static void iommu_enable_dev_iotlb(struct device_domain_info *info)
+static void iommu_enable_pci_caps(struct device_domain_info *info)
{
struct pci_dev *pdev;
return iommu_fwspec_add_ids(dev, args->args, 1);
}
-static bool viommu_capable(enum iommu_cap cap)
+static bool viommu_capable(struct device *dev, enum iommu_cap cap)
+{
+	switch (cap) {
+	case IOMMU_CAP_CACHE_COHERENCY:
+		return true;
+	default:
+		return false;
+	}
+}
+
static struct iommu_ops viommu_ops = {
+	.capable = viommu_capable,
.domain_alloc = viommu_domain_alloc,
.probe_device = viommu_probe_device,
.probe_finalize = viommu_probe_finalize,
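With the ops change, every .capable() implementation now receives the device being queried; the core forwards it from device_iommu_capable(). A consumer could then ask, for instance (pdev here is a hypothetical struct pci_dev *):

	/* Does the IOMMU keep this device's DMA coherent with CPU caches? */
	if (device_iommu_capable(&pdev->dev, IOMMU_CAP_CACHE_COHERENCY))
		pr_info("cache-coherent DMA enforced\n");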