int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
-#define DEFER_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-2))
-struct device_domain_info *get_domain_info(struct device *dev)
-{
-	struct device_domain_info *info;
-
-	if (!dev)
-		return NULL;
-
-	info = dev_iommu_priv_get(dev);
-	if (unlikely(info == DEFER_DEVICE_DOMAIN_INFO))
-		return NULL;
-
-	return info;
-}
-
DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);
	return &context[devfn];
}
-static bool attach_deferred(struct device *dev)
-{
-	return dev_iommu_priv_get(dev) == DEFER_DEVICE_DOMAIN_INFO;
-}
-
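The hunks above retire the DEFER_DEVICE_DOMAIN_INFO sentinel, a magic pointer parked in the dev_iommu_priv slot that forced every reader through get_domain_info()/attach_deferred(). For illustration, a minimal userspace sketch of that sentinel pattern and its pitfall; the names and types below are stand-ins, not the driver's:

#include <stdio.h>
#include <stdlib.h>

struct dom_info { void *domain; };		/* stand-in type */

/* The retired pattern: a magic pointer stored where a real object
 * normally lives, so every access needs a special-case check. */
#define DEFER_SENTINEL ((struct dom_info *)(-2))

static struct dom_info *priv;			/* models the priv slot */

static struct dom_info *get_info(void)
{
	if (priv == DEFER_SENTINEL)		/* forget this anywhere... */
		return NULL;			/* ...and you dereference -2 */
	return priv;
}

int main(void)
{
	priv = DEFER_SENTINEL;
	printf("deferred: %p\n", (void *)get_info());

	priv = calloc(1, sizeof(*priv));	/* a real object instead */
	printf("probed:   %p\n", (void *)get_info());
	free(priv);
	return 0;
}

Once probe always installs a real device_domain_info, the special case (and the chance of dereferencing -2) disappears.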
/**
* is_downstream_to_pci_bridge - test if a device belongs to the PCI
* sub-hierarchy of a candidate PCI-PCI bridge
	__iommu_flush_dev_iotlb(info, 0, MAX_AGAW_PFN_WIDTH);
}
-static inline void unlink_domain_info(struct device_domain_info *info)
-{
-	assert_spin_locked(&device_domain_lock);
-	list_del(&info->link);
-	list_del(&info->global);
-	if (info->dev)
-		dev_iommu_priv_set(info->dev, NULL);
-}
-
static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info, *tmp;
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
-struct dmar_domain *find_domain(struct device *dev)
-{
-	struct device_domain_info *info;
-
-	if (unlikely(!dev || !dev->iommu))
-		return NULL;
-
-	if (unlikely(attach_deferred(dev)))
-		return NULL;
-
-	/* No lock here, assumes no domain exit in normal case */
-	info = get_domain_info(dev);
-	if (likely(info))
-		return info->domain;
-
-	return NULL;
-}
-
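With find_domain() gone, a domain lookup collapses to reading info->domain from the per-device info, where NULL now simply means "not attached yet". A compilable sketch of the resulting shape, using simplified stand-in types rather than the driver's:

#include <stdio.h>

struct dmar_domain { int id; };
struct device_domain_info { struct dmar_domain *domain; };
struct device { void *iommu_priv; };

/* Stand-in modeling dev_iommu_priv_get() for this sketch. */
static void *dev_iommu_priv_get(struct device *dev)
{
	return dev->iommu_priv;
}

int main(void)
{
	struct device_domain_info info = { .domain = NULL };
	struct device dev = { .iommu_priv = &info };
	struct device_domain_info *p = dev_iommu_priv_get(&dev);

	/* The whole former find_domain() is now this one field read. */
	printf("domain %sattached\n", p->domain ? "" : "not ");
	return 0;
}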
static inline struct device_domain_info *
dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
{
						    struct device *dev,
						    struct dmar_domain *domain)
{
-	struct device_domain_info *info;
+	struct device_domain_info *info = dev_iommu_priv_get(dev);
	unsigned long flags;
	int ret;
-	info = kzalloc(sizeof(*info), GFP_KERNEL);
-	if (!info)
-		return NULL;
-
-	if (!dev_is_real_dma_subdevice(dev)) {
-		info->bus = bus;
-		info->devfn = devfn;
-		info->segment = iommu->segment;
-	} else {
-		struct pci_dev *pdev = to_pci_dev(dev);
-
-		info->bus = pdev->bus->number;
-		info->devfn = pdev->devfn;
-		info->segment = pci_domain_nr(pdev->bus);
-	}
-
-	info->dev = dev;
-	info->domain = domain;
-	info->iommu = iommu;
-
-	if (dev && dev_is_pci(dev)) {
-		struct pci_dev *pdev = to_pci_dev(info->dev);
-
-		if (ecap_dev_iotlb_support(iommu->ecap) &&
-		    pci_ats_supported(pdev) &&
-		    dmar_find_matched_atsr_unit(pdev))
-			info->ats_supported = 1;
-
-		if (sm_supported(iommu)) {
-			if (pasid_supported(iommu)) {
-				int features = pci_pasid_features(pdev);
-				if (features >= 0)
-					info->pasid_supported = features | 1;
-			}
-
-			if (info->ats_supported && ecap_prs(iommu->ecap) &&
-			    pci_pri_supported(pdev))
-				info->pri_supported = 1;
-		}
-	}
-
	spin_lock_irqsave(&device_domain_lock, flags);
+	info->domain = domain;
	spin_lock(&iommu->lock);
	ret = domain_attach_iommu(domain, iommu);
	spin_unlock(&iommu->lock);
-
	if (ret) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
-		kfree(info);
		return NULL;
	}
-
	list_add(&info->link, &domain->devices);
-	list_add(&info->global, &device_domain_list);
-	if (dev)
-		dev_iommu_priv_set(dev, info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
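The attach step that remains here publishes info->domain under device_domain_lock and takes the per-IOMMU lock strictly nested inside it. A toy model of that lock ordering, with pthread mutexes standing in for the spinlocks and hypothetical names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t device_domain_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t iommu_lock = PTHREAD_MUTEX_INITIALIZER;

struct info { void *domain; };

static int attach(struct info *info, void *domain)
{
	int ret = 0;

	pthread_mutex_lock(&device_domain_lock);
	info->domain = domain;			/* published under the outer lock */

	pthread_mutex_lock(&iommu_lock);	/* always nested, never first */
	/* ret = domain_attach_iommu(domain, iommu); */
	pthread_mutex_unlock(&iommu_lock);

	pthread_mutex_unlock(&device_domain_lock);
	return ret;
}

int main(void)
{
	struct info info = { 0 };
	int dom;

	printf("attach: %d\n", attach(&info, &dom));
	return 0;
}

Keeping one fixed order (device_domain_lock first, iommu->lock inside) is what keeps the nesting deadlock-free.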
		/* PASID table is mandatory for a PCI device in scalable mode. */
		intel_pasid_free_table(info->dev);
	}
-	unlink_domain_info(info);
+	list_del(&info->link);
	spin_lock_irqsave(&iommu->lock, flags);
	domain_detach_iommu(domain, iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);
-
-	kfree(info);
}
static void dmar_remove_one_dev_info(struct device *dev)
	unsigned long flags;
	spin_lock_irqsave(&device_domain_lock, flags);
-	info = get_domain_info(dev);
+	info = dev_iommu_priv_get(dev);
	if (info)
		__dmar_remove_one_dev_info(info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
	/* normally dev is not mapped */
	if (unlikely(domain_context_mapped(dev))) {
-		struct dmar_domain *old_domain;
+		struct device_domain_info *info = dev_iommu_priv_get(dev);

-		old_domain = find_domain(dev);
-		if (old_domain)
+		if (info->domain)
			dmar_remove_one_dev_info(dev);
	}
static struct iommu_device *intel_iommu_probe_device(struct device *dev)
{
+	struct pci_dev *pdev = dev_is_pci(dev) ? to_pci_dev(dev) : NULL;
+	struct device_domain_info *info;
	struct intel_iommu *iommu;
+	unsigned long flags;
+	u8 bus, devfn;
-	iommu = device_to_iommu(dev, NULL, NULL);
+	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return ERR_PTR(-ENODEV);
-	if (translation_pre_enabled(iommu))
-		dev_iommu_priv_set(dev, DEFER_DEVICE_DOMAIN_INFO);
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return ERR_PTR(-ENOMEM);
+
+	if (dev_is_real_dma_subdevice(dev)) {
+		info->bus = pdev->bus->number;
+		info->devfn = pdev->devfn;
+		info->segment = pci_domain_nr(pdev->bus);
+	} else {
+		info->bus = bus;
+		info->devfn = devfn;
+		info->segment = iommu->segment;
+	}
+
+	info->dev = dev;
+	info->iommu = iommu;
+	if (dev_is_pci(dev)) {
+		if (ecap_dev_iotlb_support(iommu->ecap) &&
+		    pci_ats_supported(pdev) &&
+		    dmar_find_matched_atsr_unit(pdev))
+			info->ats_supported = 1;
+
+		if (sm_supported(iommu)) {
+			if (pasid_supported(iommu)) {
+				int features = pci_pasid_features(pdev);
+
+				if (features >= 0)
+					info->pasid_supported = features | 1;
+			}
+
+			if (info->ats_supported && ecap_prs(iommu->ecap) &&
+			    pci_pri_supported(pdev))
+				info->pri_supported = 1;
+		}
+	}
+
+	spin_lock_irqsave(&device_domain_lock, flags);
+	list_add(&info->global, &device_domain_list);
+	dev_iommu_priv_set(dev, info);
+	spin_unlock_irqrestore(&device_domain_lock, flags);
	return &iommu->iommu;
}
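One detail in the probe path above is the pasid_supported encoding: pci_pasid_features() may legitimately return 0 (capability present, no extra feature bits), so the driver ORs in bit 0 as a "supported at all" marker. A small arithmetic sketch; the 0x6 value is an assumed example standing for feature bits, not a value taken from hardware:

#include <stdio.h>

int main(void)
{
	int features = 0x6;			/* assumed example feature bits */
	int pasid_supported = features | 1;	/* bit 0: supported at all */

	printf("supported=%d feature bits=%#x\n",
	       pasid_supported & 1, pasid_supported & ~1);
	return 0;
}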
static void intel_iommu_release_device(struct device *dev)
{
-	struct intel_iommu *iommu;
-
-	iommu = device_to_iommu(dev, NULL, NULL);
-	if (!iommu)
-		return;
+	struct device_domain_info *info = dev_iommu_priv_get(dev);
+	unsigned long flags;
	dmar_remove_one_dev_info(dev);
+	spin_lock_irqsave(&device_domain_lock, flags);
+	dev_iommu_priv_set(dev, NULL);
+	list_del(&info->global);
+	spin_unlock_irqrestore(&device_domain_lock, flags);
+
+	kfree(info);
	set_dma_ops(dev, NULL);
}
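Taken together, the two functions above move the info allocation from domain attach to device probe, so the structure lives for the whole probe-to-release window across any number of attach and detach cycles. A self-contained sketch of that pairing, with simplified stand-in types:

#include <stdlib.h>

struct device { void *priv; };
struct info { int bus; };

static int probe_device(struct device *dev)
{
	struct info *info = calloc(1, sizeof(*info));

	if (!info)
		return -1;		/* -ENOMEM in the driver */
	dev->priv = info;		/* models dev_iommu_priv_set() */
	return 0;
}

static void release_device(struct device *dev)
{
	struct info *info = dev->priv;

	dev->priv = NULL;		/* unpublish before freeing */
	free(info);
}

int main(void)
{
	struct device dev = { 0 };

	if (probe_device(&dev))
		return 1;
	/* ... attach and detach domains any number of times ... */
	release_device(&dev);
	return 0;
}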
int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
{
-	struct device_domain_info *info;
+	struct device_domain_info *info = dev_iommu_priv_get(dev);
	struct context_entry *context;
	struct dmar_domain *domain;
	unsigned long flags;
	u64 ctx_lo;
	int ret;
-	domain = find_domain(dev);
+	domain = info->domain;
	if (!domain)
		return -EINVAL;
	spin_lock(&iommu->lock);
	ret = -EINVAL;
-	info = get_domain_info(dev);
-	if (!info || !info->pasid_supported)
+	if (!info->pasid_supported)
		goto out;
	context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
static int intel_iommu_enable_sva(struct device *dev)
{
-	struct device_domain_info *info = get_domain_info(dev);
+	struct device_domain_info *info = dev_iommu_priv_get(dev);
	struct intel_iommu *iommu;
	int ret;
static int intel_iommu_disable_sva(struct device *dev)
{
-	struct device_domain_info *info = get_domain_info(dev);
+	struct device_domain_info *info = dev_iommu_priv_get(dev);
	struct intel_iommu *iommu = info->iommu;
	int ret;
static int intel_iommu_enable_iopf(struct device *dev)
{
-	struct device_domain_info *info = get_domain_info(dev);
+	struct device_domain_info *info = dev_iommu_priv_get(dev);
	if (info && info->pri_supported)
		return 0;
static bool intel_iommu_is_attach_deferred(struct device *dev)
{
-	return attach_deferred(dev);
+	struct device_domain_info *info = dev_iommu_priv_get(dev);
+
+	return translation_pre_enabled(info->iommu) && !info->domain;
}
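The rewritten predicate derives "attach is deferred" from observable state: firmware left translation enabled (e.g. a kdump kernel inheriting tables) and no domain has been attached yet. A compilable sketch of that logic with boolean stand-ins for translation_pre_enabled() and the domain pointer:

#include <stdbool.h>
#include <stdio.h>

struct iommu { bool pre_enabled; };	/* stands in for translation_pre_enabled() */
struct info { struct iommu *iommu; void *domain; };

static bool is_attach_deferred(struct info *info)
{
	return info->iommu->pre_enabled && !info->domain;
}

int main(void)
{
	struct iommu iommu = { .pre_enabled = true };
	struct info info = { .iommu = &iommu, .domain = NULL };

	printf("deferred=%d\n", is_attach_deferred(&info));

	info.domain = &iommu;	/* any attached domain clears deferral */
	printf("deferred=%d\n", is_attach_deferred(&info));
	return 0;
}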
/*