pci_seg->irq_lookup_table = NULL;
}
+static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ int i;
+
+ pci_seg->alias_table = (void *)__get_free_pages(GFP_KERNEL,
+ get_order(alias_table_size));
+ if (!pci_seg->alias_table)
+ return -ENOMEM;
+
+ /* Let each alias entry point to itself. */
+ for (i = 0; i <= amd_iommu_last_bdf; ++i)
+ pci_seg->alias_table[i] = i;
+
+ return 0;
+}
+
+static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ free_pages((unsigned long)pci_seg->alias_table,
+ get_order(alias_table_size));
+ pci_seg->alias_table = NULL;
+}
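
For context, both helpers assume the amd_iommu_pci_seg structure already carries the alias_table pointer and that alias_table_size covers one u16 entry per possible BDF. A minimal sketch of the lookup the table enables; the helper name is hypothetical and not part of this patch:

	/*
	 * Hypothetical helper: resolve a device's IVRS alias through its own
	 * PCI segment instead of the old global amd_iommu_alias_table[].
	 * Entries default to the devid itself (see alloc_alias_table() above)
	 * until the IVHD parser overrides them.
	 */
	static u16 pci_seg_alias(struct amd_iommu_pci_seg *pci_seg, u16 devid)
	{
		return pci_seg->alias_table[devid];
	}
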
+
/*
* Allocates the command buffer. This buffer is per AMD IOMMU. We can
* write commands to that buffer later and the IOMMU will execute them
u32 dev_i, ext_flags = 0;
bool alias = false;
struct ivhd_entry *e;
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
u32 ivhd_size;
int ret;
devid_to = e->ext >> 8;
set_dev_entry_from_acpi(iommu, devid , e->flags, 0);
set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
- amd_iommu_alias_table[devid] = devid_to;
+ pci_seg->alias_table[devid] = devid_to;
break;
case IVHD_DEV_ALIAS_RANGE:
devid = e->devid;
for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
if (alias) {
- amd_iommu_alias_table[dev_i] = devid_to;
+ pci_seg->alias_table[dev_i] = devid_to;
set_dev_entry_from_acpi(iommu,
devid_to, flags, ext_flags);
}
if (alloc_dev_table(pci_seg))
return NULL;
+ if (alloc_alias_table(pci_seg))
+ return NULL;
if (alloc_rlookup_table(pci_seg))
return NULL;
list_del(&pci_seg->list);
free_irq_lookup_table(pci_seg);
free_rlookup_table(pci_seg);
+ free_alias_table(pci_seg);
free_dev_table(pci_seg);
kfree(pci_seg);
}
static int __init early_amd_iommu_init(void)
{
struct acpi_table_header *ivrs_base;
- int i, remap_cache_sz, ret;
+ int remap_cache_sz, ret;
acpi_status status;
if (!amd_iommu_detected)
goto out;
/*
- * let all alias entries point to itself
- */
- for (i = 0; i <= amd_iommu_last_bdf; ++i)
- amd_iommu_alias_table[i] = i;
-
- /*
* never allocate domain 0 because it's used as the non-allocated and
* error value placeholder
*/
return 0;
}
-static void clone_aliases(struct device *dev)
+static void clone_aliases(struct amd_iommu *iommu, struct device *dev)
{
struct pci_dev *pdev;
* part of the PCI DMA aliases if its bus differs
* from the original device.
*/
- clone_alias(pdev, amd_iommu_alias_table[pci_dev_id(pdev)], NULL);
+ clone_alias(pdev, iommu->pci_seg->alias_table[pci_dev_id(pdev)], NULL);
pci_for_each_dma_alias(pdev, clone_alias, NULL);
}
-static void setup_aliases(struct device *dev)
+static void setup_aliases(struct amd_iommu *iommu, struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
u16 ivrs_alias;
/* For ACPI HID devices, there are no aliases */
* Add the IVRS alias to the pci aliases if it is on the same
* bus. The IVRS table may know about a quirk that we don't.
*/
- ivrs_alias = amd_iommu_alias_table[pci_dev_id(pdev)];
+ ivrs_alias = pci_seg->alias_table[pci_dev_id(pdev)];
if (ivrs_alias != pci_dev_id(pdev) &&
PCI_BUS_NUM(ivrs_alias) == pdev->bus->number)
pci_add_dma_alias(pdev, ivrs_alias & 0xff, 1);
- clone_aliases(dev);
+ clone_aliases(iommu, dev);
}
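
To make the same-bus check concrete, here is a worked example with made-up IDs (illustrative only, not taken from this patch):

	/* Device 0000:05:14.0 -> devid 0x05a0 (bus 0x05, devfn 0xa0). */
	u16 ivrs_alias = pci_seg->alias_table[0x05a0];	/* say 0x05a4 */

	/*
	 * 0x05a4 differs from 0x05a0 and PCI_BUS_NUM(0x05a4) == 0x05 matches
	 * pdev->bus->number, so devfn 0xa4 (device 0x14, function 4) is
	 * registered as an extra DMA alias:
	 */
	if (ivrs_alias != 0x05a0 && PCI_BUS_NUM(ivrs_alias) == pdev->bus->number)
		pci_add_dma_alias(pdev, ivrs_alias & 0xff, 1);

An alias on a different bus is not added as a PCI DMA alias here; clone_aliases() still copies the DTE settings to it, as its comment above notes.
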
static struct iommu_dev_data *find_dev_data(u16 devid)
return true;
}
-static int iommu_init_device(struct device *dev)
+static int iommu_init_device(struct amd_iommu *iommu, struct device *dev)
{
struct iommu_dev_data *dev_data;
int devid;
return -ENOMEM;
dev_data->dev = dev;
- setup_aliases(dev);
+ setup_aliases(iommu, dev);
/*
* By default we use passthrough mode for IOMMUv2 capable device.
return 0;
}
-static void iommu_ignore_device(struct device *dev)
+static void iommu_ignore_device(struct amd_iommu *iommu, struct device *dev)
{
int devid;
amd_iommu_rlookup_table[devid] = NULL;
memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
- setup_aliases(dev);
+ setup_aliases(iommu, dev);
}
static void amd_iommu_uninit_device(struct device *dev)
{
struct amd_iommu *iommu;
struct pci_dev *pdev = NULL;
+ struct amd_iommu_pci_seg *pci_seg;
u16 alias;
int ret;
if (ret)
return ret;
- alias = amd_iommu_alias_table[dev_data->devid];
+ pci_seg = iommu->pci_seg;
+ alias = pci_seg->alias_table[dev_data->devid];
if (alias != dev_data->devid) {
ret = iommu_flush_dte(iommu, alias);
if (ret)
/* Update device table */
set_dte_entry(dev_data->devid, domain,
ats, dev_data->iommu_v2);
- clone_aliases(dev_data->dev);
+ clone_aliases(iommu, dev_data->dev);
device_flush_dte(dev_data);
}
dev_data->domain = NULL;
list_del(&dev_data->list);
clear_dte_entry(dev_data->devid);
- clone_aliases(dev_data->dev);
+ clone_aliases(iommu, dev_data->dev);
/* Flush the DTE entry */
device_flush_dte(dev_data);
if (dev_iommu_priv_get(dev))
return &iommu->iommu;
- ret = iommu_init_device(dev);
+ ret = iommu_init_device(iommu, dev);
if (ret) {
if (ret != -ENOTSUPP)
dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
iommu_dev = ERR_PTR(ret);
- iommu_ignore_device(dev);
+ iommu_ignore_device(iommu, dev);
} else {
amd_iommu_set_pci_msi_domain(dev, iommu);
iommu_dev = &iommu->iommu;
struct iommu_dev_data *dev_data;
list_for_each_entry(dev_data, &domain->dev_list, list) {
+ struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev);
+
+ if (!iommu)
+ continue;
set_dte_entry(dev_data->devid, domain,
dev_data->ats.enabled, dev_data->iommu_v2);
- clone_aliases(dev_data->dev);
+ clone_aliases(iommu, dev_data->dev);
}
}
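
Since clone_aliases() and setup_aliases() now take the IOMMU explicitly, call sites that only hold a struct device resolve it first, as the loop above does with rlookup_amd_iommu(). A condensed sketch of that pattern; the wrapper name is hypothetical and the parameter types follow the surrounding code:

	/* Hypothetical wrapper for the per-device step of the loop above. */
	static void update_one_dte(struct iommu_dev_data *dev_data,
				   struct protection_domain *domain)
	{
		struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev);

		/* Skip devices whose IOMMU cannot be resolved, as above. */
		if (!iommu)
			return;

		set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled,
			      dev_data->iommu_v2);
		clone_aliases(iommu, dev_data->dev);
	}
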
{
struct irq_remap_table *table = NULL;
struct irq_remap_table *new_table = NULL;
+ struct amd_iommu_pci_seg *pci_seg;
struct amd_iommu *iommu;
unsigned long flags;
u16 alias;
if (!iommu)
goto out_unlock;
+ pci_seg = iommu->pci_seg;
table = irq_lookup_table[devid];
if (table)
goto out_unlock;
- alias = amd_iommu_alias_table[devid];
+ alias = pci_seg->alias_table[devid];
table = irq_lookup_table[alias];
if (table) {
set_remap_table_entry(iommu, devid, table);