static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;
-static void dmar_init_reserved_ranges(void)
+static int dmar_init_reserved_ranges(void)
{
struct pci_dev *pdev = NULL;
struct iova *iova;
/* IOAPIC ranges shouldn't be accessed by DMA */
iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
IOVA_PFN(IOAPIC_RANGE_END));
- if (!iova)
+ if (!iova) {
printk(KERN_ERR "Reserve IOAPIC range failed\n");
+ return -ENODEV;
+ }
/* Reserve all PCI MMIO to avoid peer-to-peer access */
for_each_pci_dev(pdev) {
iova = reserve_iova(&reserved_iova_list,
IOVA_PFN(r->start),
IOVA_PFN(r->end));
- if (!iova)
+ if (!iova) {
printk(KERN_ERR "Reserve iova failed\n");
+ return -ENODEV;
+ }
}
}
-
+ return 0;
}
static void domain_reserve_special_ranges(struct dmar_domain *domain)
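In the hunk above, the variable r comes from a per-device loop over PCI resources that this excerpt does not show: the driver walks every memory BAR of every PCI device and reserves it in the reserved IOVA tree, so DMA addresses can never collide with device MMIO (the peer-to-peer concern the comment mentions). Below is a simplified sketch of that surrounding loop with the newly added error propagation; the helper name is invented for illustration, and it assumes it sits in intel-iommu.c so that reserved_iova_list and IOVA_PFN() are in scope.

/*
 * Illustrative sketch, not the verbatim kernel loop: reserve every
 * memory BAR of every PCI device, and propagate failure to the caller
 * instead of only printing an error (the change this patch makes).
 */
static int reserve_pci_mmio_sketch(void)
{
	struct pci_dev *pdev = NULL;
	int i;

	for_each_pci_dev(pdev) {
		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &pdev->resource[i];

			/* skip unused slots and non-memory resources */
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			if (!reserve_iova(&reserved_iova_list,
					  IOVA_PFN(r->start),
					  IOVA_PFN(r->end))) {
				printk(KERN_ERR "Reserve iova failed\n");
				return -ENODEV;
			}
		}
	}
	return 0;
}

The next hunk changes the init_dmars() entry point itself; the stray-looking "return 0;" and closing brace that lead into it are just the tail of the function that precedes init_dmars() in the file.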
return 0;
}
-int __init init_dmars(void)
+static int __init init_dmars(int force_on)
{
struct dmar_drhd_unit *drhd;
struct dmar_rmrr_unit *rmrr;
* enable translation
*/
for_each_drhd_unit(drhd) {
- if (drhd->ignored)
+ if (drhd->ignored) {
+ /*
+ * we always have to disable PMRs or DMA may fail on
+ * this device
+ */
+ if (force_on)
+ iommu_disable_protect_mem_regions(drhd->iommu);
continue;
+ }
iommu = drhd->iommu;
iommu_flush_write_buffer(iommu);
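Note that force_on is never assigned in the hunks shown here. In the mainline driver it is derived from the tboot (Intel TXT) state near the top of intel_iommu_init(), the caller that the final hunk modifies. The sketch below is heavily condensed and assumes the mainline tboot_force_iommu() helper; the real function performs the DMAR table and device-scope checks, among other work, between these steps.

#include <linux/tboot.h>

int __init intel_iommu_init(void)
{
	int ret;
	int force_on;

	/*
	 * A TXT/tboot measured launch requires VT-d protection, so record
	 * whether tboot is forcing the IOMMU on; if it is, any failure
	 * below must panic() rather than quietly run without DMA remapping.
	 */
	force_on = tboot_force_iommu();

	/* ... DMAR table parsing and the checks shown in the next hunk ... */

	ret = init_dmars(force_on);
	if (ret) {
		if (force_on)
			panic("tboot: Failed to initialize DMARs\n");
		return ret;
	}
	return 0;
}

With that flag in hand, the pattern in the final hunk is uniform: every initialization step that can fail now either panics with a "tboot:" message when tboot demanded the IOMMU, or bails out with -ENODEV so the system falls back to running without DMA remapping.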
if (no_iommu || dmar_disabled)
return -ENODEV;
- iommu_init_mempool();
- dmar_init_reserved_ranges();
+ if (iommu_init_mempool()) {
+ if (force_on)
+ panic("tboot: Failed to initialize iommu memory\n");
+ return -ENODEV;
+ }
+
+ if (dmar_init_reserved_ranges()) {
+ if (force_on)
+ panic("tboot: Failed to reserve iommu ranges\n");
+ return -ENODEV;
+ }
init_no_remapping_devices();
- ret = init_dmars();
+ ret = init_dmars(force_on);
if (ret) {
if (force_on)
panic("tboot: Failed to initialize DMARs\n");