Merge branches 'x86/vt-d', 'arm/omap', 'core', 'x86/amd' and 'arm/smmu' into next
author		Joerg Roedel <joro@8bytes.org>
		Tue, 25 Jun 2013 21:34:29 +0000 (23:34 +0200)
committer	Joerg Roedel <joro@8bytes.org>
		Tue, 25 Jun 2013 21:34:29 +0000 (23:34 +0200)
drivers/iommu/amd_iommu.c
drivers/iommu/dmar.c
drivers/iommu/intel-iommu.c
drivers/iommu/intel_irq_remapping.c
drivers/iommu/iommu.c
drivers/iommu/omap-iommu.c
drivers/iommu/omap-iopgtable.h
drivers/iommu/omap-iovmm.c

diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 21d02b0..6dc6594 100644
@@ -287,14 +287,27 @@ static struct pci_dev *get_isolation_root(struct pci_dev *pdev)
 
        /*
         * If it's a multifunction device that does not support our
-        * required ACS flags, add to the same group as function 0.
+        * required ACS flags, add to the same group as the lowest numbered
+        * function that also does not support the required ACS flags.
         */
        if (dma_pdev->multifunction &&
-           !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
-               swap_pci_ref(&dma_pdev,
-                            pci_get_slot(dma_pdev->bus,
-                                         PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
-                                         0)));
+           !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
+               u8 i, slot = PCI_SLOT(dma_pdev->devfn);
+
+               for (i = 0; i < 8; i++) {
+                       struct pci_dev *tmp;
+
+                       tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
+                       if (!tmp)
+                               continue;
+
+                       if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
+                               swap_pci_ref(&dma_pdev, tmp);
+                               break;
+                       }
+                       pci_dev_put(tmp);
+               }
+       }
 
        /*
         * Devices on the root bus go through the iommu.  If that's not us,
@@ -1484,6 +1497,10 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
 
                        /* Large PTE found which maps this address */
                        unmap_size = PTE_PAGE_SIZE(*pte);
+
+                       /* Only unmap from the first pte in the page */
+                       if ((unmap_size - 1) & bus_addr)
+                               break;
                        count      = PAGE_SIZE_PTE_COUNT(unmap_size);
                        for (i = 0; i < count; i++)
                                pte[i] = 0ULL;
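
A quick standalone illustration of the large-page alignment test added above; the addresses and page size below are the editor's example values, not taken from the patch:

#include <stdio.h>

int main(void)
{
        unsigned long unmap_size = 0x200000;    /* a 2 MiB large PTE          */
        unsigned long first_pte  = 0x40200000;  /* 2 MiB-aligned bus address  */
        unsigned long mid_page   = 0x40201000;  /* points inside the mapping  */

        /* Zero only when bus_addr sits on the first PTE of the large page. */
        printf("0x%lx\n", (unmap_size - 1) & first_pte); /* 0x0    -> unmap proceeds */
        printf("0x%lx\n", (unmap_size - 1) & mid_page);  /* 0x1000 -> break out      */
        return 0;
}

Only a bus address that lands on the first PTE of the large page gives zero, so an unmap request aimed at the middle of a large mapping now stops instead of clearing the whole range.
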
@@ -1493,7 +1510,7 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
                unmapped += unmap_size;
        }
 
-       BUG_ON(!is_power_of_2(unmapped));
+       BUG_ON(unmapped && !is_power_of_2(unmapped));
 
        return unmapped;
 }
@@ -1893,34 +1910,59 @@ static void domain_id_free(int id)
        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 }
 
+#define DEFINE_FREE_PT_FN(LVL, FN)                             \
+static void free_pt_##LVL (unsigned long __pt)                 \
+{                                                              \
+       unsigned long p;                                        \
+       u64 *pt;                                                \
+       int i;                                                  \
+                                                               \
+       pt = (u64 *)__pt;                                       \
+                                                               \
+       for (i = 0; i < 512; ++i) {                             \
+               if (!IOMMU_PTE_PRESENT(pt[i]))                  \
+                       continue;                               \
+                                                               \
+               p = (unsigned long)IOMMU_PTE_PAGE(pt[i]);       \
+               FN(p);                                          \
+       }                                                       \
+       free_page((unsigned long)pt);                           \
+}
+
+DEFINE_FREE_PT_FN(l2, free_page)
+DEFINE_FREE_PT_FN(l3, free_pt_l2)
+DEFINE_FREE_PT_FN(l4, free_pt_l3)
+DEFINE_FREE_PT_FN(l5, free_pt_l4)
+DEFINE_FREE_PT_FN(l6, free_pt_l5)
+
 static void free_pagetable(struct protection_domain *domain)
 {
-       int i, j;
-       u64 *p1, *p2, *p3;
-
-       p1 = domain->pt_root;
-
-       if (!p1)
-               return;
-
-       for (i = 0; i < 512; ++i) {
-               if (!IOMMU_PTE_PRESENT(p1[i]))
-                       continue;
-
-               p2 = IOMMU_PTE_PAGE(p1[i]);
-               for (j = 0; j < 512; ++j) {
-                       if (!IOMMU_PTE_PRESENT(p2[j]))
-                               continue;
-                       p3 = IOMMU_PTE_PAGE(p2[j]);
-                       free_page((unsigned long)p3);
-               }
+       unsigned long root = (unsigned long)domain->pt_root;
 
-               free_page((unsigned long)p2);
+       switch (domain->mode) {
+       case PAGE_MODE_NONE:
+               break;
+       case PAGE_MODE_1_LEVEL:
+               free_page(root);
+               break;
+       case PAGE_MODE_2_LEVEL:
+               free_pt_l2(root);
+               break;
+       case PAGE_MODE_3_LEVEL:
+               free_pt_l3(root);
+               break;
+       case PAGE_MODE_4_LEVEL:
+               free_pt_l4(root);
+               break;
+       case PAGE_MODE_5_LEVEL:
+               free_pt_l5(root);
+               break;
+       case PAGE_MODE_6_LEVEL:
+               free_pt_l6(root);
+               break;
+       default:
+               BUG();
        }
-
-       free_page((unsigned long)p1);
-
-       domain->pt_root = NULL;
 }
 
 static void free_gcr3_tbl_level1(u64 *tbl)
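
The DEFINE_FREE_PT_FN() chain introduced above generates one helper per page-table level, each freeing every present child through the next-lower helper before freeing its own table. A minimal user-space sketch of the same pattern, using hypothetical stand-ins (ENTRIES, pt_entry_present(), pt_entry_child()) in place of the kernel's IOMMU_PTE_* helpers and free_page():

#include <stdlib.h>

#define ENTRIES 512   /* entries per table, matching the loop bound above */

/* Hypothetical stand-ins for IOMMU_PTE_PRESENT() and IOMMU_PTE_PAGE(). */
static int pt_entry_present(unsigned long e)          { return e != 0; }
static unsigned long *pt_entry_child(unsigned long e) { return (unsigned long *)e; }

/* Generate free_pt_<LVL>(): free every present child via FN, then the table. */
#define DEFINE_FREE_PT_FN(LVL, FN)                              \
static void free_pt_##LVL(unsigned long *pt)                    \
{                                                               \
        for (int i = 0; i < ENTRIES; ++i) {                     \
                if (!pt_entry_present(pt[i]))                   \
                        continue;                               \
                FN(pt_entry_child(pt[i]));                      \
        }                                                       \
        free(pt);                                               \
}

DEFINE_FREE_PT_FN(l2, free)          /* leaf level: children are plain pages   */
DEFINE_FREE_PT_FN(l3, free_pt_l2)    /* each level frees through the one below */
DEFINE_FREE_PT_FN(l4, free_pt_l3)

int main(void)
{
        unsigned long *l2 = calloc(ENTRIES, sizeof(*l2));
        unsigned long *l3 = calloc(ENTRIES, sizeof(*l3));
        unsigned long *l4 = calloc(ENTRIES, sizeof(*l4));

        l3[0] = (unsigned long)l2;    /* one present entry per level */
        l4[7] = (unsigned long)l3;
        free_pt_l4(l4);               /* frees l2, then l3, then l4  */
        return 0;
}

In the kernel code, free_pagetable() then only has to pick the helper matching domain->mode, as the switch statement above does.
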
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index a7967ce..785675a 100644
@@ -309,6 +309,7 @@ parse_dmar_table(void)
        struct acpi_table_dmar *dmar;
        struct acpi_dmar_header *entry_header;
        int ret = 0;
+       int drhd_count = 0;
 
        /*
         * Do it again, earlier dmar_tbl mapping could be mapped with
@@ -347,6 +348,7 @@ parse_dmar_table(void)
 
                switch (entry_header->type) {
                case ACPI_DMAR_TYPE_HARDWARE_UNIT:
+                       drhd_count++;
                        ret = dmar_parse_one_drhd(entry_header);
                        break;
                case ACPI_DMAR_TYPE_RESERVED_MEMORY:
@@ -371,6 +373,8 @@ parse_dmar_table(void)
 
                entry_header = ((void *)entry_header + entry_header->length);
        }
+       if (drhd_count == 0)
+               pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
        return ret;
 }
 
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index b4f0e28..eec0d3e 100644
@@ -4182,14 +4182,27 @@ static int intel_iommu_add_device(struct device *dev)
 
        /*
         * If it's a multifunction device that does not support our
-        * required ACS flags, add to the same group as function 0.
+        * required ACS flags, add to the same group as the lowest numbered
+        * function that also does not support the required ACS flags.
         */
        if (dma_pdev->multifunction &&
-           !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
-               swap_pci_ref(&dma_pdev,
-                            pci_get_slot(dma_pdev->bus,
-                                         PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
-                                         0)));
+           !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
+               u8 i, slot = PCI_SLOT(dma_pdev->devfn);
+
+               for (i = 0; i < 8; i++) {
+                       struct pci_dev *tmp;
+
+                       tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
+                       if (!tmp)
+                               continue;
+
+                       if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
+                               swap_pci_ref(&dma_pdev, tmp);
+                               break;
+                       }
+                       pci_dev_put(tmp);
+               }
+       }
 
        /*
         * Devices on the root bus go through the iommu.  If that's not us,
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 5b19b2d..f71673d 100644
@@ -664,8 +664,7 @@ error:
         */
 
        if (x2apic_present)
-               WARN(1, KERN_WARNING
-                       "Failed to enable irq remapping.  You are vulnerable to irq-injection attacks.\n");
+               pr_warn("Failed to enable irq remapping.  You are vulnerable to irq-injection attacks.\n");
 
        return -1;
 }
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index d8f98b1..fbe9ca7 100644
@@ -754,6 +754,38 @@ int iommu_domain_has_cap(struct iommu_domain *domain,
 }
 EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
 
+static size_t iommu_pgsize(struct iommu_domain *domain,
+                          unsigned long addr_merge, size_t size)
+{
+       unsigned int pgsize_idx;
+       size_t pgsize;
+
+       /* Max page size that still fits into 'size' */
+       pgsize_idx = __fls(size);
+
+       /* need to consider alignment requirements ? */
+       if (likely(addr_merge)) {
+               /* Max page size allowed by address */
+               unsigned int align_pgsize_idx = __ffs(addr_merge);
+               pgsize_idx = min(pgsize_idx, align_pgsize_idx);
+       }
+
+       /* build a mask of acceptable page sizes */
+       pgsize = (1UL << (pgsize_idx + 1)) - 1;
+
+       /* throw away page sizes not supported by the hardware */
+       pgsize &= domain->ops->pgsize_bitmap;
+
+       /* make sure we're still sane */
+       BUG_ON(!pgsize);
+
+       /* pick the biggest page */
+       pgsize_idx = __fls(pgsize);
+       pgsize = 1UL << pgsize_idx;
+
+       return pgsize;
+}
+
 int iommu_map(struct iommu_domain *domain, unsigned long iova,
              phys_addr_t paddr, size_t size, int prot)
 {
@@ -775,45 +807,18 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
         * size of the smallest page supported by the hardware
         */
        if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
-               pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
-                       "0x%x\n", iova, (unsigned long)paddr,
-                       (unsigned long)size, min_pagesz);
+               pr_err("unaligned: iova 0x%lx pa 0x%pa size 0x%zx min_pagesz 0x%x\n",
+                      iova, &paddr, size, min_pagesz);
                return -EINVAL;
        }
 
-       pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
-                               (unsigned long)paddr, (unsigned long)size);
+       pr_debug("map: iova 0x%lx pa 0x%pa size 0x%zx\n", iova, &paddr, size);
 
        while (size) {
-               unsigned long pgsize, addr_merge = iova | paddr;
-               unsigned int pgsize_idx;
-
-               /* Max page size that still fits into 'size' */
-               pgsize_idx = __fls(size);
-
-               /* need to consider alignment requirements ? */
-               if (likely(addr_merge)) {
-                       /* Max page size allowed by both iova and paddr */
-                       unsigned int align_pgsize_idx = __ffs(addr_merge);
-
-                       pgsize_idx = min(pgsize_idx, align_pgsize_idx);
-               }
-
-               /* build a mask of acceptable page sizes */
-               pgsize = (1UL << (pgsize_idx + 1)) - 1;
-
-               /* throw away page sizes not supported by the hardware */
-               pgsize &= domain->ops->pgsize_bitmap;
-
-               /* make sure we're still sane */
-               BUG_ON(!pgsize);
-
-               /* pick the biggest page */
-               pgsize_idx = __fls(pgsize);
-               pgsize = 1UL << pgsize_idx;
+               size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
 
-               pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
-                                       (unsigned long)paddr, pgsize);
+               pr_debug("mapping: iova 0x%lx pa 0x%pa pgsize 0x%zx\n",
+                        iova, &paddr, pgsize);
 
                ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
                if (ret)
@@ -850,27 +855,26 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
         * by the hardware
         */
        if (!IS_ALIGNED(iova | size, min_pagesz)) {
-               pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
-                                       iova, (unsigned long)size, min_pagesz);
+               pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
+                      iova, size, min_pagesz);
                return -EINVAL;
        }
 
-       pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
-                                                       (unsigned long)size);
+       pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);
 
        /*
         * Keep iterating until we either unmap 'size' bytes (or more)
         * or we hit an area that isn't mapped.
         */
        while (unmapped < size) {
-               size_t left = size - unmapped;
+               size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
 
-               unmapped_page = domain->ops->unmap(domain, iova, left);
+               unmapped_page = domain->ops->unmap(domain, iova, pgsize);
                if (!unmapped_page)
                        break;
 
-               pr_debug("unmapped: iova 0x%lx size %lx\n", iova,
-                                       (unsigned long)unmapped_page);
+               pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
+                        iova, unmapped_page);
 
                iova += unmapped_page;
                unmapped += unmapped_page;
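
The iommu_pgsize() helper factored out above picks the largest hardware-supported page size that fits both the remaining length and the alignment of iova | paddr. A self-contained sketch reproducing the same arithmetic with GCC builtins in place of __fls()/__ffs(); the supported-size bitmap and the example addresses are assumptions for illustration only:

#include <stdio.h>
#include <stddef.h>

/* Assumed pgsize_bitmap allowing 4 KiB, 2 MiB and 1 GiB pages. */
#define SUPPORTED ((1UL << 12) | (1UL << 21) | (1UL << 30))

/* Same arithmetic as iommu_pgsize(); assumes 64-bit unsigned long. */
static size_t pick_pgsize(unsigned long addr_merge, size_t size)
{
        unsigned int pgsize_idx = 63 - __builtin_clzl(size);   /* like __fls(size) */
        size_t pgsize;

        if (addr_merge) {
                unsigned int align_idx = __builtin_ctzl(addr_merge); /* like __ffs() */
                if (align_idx < pgsize_idx)
                        pgsize_idx = align_idx;
        }

        pgsize = ((size_t)1 << (pgsize_idx + 1)) - 1;       /* all sizes up to the limit */
        pgsize &= SUPPORTED;                                 /* keep hardware-supported   */
        return (size_t)1 << (63 - __builtin_clzl(pgsize));   /* biggest remaining size    */
}

int main(void)
{
        /* 2 MiB-aligned iova/paddr and a 4 MiB length -> a 2 MiB page is picked. */
        printf("0x%zx\n", pick_pgsize(0x40200000UL | 0x7e600000UL, 0x400000));
        /* A 4 KiB-aligned start limits the choice to 4 KiB despite the length.   */
        printf("0x%zx\n", pick_pgsize(0x1000UL, 0x400000));
        return 0;
}

Compiled on a 64-bit host this prints 0x200000 and 0x1000, matching what iommu_map() and iommu_unmap() would now pass down per iteration.
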
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index e02e5d7..0ba3766 100644
@@ -833,16 +833,15 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
        iopgd = iopgd_offset(obj, da);
 
        if (!iopgd_is_table(*iopgd)) {
-               dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p "
-                       "*pgd:px%08x\n", obj->name, errs, da, iopgd, *iopgd);
+               dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:px%08x\n",
+                               obj->name, errs, da, iopgd, *iopgd);
                return IRQ_NONE;
        }
 
        iopte = iopte_offset(iopgd, da);
 
-       dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x "
-               "pte:0x%p *pte:0x%08x\n", obj->name, errs, da, iopgd, *iopgd,
-               iopte, *iopte);
+       dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
+                       obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);
 
        return IRQ_NONE;
 }
@@ -1235,14 +1234,16 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
                else if (iopte_is_large(*pte))
                        ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
                else
-                       dev_err(dev, "bogus pte 0x%x, da 0x%lx", *pte, da);
+                       dev_err(dev, "bogus pte 0x%x, da 0x%llx", *pte,
+                                                       (unsigned long long)da);
        } else {
                if (iopgd_is_section(*pgd))
                        ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
                else if (iopgd_is_super(*pgd))
                        ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
                else
-                       dev_err(dev, "bogus pgd 0x%x, da 0x%lx", *pgd, da);
+                       dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd,
+                                                       (unsigned long long)da);
        }
 
        return ret;
diff --git a/drivers/iommu/omap-iopgtable.h b/drivers/iommu/omap-iopgtable.h
index cd4ae9e..f4003d5 100644
@@ -95,4 +95,4 @@ static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask)
 #define iopte_offset(iopgd, da)        (iopgd_page_vaddr(iopgd) + iopte_index(da))
 
 #define to_iommu(dev)                                                  \
-       (struct omap_iommu *)platform_get_drvdata(to_platform_device(dev))
+       ((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)))
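
The extra parentheses added to to_iommu() matter because member access binds more tightly than a cast, so without them ->field would be applied to the untyped platform_get_drvdata() result before the cast. A small illustration with a hypothetical get_drvdata() stand-in (the macro names below are the editor's, not from the patch):

#include <stdio.h>

struct omap_iommu { const char *name; };

/* Hypothetical stand-in for platform_get_drvdata(): returns an untyped pointer. */
static void *get_drvdata(void *dev) { return dev; }

#define to_iommu_old(dev)  (struct omap_iommu *)get_drvdata(dev)    /* unparenthesized */
#define to_iommu_new(dev)  ((struct omap_iommu *)get_drvdata(dev))  /* as in the patch */

int main(void)
{
        struct omap_iommu obj = { .name = "iommu0" };

        /* to_iommu_old(&obj)->name would expand to
         * (struct omap_iommu *)(get_drvdata(&obj)->name) and fail to compile,
         * because -> is applied to the void * result before the cast.        */
        printf("%s\n", to_iommu_new(&obj)->name);    /* prints "iommu0" */
        return 0;
}
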
diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c
index 46d8756..d147259 100644
@@ -102,8 +102,8 @@ static size_t sgtable_len(const struct sg_table *sgt)
                }
 
                if (i && sg->offset) {
-                       pr_err("%s: sg[%d] offset not allowed in internal "
-                                       "entries\n", __func__, i);
+                       pr_err("%s: sg[%d] offset not allowed in internal entries\n",
+                               __func__, i);
                        return 0;
                }