Merge branches 'x86/vt-d', 'x86/amd', 'arm/smmu', 'arm/omap', 'generic-dma-ops' and...
author Joerg Roedel <jroedel@suse.de>
Thu, 4 Jul 2019 15:26:48 +0000 (17:26 +0200)
committer Joerg Roedel <jroedel@suse.de>
Thu, 4 Jul 2019 15:26:48 +0000 (17:26 +0200)
20 files changed:
Documentation/ABI/testing/sysfs-kernel-iommu_groups
arch/arm64/mm/dma-mapping.c
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/dma-iommu.c
drivers/iommu/intel-iommu-debugfs.c
drivers/iommu/intel-iommu.c
drivers/iommu/intel-pasid.c
drivers/iommu/intel-pasid.h
drivers/iommu/intel-svm.c
drivers/iommu/intel_irq_remapping.c
drivers/iommu/iommu.c
drivers/iommu/omap-iommu-debug.c
drivers/iommu/omap-iommu.c
include/linux/device.h
include/linux/dma-iommu.h
include/linux/intel-iommu.h
include/linux/intel-svm.h
include/linux/iommu.h
include/uapi/linux/iommu.h [new file with mode: 0644]

diff --git a/Documentation/ABI/testing/sysfs-kernel-iommu_groups b/Documentation/ABI/testing/sysfs-kernel-iommu_groups
index 35c64e0..017f5bc 100644
@@ -24,3 +24,12 @@ Description:    /sys/kernel/iommu_groups/reserved_regions list IOVA
                region is described on a single line: the 1st field is
                the base IOVA, the second is the end IOVA and the third
                field describes the type of the region.
+
+What:          /sys/kernel/iommu_groups/reserved_regions
+Date:          June 2019
+KernelVersion:  v5.3
+Contact:       Eric Auger <eric.auger@redhat.com>
+Description:    In case an RMRR is used only by graphics or USB devices
+               it is now exposed as "direct-relaxable" instead of "direct".
+               In device assignment use case, for instance, those RMRR
+               are considered to be relaxable and safe.
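
For illustration only (hypothetical addresses), reading a group's reserved_regions file after this change might return:

    0x00000000fee00000 0x00000000feefffff msi
    0x000000003c000000 0x000000003fffffff direct-relaxable

i.e. the third field now reports "direct-relaxable" for RMRRs used only by graphics or USB devices, while other RMRRs keep the "direct" type.
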
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 5992eb9..9c588a1 100644
@@ -1,24 +1,13 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * SWIOTLB-based DMA API implementation
- *
  * Copyright (C) 2012 ARM Ltd.
  * Author: Catalin Marinas <catalin.marinas@arm.com>
  */
 
 #include <linux/gfp.h>
-#include <linux/acpi.h>
-#include <linux/memblock.h>
 #include <linux/cache.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-#include <linux/genalloc.h>
-#include <linux/dma-direct.h>
 #include <linux/dma-noncoherent.h>
-#include <linux/dma-contiguous.h>
-#include <linux/vmalloc.h>
-#include <linux/swiotlb.h>
-#include <linux/pci.h>
+#include <linux/dma-iommu.h>
 
 #include <asm/cacheflush.h>
 
@@ -47,37 +36,6 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
        __dma_flush_area(page_address(page), size);
 }
 
-#ifdef CONFIG_IOMMU_DMA
-static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
-                                     struct page *page, size_t size)
-{
-       int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
-
-       if (!ret)
-               sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
-
-       return ret;
-}
-
-static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
-                             unsigned long pfn, size_t size)
-{
-       int ret = -ENXIO;
-       unsigned long nr_vma_pages = vma_pages(vma);
-       unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
-       unsigned long off = vma->vm_pgoff;
-
-       if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
-               ret = remap_pfn_range(vma, vma->vm_start,
-                                     pfn + off,
-                                     vma->vm_end - vma->vm_start,
-                                     vma->vm_page_prot);
-       }
-
-       return ret;
-}
-#endif /* CONFIG_IOMMU_DMA */
-
 static int __init arm64_dma_init(void)
 {
        WARN_TAINT(ARCH_DMA_MINALIGN < cache_line_size(),
@@ -89,380 +47,18 @@ static int __init arm64_dma_init(void)
 arch_initcall(arm64_dma_init);
 
 #ifdef CONFIG_IOMMU_DMA
-#include <linux/dma-iommu.h>
-#include <linux/platform_device.h>
-#include <linux/amba/bus.h>
-
-/* Thankfully, all cache ops are by VA so we can ignore phys here */
-static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
-{
-       __dma_flush_area(virt, PAGE_SIZE);
-}
-
-static void *__iommu_alloc_attrs(struct device *dev, size_t size,
-                                dma_addr_t *handle, gfp_t gfp,
-                                unsigned long attrs)
-{
-       bool coherent = dev_is_dma_coherent(dev);
-       int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
-       size_t iosize = size;
-       void *addr;
-
-       if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
-               return NULL;
-
-       size = PAGE_ALIGN(size);
-
-       /*
-        * Some drivers rely on this, and we probably don't want the
-        * possibility of stale kernel data being read by devices anyway.
-        */
-       gfp |= __GFP_ZERO;
-
-       if (!gfpflags_allow_blocking(gfp)) {
-               struct page *page;
-               /*
-                * In atomic context we can't remap anything, so we'll only
-                * get the virtually contiguous buffer we need by way of a
-                * physically contiguous allocation.
-                */
-               if (coherent) {
-                       page = alloc_pages(gfp, get_order(size));
-                       addr = page ? page_address(page) : NULL;
-               } else {
-                       addr = dma_alloc_from_pool(size, &page, gfp);
-               }
-               if (!addr)
-                       return NULL;
-
-               *handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
-               if (*handle == DMA_MAPPING_ERROR) {
-                       if (coherent)
-                               __free_pages(page, get_order(size));
-                       else
-                               dma_free_from_pool(addr, size);
-                       addr = NULL;
-               }
-       } else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
-               pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
-               struct page *page;
-
-               page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
-                                       get_order(size), gfp & __GFP_NOWARN);
-               if (!page)
-                       return NULL;
-
-               *handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
-               if (*handle == DMA_MAPPING_ERROR) {
-                       dma_release_from_contiguous(dev, page,
-                                                   size >> PAGE_SHIFT);
-                       return NULL;
-               }
-               addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
-                                                  prot,
-                                                  __builtin_return_address(0));
-               if (addr) {
-                       if (!coherent)
-                               __dma_flush_area(page_to_virt(page), iosize);
-                       memset(addr, 0, size);
-               } else {
-                       iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
-                       dma_release_from_contiguous(dev, page,
-                                                   size >> PAGE_SHIFT);
-               }
-       } else {
-               pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
-               struct page **pages;
-
-               pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
-                                       handle, flush_page);
-               if (!pages)
-                       return NULL;
-
-               addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
-                                             __builtin_return_address(0));
-               if (!addr)
-                       iommu_dma_free(dev, pages, iosize, handle);
-       }
-       return addr;
-}
-
-static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
-                              dma_addr_t handle, unsigned long attrs)
-{
-       size_t iosize = size;
-
-       size = PAGE_ALIGN(size);
-       /*
-        * @cpu_addr will be one of 4 things depending on how it was allocated:
-        * - A remapped array of pages for contiguous allocations.
-        * - A remapped array of pages from iommu_dma_alloc(), for all
-        *   non-atomic allocations.
-        * - A non-cacheable alias from the atomic pool, for atomic
-        *   allocations by non-coherent devices.
-        * - A normal lowmem address, for atomic allocations by
-        *   coherent devices.
-        * Hence how dodgy the below logic looks...
-        */
-       if (dma_in_atomic_pool(cpu_addr, size)) {
-               iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
-               dma_free_from_pool(cpu_addr, size);
-       } else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
-               struct page *page = vmalloc_to_page(cpu_addr);
-
-               iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
-               dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
-               dma_common_free_remap(cpu_addr, size, VM_USERMAP);
-       } else if (is_vmalloc_addr(cpu_addr)){
-               struct vm_struct *area = find_vm_area(cpu_addr);
-
-               if (WARN_ON(!area || !area->pages))
-                       return;
-               iommu_dma_free(dev, area->pages, iosize, &handle);
-               dma_common_free_remap(cpu_addr, size, VM_USERMAP);
-       } else {
-               iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
-               __free_pages(virt_to_page(cpu_addr), get_order(size));
-       }
-}
-
-static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
-                             void *cpu_addr, dma_addr_t dma_addr, size_t size,
-                             unsigned long attrs)
-{
-       struct vm_struct *area;
-       int ret;
-
-       vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
-
-       if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
-               return ret;
-
-       if (!is_vmalloc_addr(cpu_addr)) {
-               unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
-               return __swiotlb_mmap_pfn(vma, pfn, size);
-       }
-
-       if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
-               /*
-                * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
-                * hence in the vmalloc space.
-                */
-               unsigned long pfn = vmalloc_to_pfn(cpu_addr);
-               return __swiotlb_mmap_pfn(vma, pfn, size);
-       }
-
-       area = find_vm_area(cpu_addr);
-       if (WARN_ON(!area || !area->pages))
-               return -ENXIO;
-
-       return iommu_dma_mmap(area->pages, size, vma);
-}
-
-static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
-                              void *cpu_addr, dma_addr_t dma_addr,
-                              size_t size, unsigned long attrs)
-{
-       unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-       struct vm_struct *area = find_vm_area(cpu_addr);
-
-       if (!is_vmalloc_addr(cpu_addr)) {
-               struct page *page = virt_to_page(cpu_addr);
-               return __swiotlb_get_sgtable_page(sgt, page, size);
-       }
-
-       if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
-               /*
-                * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
-                * hence in the vmalloc space.
-                */
-               struct page *page = vmalloc_to_page(cpu_addr);
-               return __swiotlb_get_sgtable_page(sgt, page, size);
-       }
-
-       if (WARN_ON(!area || !area->pages))
-               return -ENXIO;
-
-       return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
-                                        GFP_KERNEL);
-}
-
-static void __iommu_sync_single_for_cpu(struct device *dev,
-                                       dma_addr_t dev_addr, size_t size,
-                                       enum dma_data_direction dir)
-{
-       phys_addr_t phys;
-
-       if (dev_is_dma_coherent(dev))
-               return;
-
-       phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dev_addr);
-       arch_sync_dma_for_cpu(dev, phys, size, dir);
-}
-
-static void __iommu_sync_single_for_device(struct device *dev,
-                                          dma_addr_t dev_addr, size_t size,
-                                          enum dma_data_direction dir)
-{
-       phys_addr_t phys;
-
-       if (dev_is_dma_coherent(dev))
-               return;
-
-       phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dev_addr);
-       arch_sync_dma_for_device(dev, phys, size, dir);
-}
-
-static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
-                                  unsigned long offset, size_t size,
-                                  enum dma_data_direction dir,
-                                  unsigned long attrs)
-{
-       bool coherent = dev_is_dma_coherent(dev);
-       int prot = dma_info_to_prot(dir, coherent, attrs);
-       dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);
-
-       if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
-           dev_addr != DMA_MAPPING_ERROR)
-               __dma_map_area(page_address(page) + offset, size, dir);
-
-       return dev_addr;
-}
-
-static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
-                              size_t size, enum dma_data_direction dir,
-                              unsigned long attrs)
-{
-       if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
-               __iommu_sync_single_for_cpu(dev, dev_addr, size, dir);
-
-       iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
-}
-
-static void __iommu_sync_sg_for_cpu(struct device *dev,
-                                   struct scatterlist *sgl, int nelems,
-                                   enum dma_data_direction dir)
-{
-       struct scatterlist *sg;
-       int i;
-
-       if (dev_is_dma_coherent(dev))
-               return;
-
-       for_each_sg(sgl, sg, nelems, i)
-               arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
-}
-
-static void __iommu_sync_sg_for_device(struct device *dev,
-                                      struct scatterlist *sgl, int nelems,
-                                      enum dma_data_direction dir)
-{
-       struct scatterlist *sg;
-       int i;
-
-       if (dev_is_dma_coherent(dev))
-               return;
-
-       for_each_sg(sgl, sg, nelems, i)
-               arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
-}
-
-static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
-                               int nelems, enum dma_data_direction dir,
-                               unsigned long attrs)
-{
-       bool coherent = dev_is_dma_coherent(dev);
-
-       if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
-               __iommu_sync_sg_for_device(dev, sgl, nelems, dir);
-
-       return iommu_dma_map_sg(dev, sgl, nelems,
-                               dma_info_to_prot(dir, coherent, attrs));
-}
-
-static void __iommu_unmap_sg_attrs(struct device *dev,
-                                  struct scatterlist *sgl, int nelems,
-                                  enum dma_data_direction dir,
-                                  unsigned long attrs)
-{
-       if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
-               __iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);
-
-       iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
-}
-
-static const struct dma_map_ops iommu_dma_ops = {
-       .alloc = __iommu_alloc_attrs,
-       .free = __iommu_free_attrs,
-       .mmap = __iommu_mmap_attrs,
-       .get_sgtable = __iommu_get_sgtable,
-       .map_page = __iommu_map_page,
-       .unmap_page = __iommu_unmap_page,
-       .map_sg = __iommu_map_sg_attrs,
-       .unmap_sg = __iommu_unmap_sg_attrs,
-       .sync_single_for_cpu = __iommu_sync_single_for_cpu,
-       .sync_single_for_device = __iommu_sync_single_for_device,
-       .sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
-       .sync_sg_for_device = __iommu_sync_sg_for_device,
-       .map_resource = iommu_dma_map_resource,
-       .unmap_resource = iommu_dma_unmap_resource,
-};
-
-static int __init __iommu_dma_init(void)
-{
-       return iommu_dma_init();
-}
-arch_initcall(__iommu_dma_init);
-
-static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-                                 const struct iommu_ops *ops)
-{
-       struct iommu_domain *domain;
-
-       if (!ops)
-               return;
-
-       /*
-        * The IOMMU core code allocates the default DMA domain, which the
-        * underlying IOMMU driver needs to support via the dma-iommu layer.
-        */
-       domain = iommu_get_domain_for_dev(dev);
-
-       if (!domain)
-               goto out_err;
-
-       if (domain->type == IOMMU_DOMAIN_DMA) {
-               if (iommu_dma_init_domain(domain, dma_base, size, dev))
-                       goto out_err;
-
-               dev->dma_ops = &iommu_dma_ops;
-       }
-
-       return;
-
-out_err:
-        pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
-                dev_name(dev));
-}
-
 void arch_teardown_dma_ops(struct device *dev)
 {
        dev->dma_ops = NULL;
 }
-
-#else
-
-static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-                                 const struct iommu_ops *iommu)
-{ }
-
-#endif  /* CONFIG_IOMMU_DMA */
+#endif
 
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                        const struct iommu_ops *iommu, bool coherent)
 {
        dev->dma_coherent = coherent;
-       __iommu_setup_dma_ops(dev, dma_base, size, iommu);
+       if (iommu)
+               iommu_setup_dma_ops(dev, dma_base, size);
 
 #ifdef CONFIG_XEN
        if (xen_initial_domain())
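
With the arm64-private iommu_dma_ops gone, arch_setup_dma_ops() above now simply installs the shared ops from drivers/iommu/dma-iommu.c via iommu_setup_dma_ops(). A minimal sketch of the resulting call path from a hypothetical driver (illustrative consumer code, not part of this merge):

    #include <linux/dma-mapping.h>

    /*
     * With an IOMMU present, dev->dma_ops points at the generic
     * iommu_dma_ops, so this dma_map_page() call ends up in
     * iommu_dma_map_page() in drivers/iommu/dma-iommu.c.
     */
    static dma_addr_t example_map_one(struct device *dev, struct page *page,
                                      size_t len)
    {
            return dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
    }
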
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index dce1d8d..73740b9 100644
@@ -619,9 +619,9 @@ retry:
                pasid = ((event[0] >> 16) & 0xFFFF)
                        | ((event[1] << 6) & 0xF0000);
                tag = event[1] & 0x03FF;
-               dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
+               dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
                        PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
-                       pasid, address, flags);
+                       pasid, address, flags, tag);
                break;
        default:
                dev_err(dev, "Event logged [UNKNOWN event[0]=0x%08x event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x\n",
@@ -1295,6 +1295,16 @@ static void domain_flush_complete(struct protection_domain *domain)
        }
 }
 
+/* Flush the not present cache if it exists */
+static void domain_flush_np_cache(struct protection_domain *domain,
+               dma_addr_t iova, size_t size)
+{
+       if (unlikely(amd_iommu_np_cache)) {
+               domain_flush_pages(domain, iova, size);
+               domain_flush_complete(domain);
+       }
+}
+
 
 /*
  * This function flushes the DTEs for all devices in domain
@@ -2377,10 +2387,7 @@ static dma_addr_t __map_single(struct device *dev,
        }
        address += offset;
 
-       if (unlikely(amd_iommu_np_cache)) {
-               domain_flush_pages(&dma_dom->domain, address, size);
-               domain_flush_complete(&dma_dom->domain);
-       }
+       domain_flush_np_cache(&dma_dom->domain, address, size);
 
 out:
        return address;
@@ -2559,6 +2566,9 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
                s->dma_length   = s->length;
        }
 
+       if (s)
+               domain_flush_np_cache(domain, s->dma_address, s->dma_length);
+
        return nelems;
 
 out_unmap:
@@ -2597,7 +2607,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
        struct protection_domain *domain;
        struct dma_ops_domain *dma_dom;
        unsigned long startaddr;
-       int npages = 2;
+       int npages;
 
        domain = get_domain(dev);
        if (IS_ERR(domain))
@@ -3039,6 +3049,8 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
        ret = iommu_map_page(domain, iova, paddr, page_size, prot, GFP_KERNEL);
        mutex_unlock(&domain->api_lock);
 
+       domain_flush_np_cache(domain, iova, page_size);
+
        return ret;
 }
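
Taken together with domain_flush_np_cache() above, the AMD driver now flushes its not-present cache on every mapping path, including iommu_map() calls arriving through the IOMMU API. A hypothetical consumer (not from this merge) therefore needs no extra flush of its own on amd_iommu_np_cache systems:

    #include <linux/iommu.h>

    /*
     * Map a physically contiguous region into an IOMMU API domain; on AMD
     * hardware that caches non-present entries, amd_iommu_map() now does
     * the required IOTLB flush itself via domain_flush_np_cache().
     */
    static int example_map_region(struct iommu_domain *dom, unsigned long iova,
                                  phys_addr_t paddr, size_t size)
    {
            return iommu_map(dom, iova, paddr, size, IOMMU_READ | IOMMU_WRITE);
    }
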
 
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 07d84db..eb104c7 100644
@@ -406,6 +406,9 @@ static void iommu_enable(struct amd_iommu *iommu)
 
 static void iommu_disable(struct amd_iommu *iommu)
 {
+       if (!iommu->mmio_base)
+               return;
+
        /* Disable command buffer */
        iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
 
@@ -2325,15 +2328,6 @@ static void __init free_iommu_resources(void)
        amd_iommu_dev_table = NULL;
 
        free_iommu_all();
-
-#ifdef CONFIG_GART_IOMMU
-       /*
-        * We failed to initialize the AMD IOMMU - try fallback to GART
-        * if possible.
-        */
-       gart_iommu_init();
-
-#endif
 }
 
 /* SB IOAPIC is always on this device in AMD systems */
@@ -2625,8 +2619,6 @@ static int __init state_next(void)
                init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
                if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
                        pr_info("AMD IOMMU disabled on kernel command-line\n");
-                       free_dma_resources();
-                       free_iommu_resources();
                        init_state = IOMMU_CMDLINE_DISABLED;
                        ret = -EINVAL;
                }
@@ -2667,6 +2659,19 @@ static int __init state_next(void)
                BUG();
        }
 
+       if (ret) {
+               free_dma_resources();
+               if (!irq_remapping_enabled) {
+                       disable_iommus();
+                       free_iommu_resources();
+               } else {
+                       struct amd_iommu *iommu;
+
+                       uninit_device_table_dma();
+                       for_each_iommu(iommu)
+                               iommu_flush_all_caches(iommu);
+               }
+       }
        return ret;
 }
 
@@ -2740,17 +2745,15 @@ static int __init amd_iommu_init(void)
        int ret;
 
        ret = iommu_go_to_state(IOMMU_INITIALIZED);
-       if (ret) {
-               free_dma_resources();
-               if (!irq_remapping_enabled) {
-                       disable_iommus();
-                       free_iommu_resources();
-               } else {
-                       uninit_device_table_dma();
-                       for_each_iommu(iommu)
-                               iommu_flush_all_caches(iommu);
-               }
+#ifdef CONFIG_GART_IOMMU
+       if (ret && list_empty(&amd_iommu_list)) {
+               /*
+                * We failed to initialize the AMD IOMMU - try fallback
+                * to GART if possible.
+                */
+               gart_iommu_init();
        }
+#endif
 
        for_each_iommu(iommu)
                amd_iommu_debugfs_setup(iommu);
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 3793182..f802255 100644
@@ -10,7 +10,9 @@
 
 #include <linux/acpi_iort.h>
 #include <linux/device.h>
+#include <linux/dma-contiguous.h>
 #include <linux/dma-iommu.h>
+#include <linux/dma-noncoherent.h>
 #include <linux/gfp.h>
 #include <linux/huge_mm.h>
 #include <linux/iommu.h>
@@ -67,11 +69,6 @@ static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
        return cookie;
 }
 
-int iommu_dma_init(void)
-{
-       return iova_cache_get();
-}
-
 /**
  * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
  * @domain: IOMMU domain to prepare for DMA-API usage
@@ -229,8 +226,8 @@ resv_iova:
                start = window->res->end - window->offset + 1;
                /* If window is last entry */
                if (window->node.next == &bridge->dma_ranges &&
-                   end != ~(dma_addr_t)0) {
-                       end = ~(dma_addr_t)0;
+                   end != ~(phys_addr_t)0) {
+                       end = ~(phys_addr_t)0;
                        goto resv_iova;
                }
        }
@@ -302,7 +299,7 @@ static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
  * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
  * any change which could make prior IOVAs invalid will fail.
  */
-int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
+static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
                u64 size, struct device *dev)
 {
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
@@ -353,7 +350,6 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 
        return iova_reserve_iommu_regions(dev, domain);
 }
-EXPORT_SYMBOL(iommu_dma_init_domain);
 
 /**
  * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
@@ -364,7 +360,7 @@ EXPORT_SYMBOL(iommu_dma_init_domain);
  *
  * Return: corresponding IOMMU API page protection flags
  */
-int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
+static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
                     unsigned long attrs)
 {
        int prot = coherent ? IOMMU_CACHE : 0;
@@ -441,9 +437,10 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
                                size >> iova_shift(iovad));
 }
 
-static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
+static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
                size_t size)
 {
+       struct iommu_domain *domain = iommu_get_dma_domain(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        size_t iova_off = iova_offset(iovad, dma_addr);
@@ -457,6 +454,30 @@ static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
        iommu_dma_free_iova(cookie, dma_addr, size);
 }
 
+static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
+               size_t size, int prot)
+{
+       struct iommu_domain *domain = iommu_get_dma_domain(dev);
+       struct iommu_dma_cookie *cookie = domain->iova_cookie;
+       size_t iova_off = 0;
+       dma_addr_t iova;
+
+       if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
+               iova_off = iova_offset(&cookie->iovad, phys);
+               size = iova_align(&cookie->iovad, size + iova_off);
+       }
+
+       iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
+       if (!iova)
+               return DMA_MAPPING_ERROR;
+
+       if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
+               iommu_dma_free_iova(cookie, iova, size);
+               return DMA_MAPPING_ERROR;
+       }
+       return iova + iova_off;
+}
+
 static void __iommu_dma_free_pages(struct page **pages, int count)
 {
        while (count--)
@@ -522,55 +543,45 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
        return pages;
 }
 
-/**
- * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
- * @dev: Device which owns this buffer
- * @pages: Array of buffer pages as returned by iommu_dma_alloc()
- * @size: Size of buffer in bytes
- * @handle: DMA address of buffer
- *
- * Frees both the pages associated with the buffer, and the array
- * describing them
- */
-void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
-               dma_addr_t *handle)
+static struct page **__iommu_dma_get_pages(void *cpu_addr)
 {
-       __iommu_dma_unmap(iommu_get_dma_domain(dev), *handle, size);
-       __iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
-       *handle = DMA_MAPPING_ERROR;
+       struct vm_struct *area = find_vm_area(cpu_addr);
+
+       if (!area || !area->pages)
+               return NULL;
+       return area->pages;
 }
 
 /**
- * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
+ * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
  * @dev: Device to allocate memory for. Must be a real device
  *      attached to an iommu_dma_domain
  * @size: Size of buffer in bytes
+ * @dma_handle: Out argument for allocated DMA handle
  * @gfp: Allocation flags
  * @attrs: DMA attributes for this allocation
- * @prot: IOMMU mapping flags
- * @handle: Out argument for allocated DMA handle
- * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
- *             given VA/PA are visible to the given non-coherent device.
  *
  * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
  * but an IOMMU which supports smaller pages might not map the whole thing.
  *
- * Return: Array of struct page pointers describing the buffer,
- *        or NULL on failure.
+ * Return: Mapped virtual address, or NULL on failure.
  */
-struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
-               unsigned long attrs, int prot, dma_addr_t *handle,
-               void (*flush_page)(struct device *, const void *, phys_addr_t))
+static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
+               dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
        struct iommu_domain *domain = iommu_get_dma_domain(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
+       bool coherent = dev_is_dma_coherent(dev);
+       int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
+       pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
+       unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
        struct page **pages;
        struct sg_table sgt;
        dma_addr_t iova;
-       unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
+       void *vaddr;
 
-       *handle = DMA_MAPPING_ERROR;
+       *dma_handle = DMA_MAPPING_ERROR;
 
        min_size = alloc_sizes & -alloc_sizes;
        if (min_size < PAGE_SIZE) {
@@ -596,26 +607,29 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
        if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
                goto out_free_iova;
 
-       if (!(prot & IOMMU_CACHE)) {
-               struct sg_mapping_iter miter;
-               /*
-                * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
-                * sufficient here, so skip it by using the "wrong" direction.
-                */
-               sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
-               while (sg_miter_next(&miter))
-                       flush_page(dev, miter.addr, page_to_phys(miter.page));
-               sg_miter_stop(&miter);
+       if (!(ioprot & IOMMU_CACHE)) {
+               struct scatterlist *sg;
+               int i;
+
+               for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
+                       arch_dma_prep_coherent(sg_page(sg), sg->length);
        }
 
-       if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
+       if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
                        < size)
                goto out_free_sg;
 
-       *handle = iova;
+       vaddr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
+                       __builtin_return_address(0));
+       if (!vaddr)
+               goto out_unmap;
+
+       *dma_handle = iova;
        sg_free_table(&sgt);
-       return pages;
+       return vaddr;
 
+out_unmap:
+       __iommu_dma_unmap(dev, iova, size);
 out_free_sg:
        sg_free_table(&sgt);
 out_free_iova:
@@ -626,54 +640,94 @@ out_free_pages:
 }
 
 /**
- * iommu_dma_mmap - Map a buffer into provided user VMA
- * @pages: Array representing buffer from iommu_dma_alloc()
+ * __iommu_dma_mmap - Map a buffer into provided user VMA
+ * @pages: Array representing buffer from __iommu_dma_alloc()
  * @size: Size of buffer in bytes
  * @vma: VMA describing requested userspace mapping
  *
  * Maps the pages of the buffer in @pages into @vma. The caller is responsible
  * for verifying the correct size and protection of @vma beforehand.
  */
-
-int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
+static int __iommu_dma_mmap(struct page **pages, size_t size,
+               struct vm_area_struct *vma)
 {
        return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
 }
 
-static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
-               size_t size, int prot, struct iommu_domain *domain)
+static void iommu_dma_sync_single_for_cpu(struct device *dev,
+               dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
 {
-       struct iommu_dma_cookie *cookie = domain->iova_cookie;
-       size_t iova_off = 0;
-       dma_addr_t iova;
+       phys_addr_t phys;
 
-       if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
-               iova_off = iova_offset(&cookie->iovad, phys);
-               size = iova_align(&cookie->iovad, size + iova_off);
-       }
+       if (dev_is_dma_coherent(dev))
+               return;
 
-       iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
-       if (!iova)
-               return DMA_MAPPING_ERROR;
+       phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
+       arch_sync_dma_for_cpu(dev, phys, size, dir);
+}
 
-       if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
-               iommu_dma_free_iova(cookie, iova, size);
-               return DMA_MAPPING_ERROR;
-       }
-       return iova + iova_off;
+static void iommu_dma_sync_single_for_device(struct device *dev,
+               dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
+{
+       phys_addr_t phys;
+
+       if (dev_is_dma_coherent(dev))
+               return;
+
+       phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
+       arch_sync_dma_for_device(dev, phys, size, dir);
+}
+
+static void iommu_dma_sync_sg_for_cpu(struct device *dev,
+               struct scatterlist *sgl, int nelems,
+               enum dma_data_direction dir)
+{
+       struct scatterlist *sg;
+       int i;
+
+       if (dev_is_dma_coherent(dev))
+               return;
+
+       for_each_sg(sgl, sg, nelems, i)
+               arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+}
+
+static void iommu_dma_sync_sg_for_device(struct device *dev,
+               struct scatterlist *sgl, int nelems,
+               enum dma_data_direction dir)
+{
+       struct scatterlist *sg;
+       int i;
+
+       if (dev_is_dma_coherent(dev))
+               return;
+
+       for_each_sg(sgl, sg, nelems, i)
+               arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
 }
 
-dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
-               unsigned long offset, size_t size, int prot)
+static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
+               unsigned long offset, size_t size, enum dma_data_direction dir,
+               unsigned long attrs)
 {
-       return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot,
-                       iommu_get_dma_domain(dev));
+       phys_addr_t phys = page_to_phys(page) + offset;
+       bool coherent = dev_is_dma_coherent(dev);
+       int prot = dma_info_to_prot(dir, coherent, attrs);
+       dma_addr_t dma_handle;
+
+       dma_handle =__iommu_dma_map(dev, phys, size, prot);
+       if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+           dma_handle != DMA_MAPPING_ERROR)
+               arch_sync_dma_for_device(dev, phys, size, dir);
+       return dma_handle;
 }
 
-void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
-               enum dma_data_direction dir, unsigned long attrs)
+static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+               size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-       __iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
+       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+               iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
+       __iommu_dma_unmap(dev, dma_handle, size);
 }
 
 /*
@@ -758,18 +812,22 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
  * impedance-matching, to be able to hand off a suitably-aligned list,
  * but still preserve the original offsets and sizes for the caller.
  */
-int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
-               int nents, int prot)
+static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
+               int nents, enum dma_data_direction dir, unsigned long attrs)
 {
        struct iommu_domain *domain = iommu_get_dma_domain(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        struct scatterlist *s, *prev = NULL;
+       int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
        dma_addr_t iova;
        size_t iova_len = 0;
        unsigned long mask = dma_get_seg_boundary(dev);
        int i;
 
+       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+               iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
+
        /*
         * Work out how much IOVA space we need, and align the segments to
         * IOVA granules for the IOMMU driver to handle. With some clever
@@ -829,12 +887,16 @@ out_restore_sg:
        return 0;
 }
 
-void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-               enum dma_data_direction dir, unsigned long attrs)
+static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+               int nents, enum dma_data_direction dir, unsigned long attrs)
 {
        dma_addr_t start, end;
        struct scatterlist *tmp;
        int i;
+
+       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+               iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
+
        /*
         * The scatterlist segments are mapped into a single
         * contiguous IOVA allocation, so this is incredibly easy.
@@ -846,21 +908,231 @@ void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
                sg = tmp;
        }
        end = sg_dma_address(sg) + sg_dma_len(sg);
-       __iommu_dma_unmap(iommu_get_dma_domain(dev), start, end - start);
+       __iommu_dma_unmap(dev, start, end - start);
 }
 
-dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
+static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
        return __iommu_dma_map(dev, phys, size,
-                       dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
-                       iommu_get_dma_domain(dev));
+                       dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
 }
 
-void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
+static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-       __iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
+       __iommu_dma_unmap(dev, handle, size);
+}
+
+static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
+{
+       size_t alloc_size = PAGE_ALIGN(size);
+       int count = alloc_size >> PAGE_SHIFT;
+       struct page *page = NULL, **pages = NULL;
+
+       /* Non-coherent atomic allocation? Easy */
+       if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+           dma_free_from_pool(cpu_addr, alloc_size))
+               return;
+
+       if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
+               /*
+                * If it the address is remapped, then it's either non-coherent
+                * or highmem CMA, or an iommu_dma_alloc_remap() construction.
+                */
+               pages = __iommu_dma_get_pages(cpu_addr);
+               if (!pages)
+                       page = vmalloc_to_page(cpu_addr);
+               dma_common_free_remap(cpu_addr, alloc_size, VM_USERMAP);
+       } else {
+               /* Lowmem means a coherent atomic or CMA allocation */
+               page = virt_to_page(cpu_addr);
+       }
+
+       if (pages)
+               __iommu_dma_free_pages(pages, count);
+       if (page && !dma_release_from_contiguous(dev, page, count))
+               __free_pages(page, get_order(alloc_size));
+}
+
+static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
+               dma_addr_t handle, unsigned long attrs)
+{
+       __iommu_dma_unmap(dev, handle, size);
+       __iommu_dma_free(dev, size, cpu_addr);
+}
+
+static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
+               struct page **pagep, gfp_t gfp, unsigned long attrs)
+{
+       bool coherent = dev_is_dma_coherent(dev);
+       size_t alloc_size = PAGE_ALIGN(size);
+       struct page *page = NULL;
+       void *cpu_addr;
+
+       if (gfpflags_allow_blocking(gfp))
+               page = dma_alloc_from_contiguous(dev, alloc_size >> PAGE_SHIFT,
+                                                get_order(alloc_size),
+                                                gfp & __GFP_NOWARN);
+       if (!page)
+               page = alloc_pages(gfp, get_order(alloc_size));
+       if (!page)
+               return NULL;
+
+       if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
+               pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
+
+               cpu_addr = dma_common_contiguous_remap(page, alloc_size,
+                               VM_USERMAP, prot, __builtin_return_address(0));
+               if (!cpu_addr)
+                       goto out_free_pages;
+
+               if (!coherent)
+                       arch_dma_prep_coherent(page, size);
+       } else {
+               cpu_addr = page_address(page);
+       }
+
+       *pagep = page;
+       memset(cpu_addr, 0, alloc_size);
+       return cpu_addr;
+out_free_pages:
+       if (!dma_release_from_contiguous(dev, page, alloc_size >> PAGE_SHIFT))
+               __free_pages(page, get_order(alloc_size));
+       return NULL;
+}
+
+static void *iommu_dma_alloc(struct device *dev, size_t size,
+               dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
+{
+       bool coherent = dev_is_dma_coherent(dev);
+       int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
+       struct page *page = NULL;
+       void *cpu_addr;
+
+       gfp |= __GFP_ZERO;
+
+       if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
+           !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
+               return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);
+
+       if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+           !gfpflags_allow_blocking(gfp) && !coherent)
+               cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
+       else
+               cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
+       if (!cpu_addr)
+               return NULL;
+
+       *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot);
+       if (*handle == DMA_MAPPING_ERROR) {
+               __iommu_dma_free(dev, size, cpu_addr);
+               return NULL;
+       }
+
+       return cpu_addr;
+}
+
+static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+               void *cpu_addr, dma_addr_t dma_addr, size_t size,
+               unsigned long attrs)
+{
+       unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       unsigned long pfn, off = vma->vm_pgoff;
+       int ret;
+
+       vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
+
+       if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
+               return ret;
+
+       if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
+               return -ENXIO;
+
+       if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
+               struct page **pages = __iommu_dma_get_pages(cpu_addr);
+
+               if (pages)
+                       return __iommu_dma_mmap(pages, size, vma);
+               pfn = vmalloc_to_pfn(cpu_addr);
+       } else {
+               pfn = page_to_pfn(virt_to_page(cpu_addr));
+       }
+
+       return remap_pfn_range(vma, vma->vm_start, pfn + off,
+                              vma->vm_end - vma->vm_start,
+                              vma->vm_page_prot);
+}
+
+static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+               void *cpu_addr, dma_addr_t dma_addr, size_t size,
+               unsigned long attrs)
+{
+       struct page *page;
+       int ret;
+
+       if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
+               struct page **pages = __iommu_dma_get_pages(cpu_addr);
+
+               if (pages) {
+                       return sg_alloc_table_from_pages(sgt, pages,
+                                       PAGE_ALIGN(size) >> PAGE_SHIFT,
+                                       0, size, GFP_KERNEL);
+               }
+
+               page = vmalloc_to_page(cpu_addr);
+       } else {
+               page = virt_to_page(cpu_addr);
+       }
+
+       ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+       if (!ret)
+               sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+       return ret;
+}
+
+static const struct dma_map_ops iommu_dma_ops = {
+       .alloc                  = iommu_dma_alloc,
+       .free                   = iommu_dma_free,
+       .mmap                   = iommu_dma_mmap,
+       .get_sgtable            = iommu_dma_get_sgtable,
+       .map_page               = iommu_dma_map_page,
+       .unmap_page             = iommu_dma_unmap_page,
+       .map_sg                 = iommu_dma_map_sg,
+       .unmap_sg               = iommu_dma_unmap_sg,
+       .sync_single_for_cpu    = iommu_dma_sync_single_for_cpu,
+       .sync_single_for_device = iommu_dma_sync_single_for_device,
+       .sync_sg_for_cpu        = iommu_dma_sync_sg_for_cpu,
+       .sync_sg_for_device     = iommu_dma_sync_sg_for_device,
+       .map_resource           = iommu_dma_map_resource,
+       .unmap_resource         = iommu_dma_unmap_resource,
+};
+
+/*
+ * The IOMMU core code allocates the default DMA domain, which the underlying
+ * IOMMU driver needs to support via the dma-iommu layer.
+ */
+void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
+{
+       struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+
+       if (!domain)
+               goto out_err;
+
+       /*
+        * The IOMMU core code allocates the default DMA domain, which the
+        * underlying IOMMU driver needs to support via the dma-iommu layer.
+        */
+       if (domain->type == IOMMU_DOMAIN_DMA) {
+               if (iommu_dma_init_domain(domain, dma_base, size, dev))
+                       goto out_err;
+               dev->dma_ops = &iommu_dma_ops;
+       }
+
+       return;
+out_err:
+        pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
+                dev_name(dev));
 }
 
 static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
@@ -881,7 +1153,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
        if (!msi_page)
                return NULL;
 
-       iova = __iommu_dma_map(dev, msi_addr, size, prot, domain);
+       iova = __iommu_dma_map(dev, msi_addr, size, prot);
        if (iova == DMA_MAPPING_ERROR)
                goto out_free_page;
 
@@ -943,3 +1215,9 @@ void iommu_dma_compose_msi_msg(struct msi_desc *desc,
        msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
        msg->address_lo += lower_32_bits(msi_page->iova);
 }
+
+static int iommu_dma_init(void)
+{
+       return iova_cache_get();
+}
+arch_initcall(iommu_dma_init);
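
For reference, a hypothetical driver allocation (not part of this merge) shows how the consolidated code is reached: with dev->dma_ops set to iommu_dma_ops by iommu_setup_dma_ops(), dma_alloc_coherent() lands in iommu_dma_alloc() above, which picks one of the three strategies visible in the diff.

    #include <linux/gfp.h>
    #include <linux/dma-mapping.h>

    /*
     * iommu_dma_alloc() chooses, in order:
     *  - iommu_dma_alloc_remap() for blocking allocations with DMA_REMAP
     *    and without DMA_ATTR_FORCE_CONTIGUOUS,
     *  - the atomic pool for non-blocking, non-coherent devices
     *    (DMA_DIRECT_REMAP),
     *  - iommu_dma_alloc_pages() otherwise.
     */
    static void *example_alloc(struct device *dev, size_t size,
                               dma_addr_t *dma_handle)
    {
            return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
    }
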
diff --git a/drivers/iommu/intel-iommu-debugfs.c b/drivers/iommu/intel-iommu-debugfs.c
index 7fabf9b..73a5529 100644
 
 #include <asm/irq_remapping.h>
 
+#include "intel-pasid.h"
+
+struct tbl_walk {
+       u16 bus;
+       u16 devfn;
+       u32 pasid;
+       struct root_entry *rt_entry;
+       struct context_entry *ctx_entry;
+       struct pasid_entry *pasid_tbl_entry;
+};
+
 struct iommu_regset {
        int offset;
        const char *regs;
@@ -131,16 +142,86 @@ out:
 }
 DEFINE_SHOW_ATTRIBUTE(iommu_regset);
 
-static void ctx_tbl_entry_show(struct seq_file *m, struct intel_iommu *iommu,
-                              int bus)
+static inline void print_tbl_walk(struct seq_file *m)
 {
-       struct context_entry *context;
-       int devfn;
+       struct tbl_walk *tbl_wlk = m->private;
+
+       seq_printf(m, "%02x:%02x.%x\t0x%016llx:0x%016llx\t0x%016llx:0x%016llx\t",
+                  tbl_wlk->bus, PCI_SLOT(tbl_wlk->devfn),
+                  PCI_FUNC(tbl_wlk->devfn), tbl_wlk->rt_entry->hi,
+                  tbl_wlk->rt_entry->lo, tbl_wlk->ctx_entry->hi,
+                  tbl_wlk->ctx_entry->lo);
+
+       /*
+        * A legacy mode DMAR doesn't support PASID, hence default it to -1
+        * indicating that it's invalid. Also, default all PASID related fields
+        * to 0.
+        */
+       if (!tbl_wlk->pasid_tbl_entry)
+               seq_printf(m, "%-6d\t0x%016llx:0x%016llx:0x%016llx\n", -1,
+                          (u64)0, (u64)0, (u64)0);
+       else
+               seq_printf(m, "%-6d\t0x%016llx:0x%016llx:0x%016llx\n",
+                          tbl_wlk->pasid, tbl_wlk->pasid_tbl_entry->val[0],
+                          tbl_wlk->pasid_tbl_entry->val[1],
+                          tbl_wlk->pasid_tbl_entry->val[2]);
+}
 
-       seq_printf(m, " Context Table Entries for Bus: %d\n", bus);
-       seq_puts(m, "  Entry\tB:D.F\tHigh\tLow\n");
+static void pasid_tbl_walk(struct seq_file *m, struct pasid_entry *tbl_entry,
+                          u16 dir_idx)
+{
+       struct tbl_walk *tbl_wlk = m->private;
+       u8 tbl_idx;
+
+       for (tbl_idx = 0; tbl_idx < PASID_TBL_ENTRIES; tbl_idx++) {
+               if (pasid_pte_is_present(tbl_entry)) {
+                       tbl_wlk->pasid_tbl_entry = tbl_entry;
+                       tbl_wlk->pasid = (dir_idx << PASID_PDE_SHIFT) + tbl_idx;
+                       print_tbl_walk(m);
+               }
+
+               tbl_entry++;
+       }
+}
+
+static void pasid_dir_walk(struct seq_file *m, u64 pasid_dir_ptr,
+                          u16 pasid_dir_size)
+{
+       struct pasid_dir_entry *dir_entry = phys_to_virt(pasid_dir_ptr);
+       struct pasid_entry *pasid_tbl;
+       u16 dir_idx;
+
+       for (dir_idx = 0; dir_idx < pasid_dir_size; dir_idx++) {
+               pasid_tbl = get_pasid_table_from_pde(dir_entry);
+               if (pasid_tbl)
+                       pasid_tbl_walk(m, pasid_tbl, dir_idx);
+
+               dir_entry++;
+       }
+}
+
+static void ctx_tbl_walk(struct seq_file *m, struct intel_iommu *iommu, u16 bus)
+{
+       struct context_entry *context;
+       u16 devfn, pasid_dir_size;
+       u64 pasid_dir_ptr;
 
        for (devfn = 0; devfn < 256; devfn++) {
+               struct tbl_walk tbl_wlk = {0};
+
+               /*
+                * Scalable mode root entry points to upper scalable mode
+                * context table and lower scalable mode context table. Each
+                * scalable mode context table has 128 context entries where as
+                * legacy mode context table has 256 context entries. So in
+                * scalable mode, the context entries for former 128 devices are
+                * in the lower scalable mode context table, while the latter
+                * 128 devices are in the upper scalable mode context table.
+                * In scalable mode, when devfn > 127, iommu_context_addr()
+                * automatically refers to upper scalable mode context table and
+                * hence the caller doesn't have to worry about differences
+                * between scalable mode and non scalable mode.
+                */
                context = iommu_context_addr(iommu, bus, devfn, 0);
                if (!context)
                        return;
@@ -148,33 +229,41 @@ static void ctx_tbl_entry_show(struct seq_file *m, struct intel_iommu *iommu,
                if (!context_present(context))
                        continue;
 
-               seq_printf(m, "  %-5d\t%02x:%02x.%x\t%-6llx\t%llx\n", devfn,
-                          bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
-                          context[0].hi, context[0].lo);
+               tbl_wlk.bus = bus;
+               tbl_wlk.devfn = devfn;
+               tbl_wlk.rt_entry = &iommu->root_entry[bus];
+               tbl_wlk.ctx_entry = context;
+               m->private = &tbl_wlk;
+
+               if (pasid_supported(iommu) && is_pasid_enabled(context)) {
+                       pasid_dir_ptr = context->lo & VTD_PAGE_MASK;
+                       pasid_dir_size = get_pasid_dir_size(context);
+                       pasid_dir_walk(m, pasid_dir_ptr, pasid_dir_size);
+                       continue;
+               }
+
+               print_tbl_walk(m);
        }
 }
 
-static void root_tbl_entry_show(struct seq_file *m, struct intel_iommu *iommu)
+static void root_tbl_walk(struct seq_file *m, struct intel_iommu *iommu)
 {
        unsigned long flags;
-       int bus;
+       u16 bus;
 
        spin_lock_irqsave(&iommu->lock, flags);
-       seq_printf(m, "IOMMU %s: Root Table Address:%llx\n", iommu->name,
+       seq_printf(m, "IOMMU %s: Root Table Address: 0x%llx\n", iommu->name,
                   (u64)virt_to_phys(iommu->root_entry));
-       seq_puts(m, "Root Table Entries:\n");
+       seq_puts(m, "B.D.F\tRoot_entry\t\t\t\tContext_entry\t\t\t\tPASID\tPASID_table_entry\n");
 
-       for (bus = 0; bus < 256; bus++) {
-               if (!(iommu->root_entry[bus].lo & 1))
-                       continue;
+       /*
+        * No need to check if the root entry is present or not because
+        * iommu_context_addr() performs the same check before returning
+        * context entry.
+        */
+       for (bus = 0; bus < 256; bus++)
+               ctx_tbl_walk(m, iommu, bus);
 
-               seq_printf(m, " Bus: %d H: %llx L: %llx\n", bus,
-                          iommu->root_entry[bus].hi,
-                          iommu->root_entry[bus].lo);
-
-               ctx_tbl_entry_show(m, iommu, bus);
-               seq_putc(m, '\n');
-       }
        spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -185,7 +274,7 @@ static int dmar_translation_struct_show(struct seq_file *m, void *unused)
 
        rcu_read_lock();
        for_each_active_iommu(iommu, drhd) {
-               root_tbl_entry_show(m, iommu);
+               root_tbl_walk(m, iommu);
                seq_putc(m, '\n');
        }
        rcu_read_unlock();
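
The reworked dmar_translation_struct dump (typically read from /sys/kernel/debug/iommu/intel/dmar_translation_struct) now emits one line per present context entry, carrying the root entry, context entry and, in scalable mode, each present PASID table entry. With entirely made-up values, a legacy-mode device would appear roughly as:

    B.D.F	Root_entry				Context_entry				PASID	PASID_table_entry
    00:14.0	0x0000000000000000:0x000000045f23c001	0x0000000000000102:0x000000045e4b8001	-1	0x0000000000000000:0x0000000000000000:0x0000000000000000

where PASID is printed as -1 and the PASID fields as zeroes because a legacy-mode DMAR has no PASID table, matching the comment in print_tbl_walk().
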
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 162b323..ac4172c 100644
@@ -294,14 +294,16 @@ static inline void context_clear_entry(struct context_entry *context)
 static struct dmar_domain *si_domain;
 static int hw_pass_through = 1;
 
+/* si_domain contains mulitple devices */
+#define DOMAIN_FLAG_STATIC_IDENTITY            BIT(0)
+
 /*
- * Domain represents a virtual machine, more than one devices
- * across iommus may be owned in one domain, e.g. kvm guest.
+ * This is a DMA domain allocated through the iommu domain allocation
+ * interface. But one or more devices belonging to this domain have
+ * been chosen to use a private domain. We should avoid to use the
+ * map/unmap/iova_to_phys APIs on it.
  */
-#define DOMAIN_FLAG_VIRTUAL_MACHINE    (1 << 0)
-
-/* si_domain contains mulitple devices */
-#define DOMAIN_FLAG_STATIC_IDENTITY    (1 << 1)
+#define DOMAIN_FLAG_LOSE_CHILDREN              BIT(1)
 
 #define for_each_domain_iommu(idx, domain)                     \
        for (idx = 0; idx < g_num_of_iommus; idx++)             \
@@ -314,7 +316,6 @@ struct dmar_rmrr_unit {
        u64     end_address;            /* reserved end address */
        struct dmar_dev_scope *devices; /* target devices */
        int     devices_cnt;            /* target device count */
-       struct iommu_resv_region *resv; /* reserved region handle */
 };
 
 struct dmar_atsr_unit {
@@ -342,6 +343,9 @@ static void domain_context_clear(struct intel_iommu *iommu,
                                 struct device *dev);
 static int domain_detach_iommu(struct dmar_domain *domain,
                               struct intel_iommu *iommu);
+static bool device_is_rmrr_locked(struct device *dev);
+static int intel_iommu_attach_device(struct iommu_domain *domain,
+                                    struct device *dev);
 
 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
 int dmar_disabled = 0;
@@ -349,6 +353,7 @@ int dmar_disabled = 0;
 int dmar_disabled = 1;
 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
 
+int intel_iommu_sm;
 int intel_iommu_enabled = 0;
 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
 
@@ -356,21 +361,17 @@ static int dmar_map_gfx = 1;
 static int dmar_forcedac;
 static int intel_iommu_strict;
 static int intel_iommu_superpage = 1;
-static int intel_iommu_sm;
 static int iommu_identity_mapping;
 
 #define IDENTMAP_ALL           1
 #define IDENTMAP_GFX           2
 #define IDENTMAP_AZALIA                4
 
-#define sm_supported(iommu)    (intel_iommu_sm && ecap_smts((iommu)->ecap))
-#define pasid_supported(iommu) (sm_supported(iommu) &&                 \
-                                ecap_pasid((iommu)->ecap))
-
 int intel_iommu_gfx_mapped;
 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
 
 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
+#define DEFER_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-2))
 static DEFINE_SPINLOCK(device_domain_lock);
 static LIST_HEAD(device_domain_list);
 
@@ -535,22 +536,11 @@ static inline void free_devinfo_mem(void *vaddr)
        kmem_cache_free(iommu_devinfo_cache, vaddr);
 }
 
-static inline int domain_type_is_vm(struct dmar_domain *domain)
-{
-       return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
-}
-
 static inline int domain_type_is_si(struct dmar_domain *domain)
 {
        return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
 }
 
-static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
-{
-       return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
-                               DOMAIN_FLAG_STATIC_IDENTITY);
-}
-
 static inline int domain_pfn_supported(struct dmar_domain *domain,
                                       unsigned long pfn)
 {
@@ -598,7 +588,9 @@ struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
        int iommu_id;
 
        /* si_domain and vm domain should not get here. */
-       BUG_ON(domain_type_is_vm_or_si(domain));
+       if (WARN_ON(domain->domain.type != IOMMU_DOMAIN_DMA))
+               return NULL;
+
        for_each_domain_iommu(iommu_id, domain)
                break;
 
@@ -729,12 +721,39 @@ static int iommu_dummy(struct device *dev)
        return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
 }
 
+/**
+ * is_downstream_to_pci_bridge - test if a device belongs to the PCI
+ *                              sub-hierarchy of a candidate PCI-PCI bridge
+ * @dev: candidate PCI device belonging to @bridge PCI sub-hierarchy
+ * @bridge: the candidate PCI-PCI bridge
+ *
+ * Return: true if @dev belongs to @bridge PCI sub-hierarchy, else false.
+ */
+static bool
+is_downstream_to_pci_bridge(struct device *dev, struct device *bridge)
+{
+       struct pci_dev *pdev, *pbridge;
+
+       if (!dev_is_pci(dev) || !dev_is_pci(bridge))
+               return false;
+
+       pdev = to_pci_dev(dev);
+       pbridge = to_pci_dev(bridge);
+
+       if (pbridge->subordinate &&
+           pbridge->subordinate->number <= pdev->bus->number &&
+           pbridge->subordinate->busn_res.end >= pdev->bus->number)
+               return true;
+
+       return false;
+}
+
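The helper above reduces to an interval test on bus numbers: a bridge forwards configuration cycles for every bus in [secondary, subordinate]. A minimal standalone sketch of that test (userspace C, not kernel code; fake_bridge and bus_is_downstream are invented names):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A device is "downstream" of a bridge iff its bus number falls inside the
 * bridge's forwarding window, which is the same interval test that
 * is_downstream_to_pci_bridge() performs on pbridge->subordinate->number
 * and pbridge->subordinate->busn_res.end.
 */
struct fake_bridge {
	uint8_t secondary;    /* first bus behind the bridge */
	uint8_t subordinate;  /* highest bus behind the bridge */
};

static bool bus_is_downstream(uint8_t dev_bus, const struct fake_bridge *br)
{
	return dev_bus >= br->secondary && dev_bus <= br->subordinate;
}

int main(void)
{
	struct fake_bridge br = { .secondary = 2, .subordinate = 5 };

	printf("%d\n", bus_is_downstream(3, &br)); /* 1: bus 3 is behind it */
	printf("%d\n", bus_is_downstream(7, &br)); /* 0: bus 7 is not */
	return 0;
}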
 static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
 {
        struct dmar_drhd_unit *drhd = NULL;
        struct intel_iommu *iommu;
        struct device *tmp;
-       struct pci_dev *ptmp, *pdev = NULL;
+       struct pci_dev *pdev = NULL;
        u16 segment = 0;
        int i;
 
@@ -780,13 +799,7 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
                                goto out;
                        }
 
-                       if (!pdev || !dev_is_pci(tmp))
-                               continue;
-
-                       ptmp = to_pci_dev(tmp);
-                       if (ptmp->subordinate &&
-                           ptmp->subordinate->number <= pdev->bus->number &&
-                           ptmp->subordinate->busn_res.end >= pdev->bus->number)
+                       if (is_downstream_to_pci_bridge(dev, tmp))
                                goto got_pdev;
                }
 
@@ -908,7 +921,6 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
        return pte;
 }
 
-
 /* return address's pte at specific level */
 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
                                         unsigned long pfn,
@@ -1577,7 +1589,6 @@ static void iommu_disable_translation(struct intel_iommu *iommu)
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
-
 static int iommu_init_domains(struct intel_iommu *iommu)
 {
        u32 ndomains, nlongs;
@@ -1615,8 +1626,6 @@ static int iommu_init_domains(struct intel_iommu *iommu)
                return -ENOMEM;
        }
 
-
-
        /*
         * If Caching mode is set, then invalid translations are tagged
         * with domain-id 0, hence we need to pre-allocate it. We also
@@ -1646,32 +1655,15 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
        if (!iommu->domains || !iommu->domain_ids)
                return;
 
-again:
        spin_lock_irqsave(&device_domain_lock, flags);
        list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
-               struct dmar_domain *domain;
-
                if (info->iommu != iommu)
                        continue;
 
                if (!info->dev || !info->domain)
                        continue;
 
-               domain = info->domain;
-
                __dmar_remove_one_dev_info(info);
-
-               if (!domain_type_is_vm_or_si(domain)) {
-                       /*
-                        * The domain_exit() function  can't be called under
-                        * device_domain_lock, as it takes this lock itself.
-                        * So release the lock here and re-run the loop
-                        * afterwards.
-                        */
-                       spin_unlock_irqrestore(&device_domain_lock, flags);
-                       domain_exit(domain);
-                       goto again;
-               }
        }
        spin_unlock_irqrestore(&device_domain_lock, flags);
 
@@ -1841,71 +1833,12 @@ static inline int guestwidth_to_adjustwidth(int gaw)
        return agaw;
 }
 
-static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
-                      int guest_width)
-{
-       int adjust_width, agaw;
-       unsigned long sagaw;
-       int err;
-
-       init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
-
-       err = init_iova_flush_queue(&domain->iovad,
-                                   iommu_flush_iova, iova_entry_free);
-       if (err)
-               return err;
-
-       domain_reserve_special_ranges(domain);
-
-       /* calculate AGAW */
-       if (guest_width > cap_mgaw(iommu->cap))
-               guest_width = cap_mgaw(iommu->cap);
-       domain->gaw = guest_width;
-       adjust_width = guestwidth_to_adjustwidth(guest_width);
-       agaw = width_to_agaw(adjust_width);
-       sagaw = cap_sagaw(iommu->cap);
-       if (!test_bit(agaw, &sagaw)) {
-               /* hardware doesn't support it, choose a bigger one */
-               pr_debug("Hardware doesn't support agaw %d\n", agaw);
-               agaw = find_next_bit(&sagaw, 5, agaw);
-               if (agaw >= 5)
-                       return -ENODEV;
-       }
-       domain->agaw = agaw;
-
-       if (ecap_coherent(iommu->ecap))
-               domain->iommu_coherency = 1;
-       else
-               domain->iommu_coherency = 0;
-
-       if (ecap_sc_support(iommu->ecap))
-               domain->iommu_snooping = 1;
-       else
-               domain->iommu_snooping = 0;
-
-       if (intel_iommu_superpage)
-               domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
-       else
-               domain->iommu_superpage = 0;
-
-       domain->nid = iommu->node;
-
-       /* always allocate the top pgd */
-       domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
-       if (!domain->pgd)
-               return -ENOMEM;
-       __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
-       return 0;
-}
-
 static void domain_exit(struct dmar_domain *domain)
 {
        struct page *freelist;
 
        /* Remove associated devices and clear attached or cached domains */
-       rcu_read_lock();
        domain_remove_dev_info(domain);
-       rcu_read_unlock();
 
        /* destroy iovas */
        put_iova_domain(&domain->iovad);
@@ -2336,7 +2269,7 @@ static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                          struct scatterlist *sg, unsigned long phys_pfn,
                          unsigned long nr_pages, int prot)
 {
-       int ret;
+       int iommu_id, ret;
        struct intel_iommu *iommu;
 
        /* Do the real mapping first */
@@ -2344,18 +2277,8 @@ static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
        if (ret)
                return ret;
 
-       /* Notify about the new mapping */
-       if (domain_type_is_vm(domain)) {
-               /* VM typed domains can have more than one IOMMUs */
-               int iommu_id;
-
-               for_each_domain_iommu(iommu_id, domain) {
-                       iommu = g_iommus[iommu_id];
-                       __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
-               }
-       } else {
-               /* General domains only have one IOMMU */
-               iommu = domain_get_iommu(domain);
+       for_each_domain_iommu(iommu_id, domain) {
+               iommu = g_iommus[iommu_id];
                __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
        }
 
@@ -2435,8 +2358,18 @@ static struct dmar_domain *find_domain(struct device *dev)
 {
        struct device_domain_info *info;
 
+       if (unlikely(dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO)) {
+               struct iommu_domain *domain;
+
+               dev->archdata.iommu = NULL;
+               domain = iommu_get_domain_for_dev(dev);
+               if (domain)
+                       intel_iommu_attach_device(domain, dev);
+       }
+
        /* No lock here, assumes no domain exit in normal case */
        info = dev->archdata.iommu;
+
        if (likely(info))
                return info->domain;
        return NULL;
@@ -2580,6 +2513,31 @@ static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
        return 0;
 }
 
+static int domain_init(struct dmar_domain *domain, int guest_width)
+{
+       int adjust_width;
+
+       init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
+       domain_reserve_special_ranges(domain);
+
+       /* calculate AGAW */
+       domain->gaw = guest_width;
+       adjust_width = guestwidth_to_adjustwidth(guest_width);
+       domain->agaw = width_to_agaw(adjust_width);
+
+       domain->iommu_coherency = 0;
+       domain->iommu_snooping = 0;
+       domain->iommu_superpage = 0;
+       domain->max_addr = 0;
+
+       /* always allocate the top pgd */
+       domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
+       if (!domain->pgd)
+               return -ENOMEM;
+       domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
+       return 0;
+}
+
 static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
 {
        struct device_domain_info *info;
@@ -2617,13 +2575,20 @@ static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
        domain = alloc_domain(0);
        if (!domain)
                return NULL;
-       if (domain_init(domain, iommu, gaw)) {
+
+       if (domain_init(domain, gaw)) {
                domain_exit(domain);
                return NULL;
        }
 
-out:
+       if (init_iova_flush_queue(&domain->iovad,
+                                 iommu_flush_iova,
+                                 iova_entry_free)) {
+               pr_warn("iova flush queue initialization failed\n");
+               intel_iommu_strict = 1;
+       }
 
+out:
        return domain;
 }
 
@@ -2663,29 +2628,6 @@ static struct dmar_domain *set_domain_for_dev(struct device *dev,
        return domain;
 }
 
-static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
-{
-       struct dmar_domain *domain, *tmp;
-
-       domain = find_domain(dev);
-       if (domain)
-               goto out;
-
-       domain = find_or_alloc_domain(dev, gaw);
-       if (!domain)
-               goto out;
-
-       tmp = set_domain_for_dev(dev, domain);
-       if (!tmp || domain != tmp) {
-               domain_exit(domain);
-               domain = tmp;
-       }
-
-out:
-
-       return domain;
-}
-
 static int iommu_domain_identity_map(struct dmar_domain *domain,
                                     unsigned long long start,
                                     unsigned long long end)
@@ -2750,75 +2692,21 @@ static int domain_prepare_identity_map(struct device *dev,
        return iommu_domain_identity_map(domain, start, end);
 }
 
-static int iommu_prepare_identity_map(struct device *dev,
-                                     unsigned long long start,
-                                     unsigned long long end)
-{
-       struct dmar_domain *domain;
-       int ret;
-
-       domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
-       if (!domain)
-               return -ENOMEM;
-
-       ret = domain_prepare_identity_map(dev, domain, start, end);
-       if (ret)
-               domain_exit(domain);
-
-       return ret;
-}
-
-static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
-                                        struct device *dev)
-{
-       if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
-               return 0;
-       return iommu_prepare_identity_map(dev, rmrr->base_address,
-                                         rmrr->end_address);
-}
-
-#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
-static inline void iommu_prepare_isa(void)
-{
-       struct pci_dev *pdev;
-       int ret;
-
-       pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
-       if (!pdev)
-               return;
-
-       pr_info("Prepare 0-16MiB unity mapping for LPC\n");
-       ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
-
-       if (ret)
-               pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
-
-       pci_dev_put(pdev);
-}
-#else
-static inline void iommu_prepare_isa(void)
-{
-       return;
-}
-#endif /* !CONFIG_INTEL_IOMMU_FLPY_WA */
-
-static int md_domain_init(struct dmar_domain *domain, int guest_width);
-
 static int __init si_domain_init(int hw)
 {
-       int nid, ret;
+       struct dmar_rmrr_unit *rmrr;
+       struct device *dev;
+       int i, nid, ret;
 
        si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
        if (!si_domain)
                return -EFAULT;
 
-       if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+       if (domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
                domain_exit(si_domain);
                return -EFAULT;
        }
 
-       pr_debug("Identity mapping domain allocated\n");
-
        if (hw)
                return 0;
 
@@ -2834,6 +2722,31 @@ static int __init si_domain_init(int hw)
                }
        }
 
+       /*
+        * Normally we use DMA domains for devices which have RMRRs, but we
+        * relax this requirement for graphics and USB devices. Identity map
+        * the RMRRs for graphics and USB devices so that they can use the
+        * si_domain.
+        */
+       for_each_rmrr_units(rmrr) {
+               for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
+                                         i, dev) {
+                       unsigned long long start = rmrr->base_address;
+                       unsigned long long end = rmrr->end_address;
+
+                       if (device_is_rmrr_locked(dev))
+                               continue;
+
+                       if (WARN_ON(end < start ||
+                                   end >> agaw_to_width(si_domain->agaw)))
+                               continue;
+
+                       ret = iommu_domain_identity_map(si_domain, start, end);
+                       if (ret)
+                               return ret;
+               }
+       }
+
        return 0;
 }
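
The WARN_ON() in the RMRR loop of si_domain_init() guards against malformed or out-of-range regions. A rough standalone sketch of that check (rmrr_fits is a hypothetical helper, assuming the address-width semantics of agaw_to_width()):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* An RMRR can only be identity mapped into si_domain if it is well formed
 * (end >= start) and its end address fits within the domain's guest address
 * width, i.e. shifting the end right by that width leaves nothing behind.
 */
static bool rmrr_fits(uint64_t start, uint64_t end, unsigned addr_width)
{
	if (end < start)
		return false;
	return (end >> addr_width) == 0;
}

int main(void)
{
	/* 48-bit domain: a low RMRR fits, one beyond 2^48 does not. */
	printf("%d\n", rmrr_fits(0x7c000000ULL, 0x7cffffffULL, 48));
	printf("%d\n", rmrr_fits(0x7c000000ULL, 1ULL << 50, 48));
	return 0;
}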
 
@@ -2841,9 +2754,6 @@ static int identity_mapping(struct device *dev)
 {
        struct device_domain_info *info;
 
-       if (likely(!iommu_identity_mapping))
-               return 0;
-
        info = dev->archdata.iommu;
        if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
                return (info->domain == si_domain);
@@ -2882,7 +2792,8 @@ static bool device_has_rmrr(struct device *dev)
                 */
                for_each_active_dev_scope(rmrr->devices,
                                          rmrr->devices_cnt, i, tmp)
-                       if (tmp == dev) {
+                       if (tmp == dev ||
+                           is_downstream_to_pci_bridge(dev, tmp)) {
                                rcu_read_unlock();
                                return true;
                        }
@@ -2891,6 +2802,35 @@ static bool device_has_rmrr(struct device *dev)
        return false;
 }
 
+/**
+ * device_rmrr_is_relaxable - Test whether the RMRR of this device
+ * is relaxable (i.e. is allowed to not be enforced under some conditions)
+ * @dev: device handle
+ *
+ * We assume that PCI USB devices with RMRRs have them largely
+ * for historical reasons and that the RMRR space is not actively used post
+ * boot.  This exclusion may change if vendors begin to abuse it.
+ *
+ * The same exception is made for graphics devices, with the requirement that
+ * any use of the RMRR regions will be torn down before assigning the device
+ * to a guest.
+ *
+ * Return: true if the RMRR is relaxable, false otherwise
+ */
+static bool device_rmrr_is_relaxable(struct device *dev)
+{
+       struct pci_dev *pdev;
+
+       if (!dev_is_pci(dev))
+               return false;
+
+       pdev = to_pci_dev(dev);
+       if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
+               return true;
+       else
+               return false;
+}
+
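device_rmrr_is_relaxable() effectively classifies devices by PCI class. A standalone sketch of that classification (not from the patch; class codes per the PCI spec, mirroring what the kernel's IS_USB_DEVICE/IS_GFX_DEVICE macros test):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Classify a 24-bit PCI class code:
 *  - base class 0x03   -> display/graphics device
 *  - class      0x0c03 -> USB host controller (serial bus, USB)
 * A device whose class matches either one is treated as having a
 * "relaxable" RMRR.
 */
static bool rmrr_is_relaxable(uint32_t pci_class)
{
	uint8_t  base_class = pci_class >> 16;  /* top byte of the class code */
	uint16_t sub_class  = pci_class >> 8;   /* base class + sub class */

	return base_class == 0x03 || sub_class == 0x0c03;
}

int main(void)
{
	printf("%d\n", rmrr_is_relaxable(0x030000)); /* VGA:  relaxable */
	printf("%d\n", rmrr_is_relaxable(0x0c0330)); /* xHCI: relaxable */
	printf("%d\n", rmrr_is_relaxable(0x020000)); /* NIC:  not relaxable */
	return 0;
}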
 /*
  * There are a couple cases where we need to restrict the functionality of
  * devices associated with RMRRs.  The first is when evaluating a device for
@@ -2905,52 +2845,51 @@ static bool device_has_rmrr(struct device *dev)
  * We therefore prevent devices associated with an RMRR from participating in
  * the IOMMU API, which eliminates them from device assignment.
  *
- * In both cases we assume that PCI USB devices with RMRRs have them largely
- * for historical reasons and that the RMRR space is not actively used post
- * boot.  This exclusion may change if vendors begin to abuse it.
- *
- * The same exception is made for graphics devices, with the requirement that
- * any use of the RMRR regions will be torn down before assigning the device
- * to a guest.
+ * In both cases, devices with relaxable RMRRs are not subject to this
+ * restriction. See the device_rmrr_is_relaxable() comment.
  */
 static bool device_is_rmrr_locked(struct device *dev)
 {
        if (!device_has_rmrr(dev))
                return false;
 
-       if (dev_is_pci(dev)) {
-               struct pci_dev *pdev = to_pci_dev(dev);
-
-               if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
-                       return false;
-       }
+       if (device_rmrr_is_relaxable(dev))
+               return false;
 
        return true;
 }
 
-static int iommu_should_identity_map(struct device *dev, int startup)
+/*
+ * Return the required default domain type for a specific device.
+ *
+ * @dev: the device in query
+ *
+ * Returns:
+ *  - IOMMU_DOMAIN_DMA: device requires a dynamic mapping domain
+ *  - IOMMU_DOMAIN_IDENTITY: device requires an identity mapping domain
+ *  - 0: both identity and dynamic domains work for this device
+ */
+static int device_def_domain_type(struct device *dev)
 {
        if (dev_is_pci(dev)) {
                struct pci_dev *pdev = to_pci_dev(dev);
 
                if (device_is_rmrr_locked(dev))
-                       return 0;
+                       return IOMMU_DOMAIN_DMA;
 
                /*
                 * Prevent any device marked as untrusted from getting
                 * placed into the statically identity mapping domain.
                 */
                if (pdev->untrusted)
-                       return 0;
+                       return IOMMU_DOMAIN_DMA;
 
                if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
-                       return 1;
+                       return IOMMU_DOMAIN_IDENTITY;
 
                if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
-                       return 1;
-
-               if (!(iommu_identity_mapping & IDENTMAP_ALL))
-                       return 0;
+                       return IOMMU_DOMAIN_IDENTITY;
 
                /*
                 * We want to start off with all devices in the 1:1 domain, and
@@ -2971,94 +2910,18 @@ static int iommu_should_identity_map(struct device *dev, int startup)
                 */
                if (!pci_is_pcie(pdev)) {
                        if (!pci_is_root_bus(pdev->bus))
-                               return 0;
+                               return IOMMU_DOMAIN_DMA;
                        if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
-                               return 0;
+                               return IOMMU_DOMAIN_DMA;
                } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
-                       return 0;
+                       return IOMMU_DOMAIN_DMA;
        } else {
                if (device_has_rmrr(dev))
-                       return 0;
+                       return IOMMU_DOMAIN_DMA;
        }
 
-       /*
-        * At boot time, we don't yet know if devices will be 64-bit capable.
-        * Assume that they will - if they turn out not to be, then we can
-        * take them out of the 1:1 domain later.
-        */
-       if (!startup) {
-               /*
-                * If the device's dma_mask is less than the system's memory
-                * size then this is not a candidate for identity mapping.
-                */
-               u64 dma_mask = *dev->dma_mask;
-
-               if (dev->coherent_dma_mask &&
-                   dev->coherent_dma_mask < dma_mask)
-                       dma_mask = dev->coherent_dma_mask;
-
-               return dma_mask >= dma_get_required_mask(dev);
-       }
-
-       return 1;
-}
-
-static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
-{
-       int ret;
-
-       if (!iommu_should_identity_map(dev, 1))
-               return 0;
-
-       ret = domain_add_dev_info(si_domain, dev);
-       if (!ret)
-               dev_info(dev, "%s identity mapping\n",
-                        hw ? "Hardware" : "Software");
-       else if (ret == -ENODEV)
-               /* device not associated with an iommu */
-               ret = 0;
-
-       return ret;
-}
-
-
-static int __init iommu_prepare_static_identity_mapping(int hw)
-{
-       struct pci_dev *pdev = NULL;
-       struct dmar_drhd_unit *drhd;
-       /* To avoid a -Wunused-but-set-variable warning. */
-       struct intel_iommu *iommu __maybe_unused;
-       struct device *dev;
-       int i;
-       int ret = 0;
-
-       for_each_pci_dev(pdev) {
-               ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
-               if (ret)
-                       return ret;
-       }
-
-       for_each_active_iommu(iommu, drhd)
-               for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
-                       struct acpi_device_physical_node *pn;
-                       struct acpi_device *adev;
-
-                       if (dev->bus != &acpi_bus_type)
-                               continue;
-
-                       adev= to_acpi_device(dev);
-                       mutex_lock(&adev->physical_node_lock);
-                       list_for_each_entry(pn, &adev->physical_node_list, node) {
-                               ret = dev_prepare_static_identity_mapping(pn->dev, hw);
-                               if (ret)
-                                       break;
-                       }
-                       mutex_unlock(&adev->physical_node_lock);
-                       if (ret)
-                               return ret;
-               }
-
-       return 0;
+       return (iommu_identity_mapping & IDENTMAP_ALL) ?
+                       IOMMU_DOMAIN_IDENTITY : 0;
 }
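
A caller of device_def_domain_type() still has to turn the three possible answers into a concrete choice. A minimal sketch of that resolution step (resolve_def_domain is a hypothetical helper, not from the patch):

#include <stdio.h>

/* Resolve the three-way answer of a device_def_domain_type()-style hook
 * into a concrete domain type, letting 0 ("no preference") fall back to
 * a global default.
 */
enum domain_type { DOMAIN_ANY = 0, DOMAIN_DMA, DOMAIN_IDENTITY };

static enum domain_type resolve_def_domain(enum domain_type dev_pref,
					    enum domain_type global_default)
{
	return dev_pref != DOMAIN_ANY ? dev_pref : global_default;
}

int main(void)
{
	/* An RMRR-locked device forces DMA regardless of the global default. */
	printf("%d\n", resolve_def_domain(DOMAIN_DMA, DOMAIN_IDENTITY));
	/* A device with no preference inherits the global default. */
	printf("%d\n", resolve_def_domain(DOMAIN_ANY, DOMAIN_IDENTITY));
	return 0;
}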
 
 static void intel_iommu_init_qi(struct intel_iommu *iommu)
@@ -3283,11 +3146,8 @@ out_unmap:
 static int __init init_dmars(void)
 {
        struct dmar_drhd_unit *drhd;
-       struct dmar_rmrr_unit *rmrr;
-       bool copied_tables = false;
-       struct device *dev;
        struct intel_iommu *iommu;
-       int i, ret;
+       int ret;
 
        /*
         * for each drhd
@@ -3320,7 +3180,12 @@ static int __init init_dmars(void)
                goto error;
        }
 
-       for_each_active_iommu(iommu, drhd) {
+       for_each_iommu(iommu, drhd) {
+               if (drhd->ignored) {
+                       iommu_disable_translation(iommu);
+                       continue;
+               }
+
                /*
                 * Find the max pasid size of all IOMMU's in the system.
                 * We need to ensure the system pasid table is no bigger
@@ -3380,7 +3245,6 @@ static int __init init_dmars(void)
                        } else {
                                pr_info("Copied translation tables from previous kernel for %s\n",
                                        iommu->name);
-                               copied_tables = true;
                        }
                }
 
@@ -3416,62 +3280,9 @@ static int __init init_dmars(void)
 
        check_tylersburg_isoch();
 
-       if (iommu_identity_mapping) {
-               ret = si_domain_init(hw_pass_through);
-               if (ret)
-                       goto free_iommu;
-       }
-
-
-       /*
-        * If we copied translations from a previous kernel in the kdump
-        * case, we can not assign the devices to domains now, as that
-        * would eliminate the old mappings. So skip this part and defer
-        * the assignment to device driver initialization time.
-        */
-       if (copied_tables)
-               goto domains_done;
-
-       /*
-        * If pass through is not set or not enabled, setup context entries for
-        * identity mappings for rmrr, gfx, and isa and may fall back to static
-        * identity mapping if iommu_identity_mapping is set.
-        */
-       if (iommu_identity_mapping) {
-               ret = iommu_prepare_static_identity_mapping(hw_pass_through);
-               if (ret) {
-                       pr_crit("Failed to setup IOMMU pass-through\n");
-                       goto free_iommu;
-               }
-       }
-       /*
-        * For each rmrr
-        *   for each dev attached to rmrr
-        *   do
-        *     locate drhd for dev, alloc domain for dev
-        *     allocate free domain
-        *     allocate page table entries for rmrr
-        *     if context not allocated for bus
-        *           allocate and init context
-        *           set present in root table for this bus
-        *     init context with domain, translation etc
-        *    endfor
-        * endfor
-        */
-       pr_info("Setting RMRR:\n");
-       for_each_rmrr_units(rmrr) {
-               /* some BIOS lists non-exist devices in DMAR table. */
-               for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
-                                         i, dev) {
-                       ret = iommu_prepare_rmrr_dev(rmrr, dev);
-                       if (ret)
-                               pr_err("Mapping reserved region failed\n");
-               }
-       }
-
-       iommu_prepare_isa();
-
-domains_done:
+       ret = si_domain_init(hw_pass_through);
+       if (ret)
+               goto free_iommu;
 
        /*
         * for each drhd
@@ -3509,11 +3320,6 @@ domains_done:
                ret = dmar_set_interrupt(iommu);
                if (ret)
                        goto free_iommu;
-
-               if (!translation_pre_enabled(iommu))
-                       iommu_enable_translation(iommu);
-
-               iommu_disable_protect_mem_regions(iommu);
        }
 
        return 0;
@@ -3563,16 +3369,17 @@ static unsigned long intel_alloc_iova(struct device *dev,
        return iova_pfn;
 }
 
-struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
+static struct dmar_domain *get_private_domain_for_dev(struct device *dev)
 {
        struct dmar_domain *domain, *tmp;
        struct dmar_rmrr_unit *rmrr;
        struct device *i_dev;
        int i, ret;
 
+       /* The device shouldn't be attached to any domain yet. */
        domain = find_domain(dev);
        if (domain)
-               goto out;
+               return NULL;
 
        domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
        if (!domain)
@@ -3602,10 +3409,10 @@ struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
        }
 
 out:
-
        if (!domain)
                dev_err(dev, "Allocating domain failed\n");
-
+       else
+               domain->domain.type = IOMMU_DOMAIN_DMA;
 
        return domain;
 }
@@ -3613,17 +3420,19 @@ out:
 /* Check if the dev needs to go through non-identity map and unmap process.*/
 static bool iommu_need_mapping(struct device *dev)
 {
-       int found;
+       int ret;
 
        if (iommu_dummy(dev))
                return false;
 
-       if (!iommu_identity_mapping)
-               return true;
+       ret = identity_mapping(dev);
+       if (ret) {
+               u64 dma_mask = *dev->dma_mask;
 
-       found = identity_mapping(dev);
-       if (found) {
-               if (iommu_should_identity_map(dev, 0))
+               if (dev->coherent_dma_mask && dev->coherent_dma_mask < dma_mask)
+                       dma_mask = dev->coherent_dma_mask;
+
+               if (dma_mask >= dma_get_required_mask(dev))
                        return false;
 
                /*
@@ -3631,17 +3440,20 @@ static bool iommu_need_mapping(struct device *dev)
                 * non-identity mapping.
                 */
                dmar_remove_one_dev_info(dev);
-               dev_info(dev, "32bit DMA uses non-identity mapping\n");
-       } else {
-               /*
-                * In case of a detached 64 bit DMA device from vm, the device
-                * is put into si_domain for identity mapping.
-                */
-               if (iommu_should_identity_map(dev, 0) &&
-                   !domain_add_dev_info(si_domain, dev)) {
-                       dev_info(dev, "64bit DMA uses identity mapping\n");
-                       return false;
+               ret = iommu_request_dma_domain_for_dev(dev);
+               if (ret) {
+                       struct iommu_domain *domain;
+                       struct dmar_domain *dmar_domain;
+
+                       domain = iommu_get_domain_for_dev(dev);
+                       if (domain) {
+                               dmar_domain = to_dmar_domain(domain);
+                               dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
+                       }
+                       get_private_domain_for_dev(dev);
                }
+
+               dev_info(dev, "32bit DMA uses non-identity mapping\n");
        }
 
        return true;
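
The mask comparison above decides whether a device already sitting in the identity domain can stay there. A standalone sketch of that decision (can_keep_identity_map is an invented name):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A device may stay in the identity (pass-through) domain only if the
 * narrower of its streaming and coherent DMA masks still covers the mask
 * required to address all of memory.
 */
static bool can_keep_identity_map(uint64_t dma_mask,
				  uint64_t coherent_dma_mask,
				  uint64_t required_mask)
{
	uint64_t effective = dma_mask;

	if (coherent_dma_mask && coherent_dma_mask < effective)
		effective = coherent_dma_mask;

	return effective >= required_mask;
}

int main(void)
{
	uint64_t required = (1ULL << 36) - 1;   /* e.g. 64 GiB of RAM */

	/* 64-bit capable device: identity mapping is fine. */
	printf("%d\n", can_keep_identity_map(~0ULL, ~0ULL, required));
	/* 32-bit-only device: must move to a dynamic DMA domain. */
	printf("%d\n", can_keep_identity_map((1ULL << 32) - 1, 0, required));
	return 0;
}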
@@ -3660,7 +3472,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
 
        BUG_ON(dir == DMA_NONE);
 
-       domain = get_valid_domain_for_dev(dev);
+       domain = find_domain(dev);
        if (!domain)
                return DMA_MAPPING_ERROR;
 
@@ -3875,7 +3687,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
        if (!iommu_need_mapping(dev))
                return dma_direct_map_sg(dev, sglist, nelems, dir, attrs);
 
-       domain = get_valid_domain_for_dev(dev);
+       domain = find_domain(dev);
        if (!domain)
                return 0;
 
@@ -4194,13 +4006,10 @@ static void __init init_iommu_pm_ops(void)
 static inline void init_iommu_pm_ops(void) {}
 #endif /* CONFIG_PM */
 
-
 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
 {
        struct acpi_dmar_reserved_memory *rmrr;
-       int prot = DMA_PTE_READ|DMA_PTE_WRITE;
        struct dmar_rmrr_unit *rmrru;
-       size_t length;
 
        rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
        if (!rmrru)
@@ -4211,23 +4020,15 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
        rmrru->base_address = rmrr->base_address;
        rmrru->end_address = rmrr->end_address;
 
-       length = rmrr->end_address - rmrr->base_address + 1;
-       rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
-                                             IOMMU_RESV_DIRECT);
-       if (!rmrru->resv)
-               goto free_rmrru;
-
        rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
                                ((void *)rmrr) + rmrr->header.length,
                                &rmrru->devices_cnt);
        if (rmrru->devices_cnt && rmrru->devices == NULL)
-               goto free_all;
+               goto free_rmrru;
 
        list_add(&rmrru->list, &dmar_rmrr_units);
 
        return 0;
-free_all:
-       kfree(rmrru->resv);
 free_rmrru:
        kfree(rmrru);
 out:
@@ -4445,7 +4246,6 @@ static void intel_iommu_free_dmars(void)
        list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
                list_del(&rmrru->list);
                dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
-               kfree(rmrru->resv);
                kfree(rmrru);
        }
 
@@ -4550,42 +4350,6 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
        return 0;
 }
 
-/*
- * Here we only respond to action of unbound device from driver.
- *
- * Added device is not attached to its DMAR domain here yet. That will happen
- * when mapping the device to iova.
- */
-static int device_notifier(struct notifier_block *nb,
-                                 unsigned long action, void *data)
-{
-       struct device *dev = data;
-       struct dmar_domain *domain;
-
-       if (iommu_dummy(dev))
-               return 0;
-
-       if (action == BUS_NOTIFY_REMOVED_DEVICE) {
-               domain = find_domain(dev);
-               if (!domain)
-                       return 0;
-
-               dmar_remove_one_dev_info(dev);
-               if (!domain_type_is_vm_or_si(domain) &&
-                   list_empty(&domain->devices))
-                       domain_exit(domain);
-       } else if (action == BUS_NOTIFY_ADD_DEVICE) {
-               if (iommu_should_identity_map(dev, 1))
-                       domain_add_dev_info(si_domain, dev);
-       }
-
-       return 0;
-}
-
-static struct notifier_block device_nb = {
-       .notifier_call = device_notifier,
-};
-
 static int intel_iommu_memory_notifier(struct notifier_block *nb,
                                       unsigned long val, void *v)
 {
@@ -4812,6 +4576,49 @@ static int __init platform_optin_force_iommu(void)
        return 1;
 }
 
+static int __init probe_acpi_namespace_devices(void)
+{
+       struct dmar_drhd_unit *drhd;
+       /* To avoid a -Wunused-but-set-variable warning. */
+       struct intel_iommu *iommu __maybe_unused;
+       struct device *dev;
+       int i, ret = 0;
+
+       for_each_active_iommu(iommu, drhd) {
+               for_each_active_dev_scope(drhd->devices,
+                                         drhd->devices_cnt, i, dev) {
+                       struct acpi_device_physical_node *pn;
+                       struct iommu_group *group;
+                       struct acpi_device *adev;
+
+                       if (dev->bus != &acpi_bus_type)
+                               continue;
+
+                       adev = to_acpi_device(dev);
+                       mutex_lock(&adev->physical_node_lock);
+                       list_for_each_entry(pn,
+                                           &adev->physical_node_list, node) {
+                               group = iommu_group_get(pn->dev);
+                               if (group) {
+                                       iommu_group_put(group);
+                                       continue;
+                               }
+
+                               pn->dev->bus->iommu_ops = &intel_iommu_ops;
+                               ret = iommu_probe_device(pn->dev);
+                               if (ret)
+                                       break;
+                       }
+                       mutex_unlock(&adev->physical_node_lock);
+
+                       if (ret)
+                               return ret;
+               }
+       }
+
+       return 0;
+}
+
 int __init intel_iommu_init(void)
 {
        int ret = -ENODEV;
@@ -4901,7 +4708,6 @@ int __init intel_iommu_init(void)
                goto out_free_reserved_range;
        }
        up_write(&dmar_global_lock);
-       pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
 
 #if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
        swiotlb = 0;
@@ -4919,11 +4725,25 @@ int __init intel_iommu_init(void)
        }
 
        bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
-       bus_register_notifier(&pci_bus_type, &device_nb);
        if (si_domain && !hw_pass_through)
                register_memory_notifier(&intel_iommu_memory_nb);
        cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
                          intel_iommu_cpu_dead);
+
+       down_read(&dmar_global_lock);
+       if (probe_acpi_namespace_devices())
+               pr_warn("ACPI name space devices didn't probe correctly\n");
+       up_read(&dmar_global_lock);
+
+       /* Finally, we enable the DMA remapping hardware. */
+       for_each_iommu(iommu, drhd) {
+               if (!drhd->ignored && !translation_pre_enabled(iommu))
+                       iommu_enable_translation(iommu);
+
+               iommu_disable_protect_mem_regions(iommu);
+       }
+       pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
+
        intel_iommu_enabled = 1;
        intel_iommu_debugfs_init();
 
@@ -4962,6 +4782,7 @@ static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
 
 static void __dmar_remove_one_dev_info(struct device_domain_info *info)
 {
+       struct dmar_domain *domain;
        struct intel_iommu *iommu;
        unsigned long flags;
 
@@ -4971,6 +4792,7 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
                return;
 
        iommu = info->iommu;
+       domain = info->domain;
 
        if (info->dev) {
                if (dev_is_pci(info->dev) && sm_supported(iommu))
@@ -4985,9 +4807,14 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
        unlink_domain_info(info);
 
        spin_lock_irqsave(&iommu->lock, flags);
-       domain_detach_iommu(info->domain, iommu);
+       domain_detach_iommu(domain, iommu);
        spin_unlock_irqrestore(&iommu->lock, flags);
 
+       /* free the private domain */
+       if (domain->flags & DOMAIN_FLAG_LOSE_CHILDREN &&
+           !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY))
+               domain_exit(info->domain);
+
        free_devinfo_mem(info);
 }
 
@@ -5002,62 +4829,55 @@ static void dmar_remove_one_dev_info(struct device *dev)
        spin_unlock_irqrestore(&device_domain_lock, flags);
 }
 
-static int md_domain_init(struct dmar_domain *domain, int guest_width)
-{
-       int adjust_width;
-
-       init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
-       domain_reserve_special_ranges(domain);
-
-       /* calculate AGAW */
-       domain->gaw = guest_width;
-       adjust_width = guestwidth_to_adjustwidth(guest_width);
-       domain->agaw = width_to_agaw(adjust_width);
-
-       domain->iommu_coherency = 0;
-       domain->iommu_snooping = 0;
-       domain->iommu_superpage = 0;
-       domain->max_addr = 0;
-
-       /* always allocate the top pgd */
-       domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
-       if (!domain->pgd)
-               return -ENOMEM;
-       domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
-       return 0;
-}
-
 static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 {
        struct dmar_domain *dmar_domain;
        struct iommu_domain *domain;
 
-       if (type != IOMMU_DOMAIN_UNMANAGED)
-               return NULL;
+       switch (type) {
+       case IOMMU_DOMAIN_DMA:
+       /* fallthrough */
+       case IOMMU_DOMAIN_UNMANAGED:
+               dmar_domain = alloc_domain(0);
+               if (!dmar_domain) {
+                       pr_err("Can't allocate dmar_domain\n");
+                       return NULL;
+               }
+               if (domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+                       pr_err("Domain initialization failed\n");
+                       domain_exit(dmar_domain);
+                       return NULL;
+               }
 
-       dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
-       if (!dmar_domain) {
-               pr_err("Can't allocate dmar_domain\n");
-               return NULL;
-       }
-       if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
-               pr_err("Domain initialization failed\n");
-               domain_exit(dmar_domain);
+               if (type == IOMMU_DOMAIN_DMA &&
+                   init_iova_flush_queue(&dmar_domain->iovad,
+                                         iommu_flush_iova, iova_entry_free)) {
+                       pr_warn("iova flush queue initialization failed\n");
+                       intel_iommu_strict = 1;
+               }
+
+               domain_update_iommu_cap(dmar_domain);
+
+               domain = &dmar_domain->domain;
+               domain->geometry.aperture_start = 0;
+               domain->geometry.aperture_end   =
+                               __DOMAIN_MAX_ADDR(dmar_domain->gaw);
+               domain->geometry.force_aperture = true;
+
+               return domain;
+       case IOMMU_DOMAIN_IDENTITY:
+               return &si_domain->domain;
+       default:
                return NULL;
        }
-       domain_update_iommu_cap(dmar_domain);
-
-       domain = &dmar_domain->domain;
-       domain->geometry.aperture_start = 0;
-       domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
-       domain->geometry.force_aperture = true;
 
-       return domain;
+       return NULL;
 }
 
 static void intel_iommu_domain_free(struct iommu_domain *domain)
 {
-       domain_exit(to_dmar_domain(domain));
+       if (domain != &si_domain->domain)
+               domain_exit(to_dmar_domain(domain));
 }
 
 /*
@@ -5233,7 +5053,8 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
 {
        int ret;
 
-       if (device_is_rmrr_locked(dev)) {
+       if (domain->type == IOMMU_DOMAIN_UNMANAGED &&
+           device_is_rmrr_locked(dev)) {
                dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement.  Contact your platform vendor.\n");
                return -EPERM;
        }
@@ -5246,15 +5067,8 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
                struct dmar_domain *old_domain;
 
                old_domain = find_domain(dev);
-               if (old_domain) {
-                       rcu_read_lock();
+               if (old_domain)
                        dmar_remove_one_dev_info(dev);
-                       rcu_read_unlock();
-
-                       if (!domain_type_is_vm_or_si(old_domain) &&
-                           list_empty(&old_domain->devices))
-                               domain_exit(old_domain);
-               }
        }
 
        ret = prepare_domain_attach_device(domain, dev);
@@ -5300,6 +5114,9 @@ static int intel_iommu_map(struct iommu_domain *domain,
        int prot = 0;
        int ret;
 
+       if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN)
+               return -EINVAL;
+
        if (iommu_prot & IOMMU_READ)
                prot |= DMA_PTE_READ;
        if (iommu_prot & IOMMU_WRITE)
@@ -5341,6 +5158,8 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
        /* Cope with horrid API which requires us to unmap more than the
           size argument if it happens to be a large-page mapping. */
        BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
+       if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN)
+               return 0;
 
        if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
                size = VTD_PAGE_SIZE << level_to_offset_bits(level);
@@ -5372,6 +5191,9 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
        int level = 0;
        u64 phys = 0;
 
+       if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN)
+               return 0;
+
        pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
        if (pte)
                phys = dma_pte_addr(pte);
@@ -5427,9 +5249,12 @@ static bool intel_iommu_capable(enum iommu_cap cap)
 
 static int intel_iommu_add_device(struct device *dev)
 {
+       struct dmar_domain *dmar_domain;
+       struct iommu_domain *domain;
        struct intel_iommu *iommu;
        struct iommu_group *group;
        u8 bus, devfn;
+       int ret;
 
        iommu = device_to_iommu(dev, &bus, &devfn);
        if (!iommu)
@@ -5437,12 +5262,45 @@ static int intel_iommu_add_device(struct device *dev)
 
        iommu_device_link(&iommu->iommu, dev);
 
+       if (translation_pre_enabled(iommu))
+               dev->archdata.iommu = DEFER_DEVICE_DOMAIN_INFO;
+
        group = iommu_group_get_for_dev(dev);
 
        if (IS_ERR(group))
                return PTR_ERR(group);
 
        iommu_group_put(group);
+
+       domain = iommu_get_domain_for_dev(dev);
+       dmar_domain = to_dmar_domain(domain);
+       if (domain->type == IOMMU_DOMAIN_DMA) {
+               if (device_def_domain_type(dev) == IOMMU_DOMAIN_IDENTITY) {
+                       ret = iommu_request_dm_for_dev(dev);
+                       if (ret) {
+                               dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
+                               domain_add_dev_info(si_domain, dev);
+                               dev_info(dev,
+                                        "Device uses a private identity domain.\n");
+                       }
+               }
+       } else {
+               if (device_def_domain_type(dev) == IOMMU_DOMAIN_DMA) {
+                       ret = iommu_request_dma_domain_for_dev(dev);
+                       if (ret) {
+                               dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
+                               if (!get_private_domain_for_dev(dev)) {
+                                       dev_warn(dev,
+                                                "Failed to get a private domain.\n");
+                                       return -ENOMEM;
+                               }
+
+                               dev_info(dev,
+                                        "Device uses a private dma domain.\n");
+                       }
+               }
+       }
+
        return 0;
 }
 
@@ -5463,22 +5321,51 @@ static void intel_iommu_remove_device(struct device *dev)
 static void intel_iommu_get_resv_regions(struct device *device,
                                         struct list_head *head)
 {
+       int prot = DMA_PTE_READ | DMA_PTE_WRITE;
        struct iommu_resv_region *reg;
        struct dmar_rmrr_unit *rmrr;
        struct device *i_dev;
        int i;
 
-       rcu_read_lock();
+       down_read(&dmar_global_lock);
        for_each_rmrr_units(rmrr) {
                for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
                                          i, i_dev) {
-                       if (i_dev != device)
+                       struct iommu_resv_region *resv;
+                       enum iommu_resv_type type;
+                       size_t length;
+
+                       if (i_dev != device &&
+                           !is_downstream_to_pci_bridge(device, i_dev))
                                continue;
 
-                       list_add_tail(&rmrr->resv->list, head);
+                       length = rmrr->end_address - rmrr->base_address + 1;
+
+                       type = device_rmrr_is_relaxable(device) ?
+                               IOMMU_RESV_DIRECT_RELAXABLE : IOMMU_RESV_DIRECT;
+
+                       resv = iommu_alloc_resv_region(rmrr->base_address,
+                                                      length, prot, type);
+                       if (!resv)
+                               break;
+
+                       list_add_tail(&resv->list, head);
                }
        }
-       rcu_read_unlock();
+       up_read(&dmar_global_lock);
+
+#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
+       if (dev_is_pci(device)) {
+               struct pci_dev *pdev = to_pci_dev(device);
+
+               if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) {
+                       reg = iommu_alloc_resv_region(0, 1UL << 24, 0,
+                                                     IOMMU_RESV_DIRECT);
+                       if (reg)
+                               list_add_tail(&reg->list, head);
+               }
+       }
+#endif /* CONFIG_INTEL_IOMMU_FLOPPY_WA */
 
        reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
                                      IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
@@ -5493,10 +5380,8 @@ static void intel_iommu_put_resv_regions(struct device *dev,
 {
        struct iommu_resv_region *entry, *next;
 
-       list_for_each_entry_safe(entry, next, head, list) {
-               if (entry->type == IOMMU_RESV_MSI)
-                       kfree(entry);
-       }
+       list_for_each_entry_safe(entry, next, head, list)
+               kfree(entry);
 }
 
 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
@@ -5508,7 +5393,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
        u64 ctx_lo;
        int ret;
 
-       domain = get_valid_domain_for_dev(dev);
+       domain = find_domain(dev);
        if (!domain)
                return -EINVAL;
 
@@ -5550,6 +5435,19 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
        return ret;
 }
 
+static void intel_iommu_apply_resv_region(struct device *dev,
+                                         struct iommu_domain *domain,
+                                         struct iommu_resv_region *region)
+{
+       struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+       unsigned long start, end;
+
+       start = IOVA_PFN(region->start);
+       end   = IOVA_PFN(region->start + region->length - 1);
+
+       WARN_ON_ONCE(!reserve_iova(&dmar_domain->iovad, start, end));
+}
+
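intel_iommu_apply_resv_region() reserves the region in the domain's IOVA allocator by page frame number. A small standalone sketch of the byte-range to PFN-range conversion (assuming a 4 KiB granule, as VTD_PAGE_SHIFT implies; resv_region_to_pfns is an invented helper):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12  /* assumed 4 KiB IOVA granule */

/* A byte range [start, start + length) becomes the inclusive PFN range
 * [start >> PAGE_SHIFT, (start + length - 1) >> PAGE_SHIFT] before being
 * handed to the IOVA allocator as reserved.
 */
static void resv_region_to_pfns(uint64_t start, uint64_t length,
				uint64_t *first_pfn, uint64_t *last_pfn)
{
	*first_pfn = start >> PAGE_SHIFT;
	*last_pfn  = (start + length - 1) >> PAGE_SHIFT;
}

int main(void)
{
	uint64_t first, last;

	/* A 16 MiB RMRR starting at 0x7c000000. */
	resv_region_to_pfns(0x7c000000ULL, 16ULL << 20, &first, &last);
	printf("0x%llx 0x%llx\n",
	       (unsigned long long)first, (unsigned long long)last);
	return 0;
}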
 #ifdef CONFIG_INTEL_IOMMU_SVM
 struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
 {
@@ -5699,6 +5597,12 @@ intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
                        dmar_domain->default_pasid : -EINVAL;
 }
 
+static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain,
+                                          struct device *dev)
+{
+       return dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO;
+}
+
 const struct iommu_ops intel_iommu_ops = {
        .capable                = intel_iommu_capable,
        .domain_alloc           = intel_iommu_domain_alloc,
@@ -5715,11 +5619,13 @@ const struct iommu_ops intel_iommu_ops = {
        .remove_device          = intel_iommu_remove_device,
        .get_resv_regions       = intel_iommu_get_resv_regions,
        .put_resv_regions       = intel_iommu_put_resv_regions,
+       .apply_resv_region      = intel_iommu_apply_resv_region,
        .device_group           = pci_device_group,
        .dev_has_feat           = intel_iommu_dev_has_feat,
        .dev_feat_enabled       = intel_iommu_dev_feat_enabled,
        .dev_enable_feat        = intel_iommu_dev_enable_feat,
        .dev_disable_feat       = intel_iommu_dev_disable_feat,
+       .is_attach_deferred     = intel_iommu_is_attach_deferred,
        .pgsize_bitmap          = INTEL_IOMMU_PGSIZES,
 };
 
index fe51d8a..040a445 100644 (file)
@@ -169,23 +169,6 @@ attach_out:
        return 0;
 }
 
-/* Get PRESENT bit of a PASID directory entry. */
-static inline bool
-pasid_pde_is_present(struct pasid_dir_entry *pde)
-{
-       return READ_ONCE(pde->val) & PASID_PTE_PRESENT;
-}
-
-/* Get PASID table from a PASID directory entry. */
-static inline struct pasid_entry *
-get_pasid_table_from_pde(struct pasid_dir_entry *pde)
-{
-       if (!pasid_pde_is_present(pde))
-               return NULL;
-
-       return phys_to_virt(READ_ONCE(pde->val) & PDE_PFN_MASK);
-}
-
 void intel_pasid_free_table(struct device *dev)
 {
        struct device_domain_info *info;
index 23537b3..fc8cd8f 100644 (file)
 #define PDE_PFN_MASK                   PAGE_MASK
 #define PASID_PDE_SHIFT                        6
 #define MAX_NR_PASID_BITS              20
+#define PASID_TBL_ENTRIES              BIT(PASID_PDE_SHIFT)
+
+#define is_pasid_enabled(entry)                (((entry)->lo >> 3) & 0x1)
+#define get_pasid_dir_size(entry)      (1 << ((((entry)->lo >> 9) & 0x7) + 7))
 
 /*
  * Domain ID reserved for pasid entries programmed for first-level
@@ -49,6 +53,28 @@ struct pasid_table {
        struct list_head        dev;            /* device list */
 };
 
+/* Get PRESENT bit of a PASID directory entry. */
+static inline bool pasid_pde_is_present(struct pasid_dir_entry *pde)
+{
+       return READ_ONCE(pde->val) & PASID_PTE_PRESENT;
+}
+
+/* Get PASID table from a PASID directory entry. */
+static inline struct pasid_entry *
+get_pasid_table_from_pde(struct pasid_dir_entry *pde)
+{
+       if (!pasid_pde_is_present(pde))
+               return NULL;
+
+       return phys_to_virt(READ_ONCE(pde->val) & PDE_PFN_MASK);
+}
+
+/* Get PRESENT bit of a PASID table entry. */
+static inline bool pasid_pte_is_present(struct pasid_entry *pte)
+{
+       return READ_ONCE(pte->val[0]) & PASID_PTE_PRESENT;
+}
+
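The new helpers and macros above all decode fields of a scalable-mode context/PASID entry. A rough standalone sketch of that decoding (decode_ctx_lo and ctx_lo_fields are invented; bit positions follow is_pasid_enabled()/get_pasid_dir_size() in this header):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Decode the low 64 bits of a scalable-mode context entry into the present
 * bit, the PASID-enable bit, and the PASID directory size (an encoded power
 * of two).
 */
struct ctx_lo_fields {
	bool     present;       /* bit 0 */
	bool     pasid_enabled; /* bit 3 */
	unsigned dir_entries;   /* 2^(field + 7), field = bits 9..11 */
};

static struct ctx_lo_fields decode_ctx_lo(uint64_t lo)
{
	struct ctx_lo_fields f;

	f.present       = lo & 0x1;
	f.pasid_enabled = (lo >> 3) & 0x1;
	f.dir_entries   = 1u << (((lo >> 9) & 0x7) + 7);
	return f;
}

int main(void)
{
	/* Hypothetical value: present, PASID enabled, size field = 3. */
	struct ctx_lo_fields f = decode_ctx_lo(0x1ULL | (1ULL << 3) | (3ULL << 9));

	printf("present=%d pasid=%d dir_entries=%u\n",
	       f.present, f.pasid_enabled, f.dir_entries);
	return 0;
}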
 extern u32 intel_pasid_max_id;
 int intel_pasid_alloc_id(void *ptr, int start, int end, gfp_t gfp);
 void intel_pasid_free_id(int pasid);
index eceaa7e..780de0c 100644 (file)
@@ -366,6 +366,21 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
                }
 
                list_add_tail(&svm->list, &global_svm_list);
+       } else {
+               /*
+                * Binding a new device with existing PASID, need to setup
+                * the PASID entry.
+                */
+               spin_lock(&iommu->lock);
+               ret = intel_pasid_setup_first_level(iommu, dev,
+                                               mm ? mm->pgd : init_mm.pgd,
+                                               svm->pasid, FLPT_DEFAULT_DID,
+                                               mm ? 0 : PASID_FLAG_SUPERVISOR_MODE);
+               spin_unlock(&iommu->lock);
+               if (ret) {
+                       kfree(sdev);
+                       goto out;
+               }
        }
        list_add_rcu(&sdev->list, &svm->devs);
 
index 4160aa9..4786ca0 100644 (file)
@@ -101,7 +101,7 @@ static void init_ir_status(struct intel_iommu *iommu)
                iommu->flags |= VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
 }
 
-static int alloc_irte(struct intel_iommu *iommu, int irq,
+static int alloc_irte(struct intel_iommu *iommu,
                      struct irq_2_iommu *irq_iommu, u16 count)
 {
        struct ir_table *table = iommu->ir_table;
@@ -1374,7 +1374,7 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain,
                goto out_free_parent;
 
        down_read(&dmar_global_lock);
-       index = alloc_irte(iommu, virq, &data->irq_2_iommu, nr_irqs);
+       index = alloc_irte(iommu, &data->irq_2_iommu, nr_irqs);
        up_read(&dmar_global_lock);
        if (index < 0) {
                pr_warn("Failed to allocate IRTE\n");
index 9f0a284..0c674d8 100644 (file)
@@ -61,10 +61,11 @@ struct iommu_group_attribute {
 };
 
 static const char * const iommu_group_resv_type_string[] = {
-       [IOMMU_RESV_DIRECT]     = "direct",
-       [IOMMU_RESV_RESERVED]   = "reserved",
-       [IOMMU_RESV_MSI]        = "msi",
-       [IOMMU_RESV_SW_MSI]     = "msi",
+       [IOMMU_RESV_DIRECT]                     = "direct",
+       [IOMMU_RESV_DIRECT_RELAXABLE]           = "direct-relaxable",
+       [IOMMU_RESV_RESERVED]                   = "reserved",
+       [IOMMU_RESV_MSI]                        = "msi",
+       [IOMMU_RESV_SW_MSI]                     = "msi",
 };
 
 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)          \
@@ -95,15 +96,43 @@ void iommu_device_unregister(struct iommu_device *iommu)
        spin_unlock(&iommu_device_lock);
 }
 
+static struct iommu_param *iommu_get_dev_param(struct device *dev)
+{
+       struct iommu_param *param = dev->iommu_param;
+
+       if (param)
+               return param;
+
+       param = kzalloc(sizeof(*param), GFP_KERNEL);
+       if (!param)
+               return NULL;
+
+       mutex_init(&param->lock);
+       dev->iommu_param = param;
+       return param;
+}
+
+static void iommu_free_dev_param(struct device *dev)
+{
+       kfree(dev->iommu_param);
+       dev->iommu_param = NULL;
+}
+
 int iommu_probe_device(struct device *dev)
 {
        const struct iommu_ops *ops = dev->bus->iommu_ops;
-       int ret = -EINVAL;
+       int ret;
 
        WARN_ON(dev->iommu_group);
+       if (!ops)
+               return -EINVAL;
 
-       if (ops)
-               ret = ops->add_device(dev);
+       if (!iommu_get_dev_param(dev))
+               return -ENOMEM;
+
+       ret = ops->add_device(dev);
+       if (ret)
+               iommu_free_dev_param(dev);
 
        return ret;
 }
@@ -114,6 +143,8 @@ void iommu_release_device(struct device *dev)
 
        if (dev->iommu_group)
                ops->remove_device(dev);
+
+       iommu_free_dev_param(dev);
 }
 
 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
@@ -225,18 +256,21 @@ static int iommu_insert_resv_region(struct iommu_resv_region *new,
                        pos = pos->next;
                } else if ((start >= a) && (end <= b)) {
                        if (new->type == type)
-                               goto done;
+                               return 0;
                        else
                                pos = pos->next;
                } else {
                        if (new->type == type) {
                                phys_addr_t new_start = min(a, start);
                                phys_addr_t new_end = max(b, end);
+                               int ret;
 
                                list_del(&entry->list);
                                entry->start = new_start;
                                entry->length = new_end - new_start + 1;
-                               iommu_insert_resv_region(entry, regions);
+                               ret = iommu_insert_resv_region(entry, regions);
+                               kfree(entry);
+                               return ret;
                        } else {
                                pos = pos->next;
                        }
@@ -249,7 +283,6 @@ insert:
                return -ENOMEM;
 
        list_add_tail(&region->list, pos);
-done:
        return 0;
 }
 
@@ -561,7 +594,8 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
                start = ALIGN(entry->start, pg_size);
                end   = ALIGN(entry->start + entry->length, pg_size);
 
-               if (entry->type != IOMMU_RESV_DIRECT)
+               if (entry->type != IOMMU_RESV_DIRECT &&
+                   entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
                        continue;
 
                for (addr = start; addr < end; addr += pg_size) {
@@ -843,6 +877,206 @@ int iommu_group_unregister_notifier(struct iommu_group *group,
 EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
 
 /**
+ * iommu_register_device_fault_handler() - Register a device fault handler
+ * @dev: the device
+ * @handler: the fault handler
+ * @data: private data passed as argument to the handler
+ *
+ * When an IOMMU fault event is received, this handler gets called with the
+ * fault event and data as argument. The handler should return 0 on success. If
+ * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
+ * complete the fault by calling iommu_page_response() with one of the following
+ * response codes:
+ * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
+ * - IOMMU_PAGE_RESP_INVALID: terminate the fault
+ * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
+ *   page faults if possible.
+ *
+ * Return 0 if the fault handler was installed successfully, or an error.
+ */
+int iommu_register_device_fault_handler(struct device *dev,
+                                       iommu_dev_fault_handler_t handler,
+                                       void *data)
+{
+       struct iommu_param *param = dev->iommu_param;
+       int ret = 0;
+
+       if (!param)
+               return -EINVAL;
+
+       mutex_lock(&param->lock);
+       /* Only allow one fault handler registered for each device */
+       if (param->fault_param) {
+               ret = -EBUSY;
+               goto done_unlock;
+       }
+
+       get_device(dev);
+       param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
+       if (!param->fault_param) {
+               put_device(dev);
+               ret = -ENOMEM;
+               goto done_unlock;
+       }
+       param->fault_param->handler = handler;
+       param->fault_param->data = data;
+       mutex_init(&param->fault_param->lock);
+       INIT_LIST_HEAD(&param->fault_param->faults);
+
+done_unlock:
+       mutex_unlock(&param->lock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
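For context, a consumer registers the handler once per device, typically at probe time. A minimal sketch, assuming a hypothetical endpoint driver with a my_dev_ctx structure (none of the my_dev_* names are part of this patch):

#include <linux/device.h>
#include <linux/iommu.h>

struct my_dev_ctx {
	struct device *dev;
};

/* Called for every fault reported on the device. */
static int my_dev_fault_handler(struct iommu_fault *fault, void *data)
{
	struct my_dev_ctx *ctx = data;

	if (fault->type == IOMMU_FAULT_PAGE_REQ) {
		/*
		 * Recoverable: hand off to a worker and reply later with
		 * iommu_page_response().
		 */
		dev_dbg(ctx->dev, "page request, pasid %u grpid %u\n",
			fault->prm.pasid, fault->prm.grpid);
		return 0;
	}

	dev_err(ctx->dev, "unrecoverable fault, reason %u\n",
		fault->event.reason);
	return 0;
}

static int my_dev_enable_fault_reporting(struct my_dev_ctx *ctx)
{
	return iommu_register_device_fault_handler(ctx->dev,
						   my_dev_fault_handler, ctx);
}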
+
+/**
+ * iommu_unregister_device_fault_handler() - Unregister the device fault handler
+ * @dev: the device
+ *
+ * Remove the device fault handler installed with
+ * iommu_register_device_fault_handler().
+ *
+ * Return 0 on success, or an error.
+ */
+int iommu_unregister_device_fault_handler(struct device *dev)
+{
+       struct iommu_param *param = dev->iommu_param;
+       int ret = 0;
+
+       if (!param)
+               return -EINVAL;
+
+       mutex_lock(&param->lock);
+
+       if (!param->fault_param)
+               goto unlock;
+
+       /* we cannot unregister the handler if there are pending faults */
+       if (!list_empty(&param->fault_param->faults)) {
+               ret = -EBUSY;
+               goto unlock;
+       }
+
+       kfree(param->fault_param);
+       param->fault_param = NULL;
+       put_device(dev);
+unlock:
+       mutex_unlock(&param->lock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);
+
+/**
+ * iommu_report_device_fault() - Report fault event to device driver
+ * @dev: the device
+ * @evt: fault event data
+ *
+ * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
+ * handler. When this function fails and the fault is recoverable, it is the
+ * caller's responsibility to complete the fault.
+ *
+ * Return 0 on success, or an error.
+ */
+int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
+{
+       struct iommu_param *param = dev->iommu_param;
+       struct iommu_fault_event *evt_pending = NULL;
+       struct iommu_fault_param *fparam;
+       int ret = 0;
+
+       if (!param || !evt)
+               return -EINVAL;
+
+       /* we only report device fault if there is a handler registered */
+       mutex_lock(&param->lock);
+       fparam = param->fault_param;
+       if (!fparam || !fparam->handler) {
+               ret = -EINVAL;
+               goto done_unlock;
+       }
+
+       if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
+           (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
+               evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
+                                     GFP_KERNEL);
+               if (!evt_pending) {
+                       ret = -ENOMEM;
+                       goto done_unlock;
+               }
+               mutex_lock(&fparam->lock);
+               list_add_tail(&evt_pending->list, &fparam->faults);
+               mutex_unlock(&fparam->lock);
+       }
+
+       ret = fparam->handler(&evt->fault, fparam->data);
+       if (ret && evt_pending) {
+               mutex_lock(&fparam->lock);
+               list_del(&evt_pending->list);
+               mutex_unlock(&fparam->lock);
+               kfree(evt_pending);
+       }
+done_unlock:
+       mutex_unlock(&param->lock);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_report_device_fault);
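On the producer side, an IOMMU driver fills a struct iommu_fault_event from its hardware page-request descriptor and passes it here. A sketch under the assumption of a made-up hw_prq_desc layout (real drivers decode their own queue format):

/* Hypothetical hardware page-request descriptor. */
struct hw_prq_desc {
	u32 pasid;
	u32 grpid;
	u64 addr;
	bool last;
};

static int forward_page_request(struct device *dev,
				const struct hw_prq_desc *desc)
{
	struct iommu_fault_event event = {
		.fault = {
			.type = IOMMU_FAULT_PAGE_REQ,
			.prm = {
				.flags = IOMMU_FAULT_PAGE_REQUEST_PASID_VALID |
					 (desc->last ?
					  IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE : 0),
				.pasid = desc->pasid,
				.grpid = desc->grpid,
				.perm  = IOMMU_FAULT_PERM_READ,
				.addr  = desc->addr,
			},
		},
	};

	/*
	 * If this fails and the request is recoverable, the driver must
	 * complete it itself (see the kernel-doc above).
	 */
	return iommu_report_device_fault(dev, &event);
}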
+
+int iommu_page_response(struct device *dev,
+                       struct iommu_page_response *msg)
+{
+       bool pasid_valid;
+       int ret = -EINVAL;
+       struct iommu_fault_event *evt;
+       struct iommu_fault_page_request *prm;
+       struct iommu_param *param = dev->iommu_param;
+       struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+
+       if (!domain || !domain->ops->page_response)
+               return -ENODEV;
+
+       if (!param || !param->fault_param)
+               return -EINVAL;
+
+       if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
+           msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
+               return -EINVAL;
+
+       /* Only send response if there is a fault report pending */
+       mutex_lock(&param->fault_param->lock);
+       if (list_empty(&param->fault_param->faults)) {
+               dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
+               goto done_unlock;
+       }
+       /*
+        * Check if there is a matching page request pending to respond to;
+        * otherwise return -EINVAL.
+        */
+       list_for_each_entry(evt, &param->fault_param->faults, list) {
+               prm = &evt->fault.prm;
+               pasid_valid = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
+
+               if ((pasid_valid && prm->pasid != msg->pasid) ||
+                   prm->grpid != msg->grpid)
+                       continue;
+
+               /* Sanitize the reply */
+               msg->flags = pasid_valid ? IOMMU_PAGE_RESP_PASID_VALID : 0;
+
+               ret = domain->ops->page_response(dev, evt, msg);
+               list_del(&evt->list);
+               kfree(evt);
+               break;
+       }
+
+done_unlock:
+       mutex_unlock(&param->fault_param->lock);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_page_response);
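The consumer completes a recoverable fault by sending back one of the response codes defined in the new UAPI header. A minimal sketch (complete_page_request() and its handled flag are illustrative, not part of the patch):

static int complete_page_request(struct device *dev, u32 pasid, u32 grpid,
				 bool handled)
{
	struct iommu_page_response resp = {
		.version = IOMMU_PAGE_RESP_VERSION_1,
		.flags	 = IOMMU_PAGE_RESP_PASID_VALID,
		.pasid	 = pasid,
		.grpid	 = grpid,
		.code	 = handled ? IOMMU_PAGE_RESP_SUCCESS
				   : IOMMU_PAGE_RESP_INVALID,
	};

	/* Matches a pending fault queued by iommu_report_device_fault(). */
	return iommu_page_response(dev, &resp);
}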
+
+/**
  * iommu_group_id - Return ID for a group
  * @group: the group to ID
  *
@@ -1895,24 +2129,23 @@ struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
        return region;
 }
 
-/* Request that a device is direct mapped by the IOMMU */
-int iommu_request_dm_for_dev(struct device *dev)
+static int
+request_default_domain_for_dev(struct device *dev, unsigned long type)
 {
-       struct iommu_domain *dm_domain;
+       struct iommu_domain *domain;
        struct iommu_group *group;
        int ret;
 
        /* Device must already be in a group before calling this function */
-       group = iommu_group_get_for_dev(dev);
-       if (IS_ERR(group))
-               return PTR_ERR(group);
+       group = iommu_group_get(dev);
+       if (!group)
+               return -EINVAL;
 
        mutex_lock(&group->mutex);
 
        /* Check if the default domain is already direct mapped */
        ret = 0;
-       if (group->default_domain &&
-           group->default_domain->type == IOMMU_DOMAIN_IDENTITY)
+       if (group->default_domain && group->default_domain->type == type)
                goto out;
 
        /* Don't change mappings of existing devices */
@@ -1922,23 +2155,26 @@ int iommu_request_dm_for_dev(struct device *dev)
 
        /* Allocate a direct mapped domain */
        ret = -ENOMEM;
-       dm_domain = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_IDENTITY);
-       if (!dm_domain)
+       domain = __iommu_domain_alloc(dev->bus, type);
+       if (!domain)
                goto out;
 
        /* Attach the device to the domain */
-       ret = __iommu_attach_group(dm_domain, group);
+       ret = __iommu_attach_group(domain, group);
        if (ret) {
-               iommu_domain_free(dm_domain);
+               iommu_domain_free(domain);
                goto out;
        }
 
+       iommu_group_create_direct_mappings(group, dev);
+
        /* Make the direct mapped domain the default for this group */
        if (group->default_domain)
                iommu_domain_free(group->default_domain);
-       group->default_domain = dm_domain;
+       group->default_domain = domain;
 
-       dev_info(dev, "Using iommu direct mapping\n");
+       dev_info(dev, "Using iommu %s mapping\n",
+                type == IOMMU_DOMAIN_DMA ? "dma" : "direct");
 
        ret = 0;
 out:
@@ -1948,6 +2184,18 @@ out:
        return ret;
 }
 
+/* Request that a device is direct mapped by the IOMMU */
+int iommu_request_dm_for_dev(struct device *dev)
+{
+       return request_default_domain_for_dev(dev, IOMMU_DOMAIN_IDENTITY);
+}
+
+/* Request that a device can't be direct mapped by the IOMMU */
+int iommu_request_dma_domain_for_dev(struct device *dev)
+{
+       return request_default_domain_for_dev(dev, IOMMU_DOMAIN_DMA);
+}
+
 const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
 {
        const struct iommu_ops *ops = NULL;
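With both wrappers in place, a caller can switch a group's default domain type after the device has been probed; each returns 0 if the default domain already has the requested type. A sketch, where device_needs_passthrough() stands in for a driver-specific policy check (it is not an existing helper):

static void choose_default_domain(struct device *dev)
{
	int ret;

	if (device_needs_passthrough(dev))
		ret = iommu_request_dm_for_dev(dev);	     /* IOMMU_DOMAIN_IDENTITY */
	else
		ret = iommu_request_dma_domain_for_dev(dev); /* IOMMU_DOMAIN_DMA */

	if (ret)
		dev_warn(dev, "failed to set default domain type: %d\n", ret);
}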
index ff31bdd..8e19bfa 100644 (file)
@@ -236,17 +236,6 @@ DEBUG_FOPS_RO(regs);
 DEFINE_SHOW_ATTRIBUTE(tlb);
 DEFINE_SHOW_ATTRIBUTE(pagetable);
 
-#define __DEBUG_ADD_FILE(attr, mode)                                   \
-       {                                                               \
-               struct dentry *dent;                                    \
-               dent = debugfs_create_file(#attr, mode, obj->debug_dir, \
-                                          obj, &attr##_fops);          \
-               if (!dent)                                              \
-                       goto err;                                       \
-       }
-
-#define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 0400)
-
 void omap_iommu_debugfs_add(struct omap_iommu *obj)
 {
        struct dentry *d;
@@ -254,23 +243,13 @@ void omap_iommu_debugfs_add(struct omap_iommu *obj)
        if (!iommu_debug_root)
                return;
 
-       obj->debug_dir = debugfs_create_dir(obj->name, iommu_debug_root);
-       if (!obj->debug_dir)
-               return;
+       d = debugfs_create_dir(obj->name, iommu_debug_root);
+       obj->debug_dir = d;
 
-       d = debugfs_create_u32("nr_tlb_entries", 0400, obj->debug_dir,
-                              &obj->nr_tlb_entries);
-       if (!d)
-               return;
-
-       DEBUG_ADD_FILE_RO(regs);
-       DEBUG_ADD_FILE_RO(tlb);
-       DEBUG_ADD_FILE_RO(pagetable);
-
-       return;
-
-err:
-       debugfs_remove_recursive(obj->debug_dir);
+       debugfs_create_u32("nr_tlb_entries", 0400, d, &obj->nr_tlb_entries);
+       debugfs_create_file("regs", 0400, d, obj, &regs_fops);
+       debugfs_create_file("tlb", 0400, d, obj, &tlb_fops);
+       debugfs_create_file("pagetable", 0400, d, obj, &pagetable_fops);
 }
 
 void omap_iommu_debugfs_remove(struct omap_iommu *obj)
@@ -284,8 +263,6 @@ void omap_iommu_debugfs_remove(struct omap_iommu *obj)
 void __init omap_iommu_debugfs_init(void)
 {
        iommu_debug_root = debugfs_create_dir("omap_iommu", NULL);
-       if (!iommu_debug_root)
-               pr_err("can't create debugfs dir\n");
 }
 
 void __exit omap_iommu_debugfs_exit(void)
index 62f9c61..dfb961d 100644 (file)
@@ -35,8 +35,7 @@
 
 static const struct iommu_ops omap_iommu_ops;
 
-#define to_iommu(dev)                                                  \
-       ((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)))
+#define to_iommu(dev)  ((struct omap_iommu *)dev_get_drvdata(dev))
 
 /* bitmap of the page sizes currently supported */
 #define OMAP_IOMMU_PGSIZES     (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
index 848fc71..3b06b6b 100644 (file)
@@ -42,6 +42,7 @@ struct iommu_ops;
 struct iommu_group;
 struct iommu_fwspec;
 struct dev_pin_info;
+struct iommu_param;
 
 struct bus_attribute {
        struct attribute        attr;
@@ -960,6 +961,7 @@ struct dev_links_info {
  *             device (i.e. the bus driver that discovered the device).
  * @iommu_group: IOMMU group the device belongs to.
  * @iommu_fwspec: IOMMU-specific properties supplied by firmware.
+ * @iommu_param: Per device generic IOMMU runtime data
  *
  * @offline_disabled: If set, the device is permanently online.
  * @offline:   Set after successful invocation of bus type's .offline().
@@ -1053,6 +1055,7 @@ struct device {
        void    (*release)(struct device *dev);
        struct iommu_group      *iommu_group;
        struct iommu_fwspec     *iommu_fwspec;
+       struct iommu_param      *iommu_param;
 
        bool                    offline_disabled:1;
        bool                    offline:1;
index 37258c8..2112f21 100644 (file)
@@ -5,59 +5,21 @@
 #ifndef __DMA_IOMMU_H
 #define __DMA_IOMMU_H
 
-#ifdef __KERNEL__
+#include <linux/errno.h>
 #include <linux/types.h>
-#include <asm/errno.h>
 
 #ifdef CONFIG_IOMMU_DMA
 #include <linux/dma-mapping.h>
 #include <linux/iommu.h>
 #include <linux/msi.h>
 
-int iommu_dma_init(void);
-
 /* Domain management interface for IOMMU drivers */
 int iommu_get_dma_cookie(struct iommu_domain *domain);
 int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
 void iommu_put_dma_cookie(struct iommu_domain *domain);
 
 /* Setup call for arch DMA mapping code */
-int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
-               u64 size, struct device *dev);
-
-/* General helpers for DMA-API <-> IOMMU-API interaction */
-int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
-                    unsigned long attrs);
-
-/*
- * These implement the bulk of the relevant DMA mapping callbacks, but require
- * the arch code to take care of attributes and cache maintenance
- */
-struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
-               unsigned long attrs, int prot, dma_addr_t *handle,
-               void (*flush_page)(struct device *, const void *, phys_addr_t));
-void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
-               dma_addr_t *handle);
-
-int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma);
-
-dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
-               unsigned long offset, size_t size, int prot);
-int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
-               int nents, int prot);
-
-/*
- * Arch code with no special attribute handling may use these
- * directly as DMA mapping callbacks for simplicity
- */
-void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
-               enum dma_data_direction dir, unsigned long attrs);
-void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-               enum dma_data_direction dir, unsigned long attrs);
-dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
-               size_t size, enum dma_data_direction dir, unsigned long attrs);
-void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
-               size_t size, enum dma_data_direction dir, unsigned long attrs);
+void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size);
 
 /* The DMA API isn't _quite_ the whole story, though... */
 /*
@@ -75,16 +37,16 @@ void iommu_dma_compose_msi_msg(struct msi_desc *desc,
 
 void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
 
-#else
+#else /* CONFIG_IOMMU_DMA */
 
 struct iommu_domain;
 struct msi_desc;
 struct msi_msg;
 struct device;
 
-static inline int iommu_dma_init(void)
+static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base,
+               u64 size)
 {
-       return 0;
 }
 
 static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
@@ -117,5 +79,4 @@ static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_he
 }
 
 #endif /* CONFIG_IOMMU_DMA */
-#endif /* __KERNEL__ */
 #endif /* __DMA_IOMMU_H */
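The many per-operation hooks are replaced by a single setup call, so the architecture side shrinks accordingly. A sketch of how an architecture's arch_setup_dma_ops() can now defer to it when an IOMMU is present (closely following the arm64 conversion in this merge, minus arch-specific details):

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	dev->dma_coherent = coherent;

	/* The generic IOMMU DMA ops install themselves on the device. */
	if (iommu)
		iommu_setup_dma_ops(dev, dma_base, size);
}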
index 6a8dd4a..f2ae8a0 100644 (file)
@@ -435,6 +435,12 @@ enum {
 #define VTD_FLAG_TRANS_PRE_ENABLED     (1 << 0)
 #define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1)
 
+extern int intel_iommu_sm;
+
+#define sm_supported(iommu)    (intel_iommu_sm && ecap_smts((iommu)->ecap))
+#define pasid_supported(iommu) (sm_supported(iommu) &&                 \
+                                ecap_pasid((iommu)->ecap))
+
 struct pasid_entry;
 struct pasid_state_entry;
 struct page_req_dsc;
@@ -642,7 +648,6 @@ extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
 
 extern int dmar_ir_support(void);
 
-struct dmar_domain *get_valid_domain_for_dev(struct device *dev);
 void *alloc_pgtable_page(int node);
 void free_pgtable_page(void *vaddr);
 struct intel_iommu *domain_get_iommu(struct dmar_domain *domain);
index 54ffcc6..94f047a 100644 (file)
@@ -49,7 +49,7 @@ struct svm_dev_ops {
 
 /**
  * intel_svm_bind_mm() - Bind the current process to a PASID
- * @dev:       Device to be granted acccess
+ * @dev:       Device to be granted access
  * @pasid:     Address for allocated PASID
  * @flags:     Flags. Later for requesting supervisor mode, etc.
  * @ops:       Callbacks to device driver
index 86b4e0a..fdc355c 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/of.h>
+#include <uapi/linux/iommu.h>
 
 #define IOMMU_READ     (1 << 0)
 #define IOMMU_WRITE    (1 << 1)
@@ -43,6 +44,7 @@ struct device;
 struct iommu_domain;
 struct notifier_block;
 struct iommu_sva;
+struct iommu_fault_event;
 
 /* iommu fault flags */
 #define IOMMU_FAULT_READ       0x0
@@ -52,6 +54,7 @@ typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
                        struct device *, unsigned long, int, void *);
 typedef int (*iommu_mm_exit_handler_t)(struct device *dev, struct iommu_sva *,
                                       void *);
+typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);
 
 struct iommu_domain_geometry {
        dma_addr_t aperture_start; /* First address that can be mapped    */
@@ -129,6 +132,12 @@ enum iommu_attr {
 enum iommu_resv_type {
        /* Memory regions which must be mapped 1:1 at all times */
        IOMMU_RESV_DIRECT,
+       /*
+        * Memory regions which are advertised to be 1:1 but are
+        * commonly considered relaxable in some conditions,
+        * for instance in the device assignment use case (USB, graphics)
+        */
+       IOMMU_RESV_DIRECT_RELAXABLE,
        /* Arbitrary "never map this or give it to a device" address ranges */
        IOMMU_RESV_RESERVED,
        /* Hardware MSI region (untranslated) */
@@ -218,6 +227,7 @@ struct iommu_sva_ops {
  * @sva_bind: Bind process address space to device
  * @sva_unbind: Unbind process address space from device
  * @sva_get_pasid: Get PASID associated to a SVA handle
+ * @page_response: handle page request response
  * @pgsize_bitmap: bitmap of all possible supported page sizes
  */
 struct iommu_ops {
@@ -278,6 +288,10 @@ struct iommu_ops {
        void (*sva_unbind)(struct iommu_sva *handle);
        int (*sva_get_pasid)(struct iommu_sva *handle);
 
+       int (*page_response)(struct device *dev,
+                            struct iommu_fault_event *evt,
+                            struct iommu_page_response *msg);
+
        unsigned long pgsize_bitmap;
 };
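A driver that wants iommu_page_response() to reach its hardware wires the new callback into its ops; a minimal sketch with hypothetical my_iommu_* names:

static int my_iommu_page_response(struct device *dev,
				  struct iommu_fault_event *evt,
				  struct iommu_page_response *msg)
{
	/* Translate msg->code into the hardware's PRG response and post it. */
	return 0;
}

static const struct iommu_ops my_iommu_ops = {
	/* ...the usual map/unmap/add_device callbacks... */
	.page_response	= my_iommu_page_response,
};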
 
@@ -295,6 +309,48 @@ struct iommu_device {
        struct device *dev;
 };
 
+/**
+ * struct iommu_fault_event - Generic fault event
+ *
+ * Can represent recoverable faults such as page requests or
+ * unrecoverable faults such as DMA or IRQ remapping faults.
+ *
+ * @fault: fault descriptor
+ * @list: pending fault event list, used for tracking responses
+ */
+struct iommu_fault_event {
+       struct iommu_fault fault;
+       struct list_head list;
+};
+
+/**
+ * struct iommu_fault_param - per-device IOMMU fault data
+ * @handler: Callback function to handle IOMMU faults at device level
+ * @data: handler private data
+ * @faults: holds the pending faults which need a response
+ * @lock: protect pending faults list
+ */
+struct iommu_fault_param {
+       iommu_dev_fault_handler_t handler;
+       void *data;
+       struct list_head faults;
+       struct mutex lock;
+};
+
+/**
+ * struct iommu_param - collection of per-device IOMMU data
+ *
+ * @fault_param: IOMMU detected device fault reporting data
+ *
+ * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
+ *     struct iommu_group      *iommu_group;
+ *     struct iommu_fwspec     *iommu_fwspec;
+ */
+struct iommu_param {
+       struct mutex lock;
+       struct iommu_fault_param *fault_param;
+};
+
 int  iommu_device_register(struct iommu_device *iommu);
 void iommu_device_unregister(struct iommu_device *iommu);
 int  iommu_device_sysfs_add(struct iommu_device *iommu,
@@ -356,6 +412,7 @@ extern void iommu_set_fault_handler(struct iommu_domain *domain,
 extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
 extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
 extern int iommu_request_dm_for_dev(struct device *dev);
+extern int iommu_request_dma_domain_for_dev(struct device *dev);
 extern struct iommu_resv_region *
 iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
                        enum iommu_resv_type type);
@@ -384,6 +441,17 @@ extern int iommu_group_register_notifier(struct iommu_group *group,
                                         struct notifier_block *nb);
 extern int iommu_group_unregister_notifier(struct iommu_group *group,
                                           struct notifier_block *nb);
+extern int iommu_register_device_fault_handler(struct device *dev,
+                                       iommu_dev_fault_handler_t handler,
+                                       void *data);
+
+extern int iommu_unregister_device_fault_handler(struct device *dev);
+
+extern int iommu_report_device_fault(struct device *dev,
+                                    struct iommu_fault_event *evt);
+extern int iommu_page_response(struct device *dev,
+                              struct iommu_page_response *msg);
+
 extern int iommu_group_id(struct iommu_group *group);
 extern struct iommu_group *iommu_group_get_for_dev(struct device *dev);
 extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);
@@ -498,6 +566,7 @@ struct iommu_ops {};
 struct iommu_group {};
 struct iommu_fwspec {};
 struct iommu_device {};
+struct iommu_fault_param {};
 
 static inline bool iommu_present(struct bus_type *bus)
 {
@@ -620,6 +689,11 @@ static inline int iommu_request_dm_for_dev(struct device *dev)
        return -ENODEV;
 }
 
+static inline int iommu_request_dma_domain_for_dev(struct device *dev)
+{
+       return -ENODEV;
+}
+
 static inline int iommu_attach_group(struct iommu_domain *domain,
                                     struct iommu_group *group)
 {
@@ -691,6 +765,31 @@ static inline int iommu_group_unregister_notifier(struct iommu_group *group,
        return 0;
 }
 
+static inline
+int iommu_register_device_fault_handler(struct device *dev,
+                                       iommu_dev_fault_handler_t handler,
+                                       void *data)
+{
+       return -ENODEV;
+}
+
+static inline int iommu_unregister_device_fault_handler(struct device *dev)
+{
+       return 0;
+}
+
+static inline
+int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
+{
+       return -ENODEV;
+}
+
+static inline int iommu_page_response(struct device *dev,
+                                     struct iommu_page_response *msg)
+{
+       return -ENODEV;
+}
+
 static inline int iommu_group_id(struct iommu_group *group)
 {
        return -ENODEV;
diff --git a/include/uapi/linux/iommu.h b/include/uapi/linux/iommu.h
new file mode 100644 (file)
index 0000000..fc00c5d
--- /dev/null
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * IOMMU user API definitions
+ */
+
+#ifndef _UAPI_IOMMU_H
+#define _UAPI_IOMMU_H
+
+#include <linux/types.h>
+
+#define IOMMU_FAULT_PERM_READ  (1 << 0) /* read */
+#define IOMMU_FAULT_PERM_WRITE (1 << 1) /* write */
+#define IOMMU_FAULT_PERM_EXEC  (1 << 2) /* exec */
+#define IOMMU_FAULT_PERM_PRIV  (1 << 3) /* privileged */
+
+/* Generic fault types; can be expanded, e.g. with IRQ remapping faults */
+enum iommu_fault_type {
+       IOMMU_FAULT_DMA_UNRECOV = 1,    /* unrecoverable fault */
+       IOMMU_FAULT_PAGE_REQ,           /* page request fault */
+};
+
+enum iommu_fault_reason {
+       IOMMU_FAULT_REASON_UNKNOWN = 0,
+
+       /* Could not access the PASID table (fetch caused external abort) */
+       IOMMU_FAULT_REASON_PASID_FETCH,
+
+       /* PASID entry is invalid or has configuration errors */
+       IOMMU_FAULT_REASON_BAD_PASID_ENTRY,
+
+       /*
+        * PASID is out of range (e.g. exceeds the maximum PASID
+        * supported by the IOMMU) or disabled.
+        */
+       IOMMU_FAULT_REASON_PASID_INVALID,
+
+       /*
+        * An external abort occurred fetching (or updating) a translation
+        * table descriptor
+        */
+       IOMMU_FAULT_REASON_WALK_EABT,
+
+       /*
+        * Could not access the page table entry (Bad address),
+        * actual translation fault
+        */
+       IOMMU_FAULT_REASON_PTE_FETCH,
+
+       /* Protection flag check failed */
+       IOMMU_FAULT_REASON_PERMISSION,
+
+       /* access flag check failed */
+       IOMMU_FAULT_REASON_ACCESS,
+
+       /* Output address of a translation stage caused an Address Size fault */
+       IOMMU_FAULT_REASON_OOR_ADDRESS,
+};
+
+/**
+ * struct iommu_fault_unrecoverable - Unrecoverable fault data
+ * @reason: reason of the fault, from &enum iommu_fault_reason
+ * @flags: parameters of this fault (IOMMU_FAULT_UNRECOV_* values)
+ * @pasid: Process Address Space ID
+ * @perm: access permissions requested by the incoming transaction
+ *        (IOMMU_FAULT_PERM_* values)
+ * @addr: offending page address
+ * @fetch_addr: address that caused a fetch abort, if any
+ */
+struct iommu_fault_unrecoverable {
+       __u32   reason;
+#define IOMMU_FAULT_UNRECOV_PASID_VALID                (1 << 0)
+#define IOMMU_FAULT_UNRECOV_ADDR_VALID         (1 << 1)
+#define IOMMU_FAULT_UNRECOV_FETCH_ADDR_VALID   (1 << 2)
+       __u32   flags;
+       __u32   pasid;
+       __u32   perm;
+       __u64   addr;
+       __u64   fetch_addr;
+};
+
+/**
+ * struct iommu_fault_page_request - Page Request data
+ * @flags: encodes whether the corresponding fields are valid and whether this
+ *         is the last page in the group (IOMMU_FAULT_PAGE_REQUEST_* values)
+ * @pasid: Process Address Space ID
+ * @grpid: Page Request Group Index
+ * @perm: requested page permissions (IOMMU_FAULT_PERM_* values)
+ * @addr: page address
+ * @private_data: device-specific private information
+ */
+struct iommu_fault_page_request {
+#define IOMMU_FAULT_PAGE_REQUEST_PASID_VALID   (1 << 0)
+#define IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE     (1 << 1)
+#define IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA     (1 << 2)
+       __u32   flags;
+       __u32   pasid;
+       __u32   grpid;
+       __u32   perm;
+       __u64   addr;
+       __u64   private_data[2];
+};
+
+/**
+ * struct iommu_fault - Generic fault data
+ * @type: fault type from &enum iommu_fault_type
+ * @padding: reserved for future use (should be zero)
+ * @event: fault event, when @type is %IOMMU_FAULT_DMA_UNRECOV
+ * @prm: Page Request message, when @type is %IOMMU_FAULT_PAGE_REQ
+ * @padding2: sets the fault size to allow for future extensions
+ */
+struct iommu_fault {
+       __u32   type;
+       __u32   padding;
+       union {
+               struct iommu_fault_unrecoverable event;
+               struct iommu_fault_page_request prm;
+               __u8 padding2[56];
+       };
+};
+
+/**
+ * enum iommu_page_response_code - Return status of fault handlers
+ * @IOMMU_PAGE_RESP_SUCCESS: Fault has been handled and the page tables
+ *     populated, retry the access. This is "Success" in PCI PRI.
+ * @IOMMU_PAGE_RESP_FAILURE: General error. Drop all subsequent faults from
+ *     this device if possible. This is "Response Failure" in PCI PRI.
+ * @IOMMU_PAGE_RESP_INVALID: Could not handle this fault, don't retry the
+ *     access. This is "Invalid Request" in PCI PRI.
+ */
+enum iommu_page_response_code {
+       IOMMU_PAGE_RESP_SUCCESS = 0,
+       IOMMU_PAGE_RESP_INVALID,
+       IOMMU_PAGE_RESP_FAILURE,
+};
+
+/**
+ * struct iommu_page_response - Generic page response information
+ * @version: API version of this structure
+ * @flags: encodes whether the corresponding fields are valid
+ *         (IOMMU_PAGE_RESP_* values)
+ * @pasid: Process Address Space ID
+ * @grpid: Page Request Group Index
+ * @code: response code from &enum iommu_page_response_code
+ */
+struct iommu_page_response {
+#define IOMMU_PAGE_RESP_VERSION_1      1
+       __u32   version;
+#define IOMMU_PAGE_RESP_PASID_VALID    (1 << 0)
+       __u32   flags;
+       __u32   pasid;
+       __u32   grpid;
+       __u32   code;
+};
+
+#endif /* _UAPI_IOMMU_H */
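Since this header is UAPI, padding2[56] deliberately pins the union so that struct iommu_fault stays 64 bytes (8 bytes of header plus the 56-byte union). A consumer can assert that at build time, for example:

#include <linux/iommu.h>	/* the UAPI header above */

_Static_assert(sizeof(struct iommu_fault) == 64,
	       "struct iommu_fault ABI size changed");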