iommu: Add a gfp parameter to iommu_map()
author Jason Gunthorpe <jgg@nvidia.com>
Mon, 23 Jan 2023 20:35:54 +0000 (16:35 -0400)
committer Joerg Roedel <jroedel@suse.de>
Wed, 25 Jan 2023 10:52:00 +0000 (11:52 +0100)
The internal mechanisms already support this, but instead of exposing the
gfp to the caller it is hidden inside the iommu_map() and
iommu_map_atomic() wrappers.

Fix this instead of adding more variants for GFP_KERNEL_ACCOUNT.
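As an illustration of the intent (not part of this patch), a caller that
wants its IOMMU page-table allocations charged to the current cgroup could
then pass GFP_KERNEL_ACCOUNT directly rather than needing yet another
wrapper; the domain/iova/paddr/size values below are assumed to come from
the caller's context:

        #include <linux/iommu.h>

        /*
         * Example only: map a range while charging the page-table
         * allocations to the caller's memory cgroup via GFP_KERNEL_ACCOUNT.
         */
        static int example_map_accounted(struct iommu_domain *domain,
                                         unsigned long iova,
                                         phys_addr_t paddr, size_t size)
        {
                return iommu_map(domain, iova, paddr, size,
                                 IOMMU_READ | IOMMU_WRITE,
                                 GFP_KERNEL_ACCOUNT);
        }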

Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Mathieu Poirier <mathieu.poirier@linaro.org>
Link: https://lore.kernel.org/r/1-v3-76b587fe28df+6e3-iommu_map_gfp_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
16 files changed:
arch/arm/mm/dma-mapping.c
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
drivers/gpu/drm/tegra/drm.c
drivers/gpu/host1x/cdma.c
drivers/infiniband/hw/usnic/usnic_uiom.c
drivers/iommu/dma-iommu.c
drivers/iommu/iommu.c
drivers/iommu/iommufd/pages.c
drivers/media/platform/qcom/venus/firmware.c
drivers/net/ipa/ipa_mem.c
drivers/net/wireless/ath/ath10k/snoc.c
drivers/net/wireless/ath/ath11k/ahb.c
drivers/remoteproc/remoteproc_core.c
drivers/vfio/vfio_iommu_type1.c
drivers/vhost/vdpa.c
include/linux/iommu.h

index c135f6e..8bc0107 100644 (file)
@@ -984,7 +984,8 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
 
                len = (j - i) << PAGE_SHIFT;
                ret = iommu_map(mapping->domain, iova, phys, len,
-                               __dma_info_to_prot(DMA_BIDIRECTIONAL, attrs));
+                               __dma_info_to_prot(DMA_BIDIRECTIONAL, attrs),
+                               GFP_KERNEL);
                if (ret < 0)
                        goto fail;
                iova += len;
@@ -1207,7 +1208,8 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 
                prot = __dma_info_to_prot(dir, attrs);
 
-               ret = iommu_map(mapping->domain, iova, phys, len, prot);
+               ret = iommu_map(mapping->domain, iova, phys, len, prot,
+                               GFP_KERNEL);
                if (ret < 0)
                        goto fail;
                count += len >> PAGE_SHIFT;
@@ -1379,7 +1381,8 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
 
        prot = __dma_info_to_prot(dir, attrs);
 
-       ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
+       ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len,
+                       prot, GFP_KERNEL);
        if (ret < 0)
                goto fail;
 
@@ -1443,7 +1446,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev,
 
        prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
 
-       ret = iommu_map(mapping->domain, dma_addr, addr, len, prot);
+       ret = iommu_map(mapping->domain, dma_addr, addr, len, prot, GFP_KERNEL);
        if (ret < 0)
                goto fail;
 
index 648ecf5..a4ac94a 100644 (file)
@@ -475,7 +475,8 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
                u32 offset = (r->offset + i) << imem->iommu_pgshift;
 
                ret = iommu_map(imem->domain, offset, node->dma_addrs[i],
-                               PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
+                               PAGE_SIZE, IOMMU_READ | IOMMU_WRITE,
+                               GFP_KERNEL);
                if (ret < 0) {
                        nvkm_error(subdev, "IOMMU mapping failure: %d\n", ret);
 
index 7bd2e65..6ca9f39 100644 (file)
@@ -1057,7 +1057,7 @@ void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *dma)
 
        *dma = iova_dma_addr(&tegra->carveout.domain, alloc);
        err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
-                       size, IOMMU_READ | IOMMU_WRITE);
+                       size, IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
        if (err < 0)
                goto free_iova;
 
index 103fda0..4ddfcd2 100644 (file)
@@ -105,7 +105,7 @@ static int host1x_pushbuffer_init(struct push_buffer *pb)
 
                pb->dma = iova_dma_addr(&host1x->iova, alloc);
                err = iommu_map(host1x->domain, pb->dma, pb->phys, size,
-                               IOMMU_READ);
+                               IOMMU_READ, GFP_KERNEL);
                if (err)
                        goto iommu_free_iova;
        } else {
index c301b3b..aeeaca6 100644 (file)
@@ -277,7 +277,7 @@ iter_chunk:
                                usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
                                        va_start, &pa_start, size, flags);
                                err = iommu_map(pd->domain, va_start, pa_start,
-                                                       size, flags);
+                                               size, flags, GFP_KERNEL);
                                if (err) {
                                        usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
                                                va_start, &pa_start, size, err);
@@ -294,7 +294,7 @@ iter_chunk:
                                usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
                                        va_start, &pa_start, size, flags);
                                err = iommu_map(pd->domain, va_start, pa_start,
-                                               size, flags);
+                                               size, flags, GFP_KERNEL);
                                if (err) {
                                        usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
                                                va_start, &pa_start, size, err);
index f798c44..8bdb65e 100644 (file)
@@ -1615,7 +1615,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
        if (!iova)
                goto out_free_page;
 
-       if (iommu_map(domain, iova, msi_addr, size, prot))
+       if (iommu_map(domain, iova, msi_addr, size, prot, GFP_KERNEL))
                goto out_free_iova;
 
        INIT_LIST_HEAD(&msi_page->list);
index de91dd8..4d596b8 100644 (file)
@@ -930,7 +930,7 @@ map_end:
                        if (map_size) {
                                ret = iommu_map(domain, addr - map_size,
                                                addr - map_size, map_size,
-                                               entry->prot);
+                                               entry->prot, GFP_KERNEL);
                                if (ret)
                                        goto out;
                                map_size = 0;
@@ -2360,31 +2360,31 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
        return ret;
 }
 
-static int _iommu_map(struct iommu_domain *domain, unsigned long iova,
-                     phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+int iommu_map(struct iommu_domain *domain, unsigned long iova,
+             phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
        const struct iommu_domain_ops *ops = domain->ops;
        int ret;
 
+       might_sleep_if(gfpflags_allow_blocking(gfp));
+
+       /* Discourage passing strange GFP flags */
+       if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 |
+                               __GFP_HIGHMEM)))
+               return -EINVAL;
+
        ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
        if (ret == 0 && ops->iotlb_sync_map)
                ops->iotlb_sync_map(domain, iova, size);
 
        return ret;
 }
-
-int iommu_map(struct iommu_domain *domain, unsigned long iova,
-             phys_addr_t paddr, size_t size, int prot)
-{
-       might_sleep();
-       return _iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
-}
 EXPORT_SYMBOL_GPL(iommu_map);
 
 int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
              phys_addr_t paddr, size_t size, int prot)
 {
-       return _iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
+       return iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
 }
 EXPORT_SYMBOL_GPL(iommu_map_atomic);
 
index 1e1d350..22cc3bb 100644 (file)
@@ -456,7 +456,8 @@ static int batch_iommu_map_small(struct iommu_domain *domain,
                        size % PAGE_SIZE);
 
        while (size) {
-               rc = iommu_map(domain, iova, paddr, PAGE_SIZE, prot);
+               rc = iommu_map(domain, iova, paddr, PAGE_SIZE, prot,
+                              GFP_KERNEL);
                if (rc)
                        goto err_unmap;
                iova += PAGE_SIZE;
@@ -500,7 +501,8 @@ static int batch_to_domain(struct pfn_batch *batch, struct iommu_domain *domain,
                else
                        rc = iommu_map(domain, iova,
                                       PFN_PHYS(batch->pfns[cur]) + page_offset,
-                                      next_iova - iova, area->iommu_prot);
+                                      next_iova - iova, area->iommu_prot,
+                                      GFP_KERNEL);
                if (rc)
                        goto err_unmap;
                iova = next_iova;
index 142d4c7..07d4dce 100644 (file)
@@ -158,7 +158,7 @@ static int venus_boot_no_tz(struct venus_core *core, phys_addr_t mem_phys,
        core->fw.mapped_mem_size = mem_size;
 
        ret = iommu_map(iommu, VENUS_FW_START_ADDR, mem_phys, mem_size,
-                       IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV);
+                       IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV, GFP_KERNEL);
        if (ret) {
                dev_err(dev, "could not map video firmware region\n");
                return ret;
index 9ec5af3..991a7d3 100644 (file)
@@ -466,7 +466,8 @@ static int ipa_imem_init(struct ipa *ipa, unsigned long addr, size_t size)
        size = PAGE_ALIGN(size + addr - phys);
        iova = phys;    /* We just want a direct mapping */
 
-       ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE);
+       ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE,
+                       GFP_KERNEL);
        if (ret)
                return ret;
 
@@ -574,7 +575,8 @@ static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
        size = PAGE_ALIGN(size + addr - phys);
        iova = phys;    /* We just want a direct mapping */
 
-       ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE);
+       ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE,
+                       GFP_KERNEL);
        if (ret)
                return ret;
 
index cfcb759..9a82f03 100644 (file)
@@ -1639,7 +1639,7 @@ static int ath10k_fw_init(struct ath10k *ar)
 
        ret = iommu_map(iommu_dom, ar_snoc->fw.fw_start_addr,
                        ar->msa.paddr, ar->msa.mem_size,
-                       IOMMU_READ | IOMMU_WRITE);
+                       IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
        if (ret) {
                ath10k_err(ar, "failed to map firmware region: %d\n", ret);
                goto err_iommu_detach;
index d34a4d6..df8fdc7 100644 (file)
@@ -1021,7 +1021,7 @@ static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
 
        ret = iommu_map(iommu_dom, ab_ahb->fw.msa_paddr,
                        ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size,
-                       IOMMU_READ | IOMMU_WRITE);
+                       IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
        if (ret) {
                ath11k_err(ab, "failed to map firmware region: %d\n", ret);
                goto err_iommu_detach;
@@ -1029,7 +1029,7 @@ static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
 
        ret = iommu_map(iommu_dom, ab_ahb->fw.ce_paddr,
                        ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size,
-                       IOMMU_READ | IOMMU_WRITE);
+                       IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
        if (ret) {
                ath11k_err(ab, "failed to map firmware CE region: %d\n", ret);
                goto err_iommu_unmap;
index 1cd4815..80072b6 100644 (file)
@@ -643,7 +643,8 @@ static int rproc_handle_devmem(struct rproc *rproc, void *ptr,
        if (!mapping)
                return -ENOMEM;
 
-       ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len, rsc->flags);
+       ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len, rsc->flags,
+                       GFP_KERNEL);
        if (ret) {
                dev_err(dev, "failed to map devmem: %d\n", ret);
                goto out;
@@ -737,7 +738,7 @@ static int rproc_alloc_carveout(struct rproc *rproc,
                }
 
                ret = iommu_map(rproc->domain, mem->da, dma, mem->len,
-                               mem->flags);
+                               mem->flags, GFP_KERNEL);
                if (ret) {
                        dev_err(dev, "iommu_map failed: %d\n", ret);
                        goto free_mapping;
index 23c24fe..e14f86a 100644 (file)
@@ -1480,7 +1480,8 @@ static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
 
        list_for_each_entry(d, &iommu->domain_list, next) {
                ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
-                               npage << PAGE_SHIFT, prot | IOMMU_CACHE);
+                               npage << PAGE_SHIFT, prot | IOMMU_CACHE,
+                               GFP_KERNEL);
                if (ret)
                        goto unwind;
 
@@ -1777,8 +1778,8 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
                                size = npage << PAGE_SHIFT;
                        }
 
-                       ret = iommu_map(domain->domain, iova, phys,
-                                       size, dma->prot | IOMMU_CACHE);
+                       ret = iommu_map(domain->domain, iova, phys, size,
+                                       dma->prot | IOMMU_CACHE, GFP_KERNEL);
                        if (ret) {
                                if (!dma->iommu_mapped) {
                                        vfio_unpin_pages_remote(dma, iova,
@@ -1866,7 +1867,7 @@ static void vfio_test_domain_fgsp(struct vfio_domain *domain)
                return;
 
        ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
-                       IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
+                       IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE, GFP_KERNEL);
        if (!ret) {
                size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);
 
index ec32f78..fd1536d 100644 (file)
@@ -792,7 +792,7 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
                        r = ops->set_map(vdpa, asid, iotlb);
        } else {
                r = iommu_map(v->domain, iova, pa, size,
-                             perm_to_iommu_flags(perm));
+                             perm_to_iommu_flags(perm), GFP_KERNEL);
        }
        if (r) {
                vhost_iotlb_del_range(iotlb, iova, iova + size - 1);
index 46e1347..d202099 100644 (file)
@@ -467,7 +467,7 @@ extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
 extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
 extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
 extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
-                    phys_addr_t paddr, size_t size, int prot);
+                    phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
 extern int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
                            phys_addr_t paddr, size_t size, int prot);
 extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
@@ -773,7 +773,7 @@ static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
 }
 
 static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
-                           phys_addr_t paddr, size_t size, int prot)
+                           phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
        return -ENODEV;
 }