iommu/msm: Update to {map,unmap}_pages
Author:     Robin Murphy <robin.murphy@arm.com>
AuthorDate: Tue, 15 Nov 2022 15:26:38 +0000
Commit:     Joerg Roedel <jroedel@suse.de>
CommitDate: Sat, 19 Nov 2022 09:44:14 +0000
Update map/unmap to the new multi-page interfaces, which is dead easy
since we just pass them through to io-pgtable anyway.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Acked-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/24a8f522710ddd6bbac4da154aa28799e939ebe4.1668100209.git.robin.murphy@arm.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
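[Editor's note, not part of the commit: for reference, a sketch of the two multi-page callback shapes this patch adopts, paraphrased from include/linux/iommu.h around v6.1. The io-pgtable map_pages/unmap_pages ops take the same trailing arguments, which is why msm_iommu can pass everything straight through under its page-table lock.]

	/*
	 * Sketch only: the struct iommu_domain_ops members involved.
	 * Maps/unmaps @pgcount pages of size @pgsize starting at @iova.
	 */
	int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped);
	size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *iotlb_gather);

The core guarantees that pgsize is one of the sizes advertised in the domain's pgsize_bitmap and that iova/paddr are suitably aligned, so a driver that defers to io-pgtable needs no splitting loop of its own; the bytes reported back through *mapped let the core unwind a partial mapping on failure.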
drivers/iommu/msm_iommu.c

index 16179a9..c606249 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -471,14 +471,16 @@ fail:
 }
 
 static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
-                        phys_addr_t pa, size_t len, int prot, gfp_t gfp)
+                        phys_addr_t pa, size_t pgsize, size_t pgcount,
+                        int prot, gfp_t gfp, size_t *mapped)
 {
        struct msm_priv *priv = to_msm_priv(domain);
        unsigned long flags;
        int ret;
 
        spin_lock_irqsave(&priv->pgtlock, flags);
-       ret = priv->iop->map(priv->iop, iova, pa, len, prot, GFP_ATOMIC);
+       ret = priv->iop->map_pages(priv->iop, iova, pa, pgsize, pgcount, prot,
+                                  GFP_ATOMIC, mapped);
        spin_unlock_irqrestore(&priv->pgtlock, flags);
 
        return ret;
@@ -493,16 +495,18 @@ static void msm_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
 }
 
 static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
-                             size_t len, struct iommu_iotlb_gather *gather)
+                             size_t pgsize, size_t pgcount,
+                             struct iommu_iotlb_gather *gather)
 {
        struct msm_priv *priv = to_msm_priv(domain);
        unsigned long flags;
+       size_t ret;
 
        spin_lock_irqsave(&priv->pgtlock, flags);
-       len = priv->iop->unmap(priv->iop, iova, len, gather);
+       ret = priv->iop->unmap_pages(priv->iop, iova, pgsize, pgcount, gather);
        spin_unlock_irqrestore(&priv->pgtlock, flags);
 
-       return len;
+       return ret;
 }
 
 static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -679,8 +683,8 @@ static struct iommu_ops msm_iommu_ops = {
        .default_domain_ops = &(const struct iommu_domain_ops) {
                .attach_dev     = msm_iommu_attach_dev,
                .detach_dev     = msm_iommu_detach_dev,
-               .map            = msm_iommu_map,
-               .unmap          = msm_iommu_unmap,
+               .map_pages      = msm_iommu_map,
+               .unmap_pages    = msm_iommu_unmap,
                /*
                 * Nothing is needed here, the barrier to guarantee
                 * completion of the tlb sync operation is implicitly
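[Editor's note, not part of the commit: for context on how these callbacks get invoked, the IOMMU core splits each map/unmap request into runs of equal-sized pages and hands each run to the driver as one (pgsize, pgcount) call. Below is a simplified, illustrative sketch of that size selection; the real logic is iommu_pgsize() in drivers/iommu/iommu.c and handles more corner cases, and pick_pgsize() here is a hypothetical stand-in.]

	/*
	 * Illustrative only: choose the largest supported page size for
	 * the current position, constrained by the alignment of
	 * iova|paddr and the remaining length. Assumes at least one bit
	 * of @pgsize_bitmap fits within @size.
	 */
	static size_t pick_pgsize(unsigned long pgsize_bitmap,
				  unsigned long iova, phys_addr_t paddr,
				  size_t size, size_t *count)
	{
		unsigned long addr_merge = iova | (unsigned long)paddr;
		/* Page sizes small enough for the remaining length... */
		unsigned long pgsizes = pgsize_bitmap & GENMASK(__fls(size), 0);
		size_t pgsize;

		/* ...and no bigger than the address alignment allows. */
		if (addr_merge)
			pgsizes &= GENMASK(__ffs(addr_merge), 0);

		pgsize = BIT(__fls(pgsizes));
		*count = size / pgsize;	/* simplification: one run to the end */
		return pgsize;
	}

A loop in the core then calls ops->map_pages(domain, iova, paddr, pgsize, count, prot, gfp, &mapped) and advances iova/paddr by the bytes reported in mapped, so drivers like msm_iommu only ever see uniform page runs.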