From: Robin Murphy
Date: Wed, 18 Sep 2019 16:17:48 +0000 (+0100)
Subject: iommu/arm-smmu: Remove .tlb_inv_range indirection
X-Git-Tag: v5.15~4995^2^7~2^2~20
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=3f3b8d0c9c1838271543df9e655032117a663f88;p=platform%2Fkernel%2Flinux-starfive.git

iommu/arm-smmu: Remove .tlb_inv_range indirection

Fill in 'native' iommu_flush_ops callbacks for all the
arm_smmu_flush_ops variants, and clear up the remains of the previous
.tlb_inv_range abstraction.

Signed-off-by: Robin Murphy
Signed-off-by: Will Deacon
---

diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index b18aac4c..78292e8 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -312,7 +312,7 @@ static void arm_smmu_tlb_inv_context_s2(void *cookie)
 }

 static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
-				      size_t granule, bool leaf, void *cookie)
+				      size_t granule, void *cookie, bool leaf)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
@@ -342,7 +342,7 @@ static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
 }

 static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
-				      size_t granule, bool leaf, void *cookie)
+				      size_t granule, void *cookie, bool leaf)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
@@ -362,84 +362,100 @@ static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
 	} while (size -= granule);
 }

-/*
- * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
- * almost negligible, but the benefit of getting the first one in as far ahead
- * of the sync as possible is significant, hence we don't just make this a
- * no-op and set .tlb_sync to arm_smmu_tlb_inv_context_s2() as you might think.
- */
-static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
-					 size_t granule, bool leaf, void *cookie)
+static void arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size,
+				     size_t granule, void *cookie)
 {
-	struct arm_smmu_domain *smmu_domain = cookie;
-	struct arm_smmu_device *smmu = smmu_domain->smmu;
-
-	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
-		wmb();
+	arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie, false);
+	arm_smmu_tlb_sync_context(cookie);
+}

-	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
+static void arm_smmu_tlb_inv_leaf_s1(unsigned long iova, size_t size,
+				     size_t granule, void *cookie)
+{
+	arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie, true);
+	arm_smmu_tlb_sync_context(cookie);
 }

-static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
-				  size_t granule, void *cookie)
+static void arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather,
+				     unsigned long iova, size_t granule,
+				     void *cookie)
 {
-	struct arm_smmu_domain *smmu_domain = cookie;
-	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
+	arm_smmu_tlb_inv_range_s1(iova, granule, granule, cookie, true);
+}

-	ops->tlb_inv_range(iova, size, granule, false, cookie);
-	ops->tlb_sync(cookie);
+static void arm_smmu_tlb_inv_walk_s2(unsigned long iova, size_t size,
+				     size_t granule, void *cookie)
+{
+	arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie, false);
+	arm_smmu_tlb_sync_context(cookie);
 }

-static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
-				  size_t granule, void *cookie)
+static void arm_smmu_tlb_inv_leaf_s2(unsigned long iova, size_t size,
+				     size_t granule, void *cookie)
 {
-	struct arm_smmu_domain *smmu_domain = cookie;
-	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
+	arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie, true);
+	arm_smmu_tlb_sync_context(cookie);
+}

-	ops->tlb_inv_range(iova, size, granule, true, cookie);
-	ops->tlb_sync(cookie);
+static void arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather,
+				     unsigned long iova, size_t granule,
+				     void *cookie)
+{
+	arm_smmu_tlb_inv_range_s2(iova, granule, granule, cookie, true);
 }

-static void arm_smmu_tlb_add_page(struct iommu_iotlb_gather *gather,
-				  unsigned long iova, size_t granule,
-				  void *cookie)
+static void arm_smmu_tlb_inv_any_s2_v1(unsigned long iova, size_t size,
+				       size_t granule, void *cookie)
+{
+	arm_smmu_tlb_inv_context_s2(cookie);
+}
+/*
+ * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
+ * almost negligible, but the benefit of getting the first one in as far ahead
+ * of the sync as possible is significant, hence we don't just make this a
+ * no-op and call arm_smmu_tlb_inv_context_s2() from .iotlb_sync as you might
+ * think.
+ */
+static void arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather,
+					unsigned long iova, size_t granule,
+					void *cookie)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;
-	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;

-	ops->tlb_inv_range(iova, granule, granule, true, cookie);
+	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+		wmb();
+
+	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
 }

 static const struct arm_smmu_flush_ops arm_smmu_s1_tlb_ops = {
 	.tlb = {
 		.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
-		.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
-		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
-		.tlb_add_page	= arm_smmu_tlb_add_page,
+		.tlb_flush_walk	= arm_smmu_tlb_inv_walk_s1,
+		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf_s1,
+		.tlb_add_page	= arm_smmu_tlb_add_page_s1,
 	},
-	.tlb_inv_range		= arm_smmu_tlb_inv_range_s1,
 	.tlb_sync		= arm_smmu_tlb_sync_context,
 };

 static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
 	.tlb = {
 		.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
-		.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
-		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
-		.tlb_add_page	= arm_smmu_tlb_add_page,
+		.tlb_flush_walk	= arm_smmu_tlb_inv_walk_s2,
+		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf_s2,
+		.tlb_add_page	= arm_smmu_tlb_add_page_s2,
 	},
-	.tlb_inv_range		= arm_smmu_tlb_inv_range_s2,
 	.tlb_sync		= arm_smmu_tlb_sync_context,
 };

 static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
 	.tlb = {
 		.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
-		.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
-		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
-		.tlb_add_page	= arm_smmu_tlb_add_page,
+		.tlb_flush_walk	= arm_smmu_tlb_inv_any_s2_v1,
+		.tlb_flush_leaf	= arm_smmu_tlb_inv_any_s2_v1,
+		.tlb_add_page	= arm_smmu_tlb_add_page_s2_v1,
 	},
-	.tlb_inv_range		= arm_smmu_tlb_inv_vmid_nosync,
 	.tlb_sync		= arm_smmu_tlb_sync_vmid,
 };

diff --git a/drivers/iommu/arm-smmu.h b/drivers/iommu/arm-smmu.h
index b19b6ca..6edd35c 100644
--- a/drivers/iommu/arm-smmu.h
+++ b/drivers/iommu/arm-smmu.h
@@ -306,8 +306,6 @@ enum arm_smmu_domain_stage {

 struct arm_smmu_flush_ops {
 	struct iommu_flush_ops		tlb;
-	void (*tlb_inv_range)(unsigned long iova, size_t size, size_t granule,
-			      bool leaf, void *cookie);
 	void (*tlb_sync)(void *cookie);
 };
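
For illustration, a minimal standalone sketch of the pattern this patch
applies: instead of generic wrappers dispatching through a ->tlb_inv_range
function pointer stored beside the ops, each stage gets 'native' wrappers
that call its range helper directly. All names here (flush_ops,
inv_range_s1, sync_context, s1_ops) are simplified stand-ins invented for
this sketch, not the driver's real symbols, and it builds as ordinary
userspace C rather than kernel code:

/*
 * Sketch of removing a range-invalidation indirection: the per-stage
 * wrappers below call the stage-specific helper directly, so no extra
 * function pointer needs to live alongside the ops table.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-in for iommu_flush_ops: just the two callbacks exercised here. */
struct flush_ops {
	void (*flush_walk)(unsigned long iova, size_t size, size_t granule,
			   void *cookie);
	void (*flush_leaf)(unsigned long iova, size_t size, size_t granule,
			   void *cookie);
};

/* Stage-1 range invalidation, analogous to arm_smmu_tlb_inv_range_s1(). */
static void inv_range_s1(unsigned long iova, size_t size, size_t granule,
			 void *cookie, bool leaf)
{
	printf("s1 inval: iova=%#lx size=%zu granule=%zu leaf=%d\n",
	       iova, size, granule, leaf);
}

/* Stand-in for arm_smmu_tlb_sync_context(). */
static void sync_context(void *cookie)
{
	printf("sync\n");
}

/* 'Native' wrappers: the helper is called directly, no indirection. */
static void inv_walk_s1(unsigned long iova, size_t size, size_t granule,
			void *cookie)
{
	inv_range_s1(iova, size, granule, cookie, false);
	sync_context(cookie);
}

static void inv_leaf_s1(unsigned long iova, size_t size, size_t granule,
			void *cookie)
{
	inv_range_s1(iova, size, granule, cookie, true);
	sync_context(cookie);
}

/* The ops table now names the stage-specific callbacks directly. */
static const struct flush_ops s1_ops = {
	.flush_walk	= inv_walk_s1,
	.flush_leaf	= inv_leaf_s1,
};

int main(void)
{
	/* Invalidate an 8KiB walk at IOVA 0x1000 in 4KiB granules. */
	s1_ops.flush_walk(0x1000, 0x2000, 0x1000, NULL);
	return 0;
}

One way to read the design choice, mirrored in the sketch: once every ops
table points at stage-specific wrappers, the shared generic wrappers and the
per-invalidation indirect call through ->tlb_inv_range serve no purpose, so
the member can be deleted outright, which is what the arm-smmu.h hunk does.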