mmu_hw_do_operation(pfdev, 0, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
}
-static void mmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
- size_t granule, bool leaf, void *cookie)
-{}
-
static void mmu_tlb_sync_context(void *cookie)
{
//struct panfrost_device *pfdev = cookie;
.tlb_flush_all = mmu_tlb_inv_context_s1,
.tlb_flush_walk = mmu_tlb_flush_walk,
.tlb_flush_leaf = mmu_tlb_flush_leaf,
- .tlb_add_flush = mmu_tlb_inv_range_nosync,
.tlb_sync = mmu_tlb_sync_context,
};
} while (size -= granule);
}
+static void arm_smmu_tlb_inv_page_nosync(unsigned long iova, size_t granule,
+ void *cookie)
+{
+ arm_smmu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
+}
+
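Most of the driver conversions below follow the same recipe as this hunk: keep the existing range-based invalidation helper and layer a trivial single-page wrapper on top of it. A generic sketch of that recipe (the drv_* names are placeholders, not symbols from this patch):

static void drv_tlb_add_page(unsigned long iova, size_t granule, void *cookie)
{
	/*
	 * One page of @granule bytes: pass granule as both size and
	 * granule, request a leaf invalidation, and leave the sync to
	 * the ->tlb_sync() callback.
	 */
	drv_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
}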
static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
size_t granule, void *cookie)
{
.tlb_flush_all = arm_smmu_tlb_inv_context,
.tlb_flush_walk = arm_smmu_tlb_inv_walk,
.tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
- .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
+ .tlb_add_page = arm_smmu_tlb_inv_page_nosync,
.tlb_sync = arm_smmu_tlb_sync,
};
ARM_SMMU_DOMAIN_BYPASS,
};
+struct arm_smmu_flush_ops {
+ struct iommu_flush_ops tlb;
+ void (*tlb_inv_range)(unsigned long iova, size_t size, size_t granule,
+ bool leaf, void *cookie);
+};
+
struct arm_smmu_domain {
struct arm_smmu_device *smmu;
struct io_pgtable_ops *pgtbl_ops;
- const struct iommu_flush_ops *tlb_ops;
+ const struct arm_smmu_flush_ops *flush_ops;
struct arm_smmu_cfg cfg;
enum arm_smmu_domain_stage stage;
bool non_strict;
size_t granule, void *cookie)
{
struct arm_smmu_domain *smmu_domain = cookie;
+ const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;

- smmu_domain->tlb_ops->tlb_add_flush(iova, size, granule, false, cookie);
- smmu_domain->tlb_ops->tlb_sync(cookie);
+ ops->tlb_inv_range(iova, size, granule, false, cookie);
+ ops->tlb.tlb_sync(cookie);
}
static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
size_t granule, void *cookie)
{
struct arm_smmu_domain *smmu_domain = cookie;
+ const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
+
+ ops->tlb_inv_range(iova, size, granule, true, cookie);
+ ops->tlb.tlb_sync(cookie);
+}
+
+static void arm_smmu_tlb_add_page(unsigned long iova, size_t granule,
+ void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+ const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;

- smmu_domain->tlb_ops->tlb_add_flush(iova, size, granule, true, cookie);
- smmu_domain->tlb_ops->tlb_sync(cookie);
+ ops->tlb_inv_range(iova, granule, granule, true, cookie);
}
-static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = {
- .tlb_flush_all = arm_smmu_tlb_inv_context_s1,
- .tlb_flush_walk = arm_smmu_tlb_inv_walk,
- .tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
- .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
- .tlb_sync = arm_smmu_tlb_sync_context,
+static const struct arm_smmu_flush_ops arm_smmu_s1_tlb_ops = {
+ .tlb = {
+ .tlb_flush_all = arm_smmu_tlb_inv_context_s1,
+ .tlb_flush_walk = arm_smmu_tlb_inv_walk,
+ .tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
+ .tlb_add_page = arm_smmu_tlb_add_page,
+ .tlb_sync = arm_smmu_tlb_sync_context,
+ },
+ .tlb_inv_range = arm_smmu_tlb_inv_range_nosync,
};
-static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
- .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
- .tlb_flush_walk = arm_smmu_tlb_inv_walk,
- .tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
- .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
- .tlb_sync = arm_smmu_tlb_sync_context,
+static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
+ .tlb = {
+ .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
+ .tlb_flush_walk = arm_smmu_tlb_inv_walk,
+ .tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
+ .tlb_add_page = arm_smmu_tlb_add_page,
+ .tlb_sync = arm_smmu_tlb_sync_context,
+ },
+ .tlb_inv_range = arm_smmu_tlb_inv_range_nosync,
};
-static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
- .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
- .tlb_flush_walk = arm_smmu_tlb_inv_walk,
- .tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
- .tlb_add_flush = arm_smmu_tlb_inv_vmid_nosync,
- .tlb_sync = arm_smmu_tlb_sync_vmid,
+static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
+ .tlb = {
+ .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
+ .tlb_flush_walk = arm_smmu_tlb_inv_walk,
+ .tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
+ .tlb_add_page = arm_smmu_tlb_add_page,
+ .tlb_sync = arm_smmu_tlb_sync_vmid,
+ },
+ .tlb_inv_range = arm_smmu_tlb_inv_vmid_nosync,
};
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
ias = min(ias, 32UL);
oas = min(oas, 32UL);
}
- smmu_domain->tlb_ops = &arm_smmu_s1_tlb_ops;
+ smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops;
break;
case ARM_SMMU_DOMAIN_NESTED:
/*
oas = min(oas, 40UL);
}
if (smmu->version == ARM_SMMU_V2)
- smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v2;
+ smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2;
else
- smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v1;
+ smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1;
break;
default:
ret = -EINVAL;
.ias = ias,
.oas = oas,
.coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
- .tlb = smmu_domain->tlb_ops,
+ .tlb = &smmu_domain->flush_ops->tlb,
.iommu_dev = smmu->dev,
};
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu = smmu_domain->smmu;
- if (smmu_domain->tlb_ops) {
+ if (smmu_domain->flush_ops) {
arm_smmu_rpm_get(smmu);
- smmu_domain->tlb_ops->tlb_flush_all(smmu_domain);
+ smmu_domain->flush_ops->tlb.tlb_flush_all(smmu_domain);
arm_smmu_rpm_put(smmu);
}
}
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu = smmu_domain->smmu;
- if (smmu_domain->tlb_ops) {
+ if (smmu_domain->flush_ops) {
arm_smmu_rpm_get(smmu);
- smmu_domain->tlb_ops->tlb_sync(smmu_domain);
+ smmu_domain->flush_ops->tlb.tlb_sync(smmu_domain);
arm_smmu_rpm_put(smmu);
}
}
return __arm_v7s_unmap(data, iova, size, 2, tablep);
}
- io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
+ io_pgtable_tlb_add_page(&data->iop, iova, size);
return size;
}
*/
smp_wmb();
} else {
- io_pgtable_tlb_add_flush(iop, iova, blk_size,
- blk_size, true);
+ io_pgtable_tlb_add_page(iop, iova, blk_size);
}
iova += blk_size;
}
WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}
-static void dummy_tlb_add_flush(unsigned long iova, size_t size,
- size_t granule, bool leaf, void *cookie)
+static void dummy_tlb_add_page(unsigned long iova, size_t granule, void *cookie)
{
- dummy_tlb_flush(iova, size, granule, cookie);
+ dummy_tlb_flush(iova, granule, granule, cookie);
}
static void dummy_tlb_sync(void *cookie)
.tlb_flush_all = dummy_tlb_flush_all,
.tlb_flush_walk = dummy_tlb_flush,
.tlb_flush_leaf = dummy_tlb_flush,
- .tlb_add_flush = dummy_tlb_add_flush,
+ .tlb_add_page = dummy_tlb_add_page,
.tlb_sync = dummy_tlb_sync,
};
tablep = iopte_deref(pte, data);
} else if (unmap_idx >= 0) {
- io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
+ io_pgtable_tlb_add_page(&data->iop, iova, size);
return size;
}
*/
smp_wmb();
} else {
- io_pgtable_tlb_add_flush(iop, iova, size, size, true);
+ io_pgtable_tlb_add_page(iop, iova, size);
}
return size;
WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}
-static void dummy_tlb_add_flush(unsigned long iova, size_t size,
- size_t granule, bool leaf, void *cookie)
+static void dummy_tlb_add_page(unsigned long iova, size_t granule, void *cookie)
{
- dummy_tlb_flush(iova, size, granule, cookie);
+ dummy_tlb_flush(iova, granule, granule, cookie);
}
static void dummy_tlb_sync(void *cookie)
.tlb_flush_all = dummy_tlb_flush_all,
.tlb_flush_walk = dummy_tlb_flush,
.tlb_flush_leaf = dummy_tlb_flush,
- .tlb_add_flush = dummy_tlb_add_flush,
+ .tlb_add_page = dummy_tlb_add_page,
.tlb_sync = dummy_tlb_sync,
};
ipmmu_tlb_flush_all(cookie);
}
-static void ipmmu_tlb_add_flush(unsigned long iova, size_t size,
- size_t granule, bool leaf, void *cookie)
-{
- /* The hardware doesn't support selective TLB flush. */
-}
-
static const struct iommu_flush_ops ipmmu_flush_ops = {
.tlb_flush_all = ipmmu_tlb_flush_all,
.tlb_flush_walk = ipmmu_tlb_flush,
.tlb_flush_leaf = ipmmu_tlb_flush,
- .tlb_add_flush = ipmmu_tlb_add_flush,
.tlb_sync = ipmmu_tlb_flush_all,
};
__flush_iotlb_sync(cookie);
}
+static void __flush_iotlb_page(unsigned long iova, size_t granule, void *cookie)
+{
+ __flush_iotlb_range(iova, granule, granule, true, cookie);
+}
+
static const struct iommu_flush_ops msm_iommu_flush_ops = {
.tlb_flush_all = __flush_iotlb,
.tlb_flush_walk = __flush_iotlb_walk,
.tlb_flush_leaf = __flush_iotlb_leaf,
- .tlb_add_flush = __flush_iotlb_range,
+ .tlb_add_page = __flush_iotlb_page,
.tlb_sync = __flush_iotlb_sync,
};
mtk_iommu_tlb_sync(cookie);
}
+static void mtk_iommu_tlb_flush_page_nosync(unsigned long iova, size_t granule,
+ void *cookie)
+{
+ mtk_iommu_tlb_add_flush_nosync(iova, granule, granule, true, cookie);
+}
+
static const struct iommu_flush_ops mtk_iommu_flush_ops = {
.tlb_flush_all = mtk_iommu_tlb_flush_all,
.tlb_flush_walk = mtk_iommu_tlb_flush_walk,
.tlb_flush_leaf = mtk_iommu_tlb_flush_leaf,
- .tlb_add_flush = mtk_iommu_tlb_add_flush_nosync,
+ .tlb_add_page = mtk_iommu_tlb_flush_page_nosync,
.tlb_sync = mtk_iommu_tlb_sync,
};
qcom_iommu_tlb_sync(cookie);
}
+static void qcom_iommu_tlb_add_page(unsigned long iova, size_t granule,
+ void *cookie)
+{
+ qcom_iommu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
+}
+
static const struct iommu_flush_ops qcom_flush_ops = {
.tlb_flush_all = qcom_iommu_tlb_inv_context,
.tlb_flush_walk = qcom_iommu_tlb_flush_walk,
.tlb_flush_leaf = qcom_iommu_tlb_flush_leaf,
- .tlb_add_flush = qcom_iommu_tlb_inv_range_nosync,
+ .tlb_add_page = qcom_iommu_tlb_add_page,
.tlb_sync = qcom_iommu_tlb_sync,
};
* address range.
* @tlb_flush_leaf: Synchronously invalidate all leaf TLB state for a virtual
* address range.
- * @tlb_add_flush: Optional callback to queue up leaf TLB invalidation for a
- * virtual address range. This function exists purely as an
- * optimisation for IOMMUs that cannot batch TLB invalidation
- * operations efficiently and are therefore better suited to
- * issuing them early rather than deferring them until
- * iommu_tlb_sync().
+ * @tlb_add_page: Optional callback to queue up leaf TLB invalidation for a
+ * single page. This function exists purely as an optimisation
+ * for IOMMUs that cannot batch TLB invalidation operations
+ * efficiently and are therefore better suited to issuing them
+ * early rather than deferring them until iommu_tlb_sync().
* @tlb_sync: Ensure any queued TLB invalidation has taken effect, and
* any corresponding page table updates are visible to the
* IOMMU.
void *cookie);
void (*tlb_flush_leaf)(unsigned long iova, size_t size, size_t granule,
void *cookie);
- void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule,
- bool leaf, void *cookie);
+ void (*tlb_add_page)(unsigned long iova, size_t granule, void *cookie);
void (*tlb_sync)(void *cookie);
};
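Putting the reworked structure together, a minimal hypothetical implementation might look like the sketch below. The foo_* helpers are stand-ins for whatever invalidation and sync sequence the hardware actually needs, and .tlb_add_page may be left NULL by drivers that cannot invalidate selectively, as in the ipmmu-vmsa hunk above:

static void foo_tlb_flush_all(void *cookie)
{
	foo_inv_all(cookie);			/* assumed helper */
	foo_sync(cookie);			/* assumed helper */
}

static void foo_tlb_flush_walk(unsigned long iova, size_t size,
			       size_t granule, void *cookie)
{
	foo_inv_range(iova, size, granule, cookie);	/* assumed helper */
	foo_sync(cookie);
}

static void foo_tlb_add_page(unsigned long iova, size_t granule, void *cookie)
{
	/* Queue one page; the sync is deferred to ->tlb_sync(). */
	foo_inv_range(iova, granule, granule, cookie);
}

static const struct iommu_flush_ops foo_flush_ops = {
	.tlb_flush_all	= foo_tlb_flush_all,
	.tlb_flush_walk	= foo_tlb_flush_walk,
	.tlb_flush_leaf	= foo_tlb_flush_walk,
	.tlb_add_page	= foo_tlb_add_page,	/* optional */
	.tlb_sync	= foo_sync,
};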
iop->cfg.tlb->tlb_flush_leaf(iova, size, granule, iop->cookie);
}
-static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop,
- unsigned long iova, size_t size, size_t granule, bool leaf)
+static inline void
+io_pgtable_tlb_add_page(struct io_pgtable *iop, unsigned long iova,
+ size_t granule)
{
- iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie);
+ if (iop->cfg.tlb->tlb_add_page)
+ iop->cfg.tlb->tlb_add_page(iova, granule, iop->cookie);
}
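For callers, the NULL check above makes the hook genuinely optional. A minimal sketch of the calling pattern in a page-table format's unmap path (a hypothetical function, simplified from the __arm_v7s_unmap()/__arm_lpae_unmap() hunks earlier in this patch):

static size_t example_unmap_leaf(struct io_pgtable *iop, unsigned long iova,
				 size_t pgsize)
{
	/* ... tear down the leaf PTE covering @iova ... */
	io_pgtable_tlb_add_page(iop, iova, pgsize); /* no-op if unimplemented */
	return pgsize;	/* the sync happens later, via io_pgtable_tlb_sync() */
}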
static inline void io_pgtable_tlb_sync(struct io_pgtable *iop)