struct io_pgtable_cfg cfg;
struct io_pgtable_ops *iop;
+ struct mtk_iommu_data *data;
struct iommu_domain domain;
};
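/*
 * A minimal sketch, assuming to_mtk_domain() used in the hunks below is the
 * usual container_of() downcast over the embedded iommu_domain member:
 *
 *	static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
 *	{
 *		return container_of(dom, struct mtk_iommu_domain, domain);
 *	}
 */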
return -EINVAL;
}
+ dom->data = data;
/* Update our supported page sizes bitmap */
dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
return 0;
phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
- struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
/* The "4GB mode" M4U physically can not use the lower remap of Dram. */
- if (data->enable_4GB)
+ if (dom->data->enable_4GB)
paddr |= BIT_ULL(32);
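/*
 * A worked example of the masking above (addresses illustrative, assuming
 * the DRAM alias sits below 4GB): a CPU-visible paddr of 0x40000000 becomes
 * 0x140000000 for the M4U once bit 32 is set.
 */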
/* Synchronize with the tlb_lock */
static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
- mtk_iommu_tlb_flush_all(mtk_iommu_get_m4u_data());
+ struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+
+ mtk_iommu_tlb_flush_all(dom->data);
}
static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
struct iommu_iotlb_gather *gather)
{
- struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
+ struct mtk_iommu_domain *dom = to_mtk_domain(domain);
size_t length = gather->end - gather->start + 1;
mtk_iommu_tlb_flush_range_sync(gather->start, length, gather->pgsize,
- data);
+ dom->data);
}
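/*
 * A short worked example of the inclusive gather range: gather->start =
 * 0x1000 and gather->end = 0x1fff give length = 0x1fff - 0x1000 + 1 =
 * 0x1000 bytes to flush.
 */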
static void mtk_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
size_t size)
{
- struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
+ struct mtk_iommu_domain *dom = to_mtk_domain(domain);
- mtk_iommu_tlb_flush_range_sync(iova, size, size, data);
+ mtk_iommu_tlb_flush_range_sync(iova, size, size, dom->data);
}
static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
- struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
phys_addr_t pa;
pa = dom->iop->iova_to_phys(dom->iop, iova);
- if (data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
+ if (dom->data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
pa &= ~BIT_ULL(32);
return pa;
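/*
 * The inverse of the masking in mtk_iommu_map(): with the same illustrative
 * addresses, a walked pa of 0x140000000 is at or above
 * MTK_IOMMU_4GB_MODE_REMAP_BASE, so clearing bit 32 returns the CPU-visible
 * 0x40000000.
 */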