static inline void amd_iommu_apply_ivrs_quirks(void) { }
#endif
-/* TODO: These are temporary and will be removed once fully transition */
-extern int iommu_map_page(struct protection_domain *dom,
-                          unsigned long bus_addr,
-                          unsigned long phys_addr,
-                          unsigned long page_size,
-                          int prot,
-                          gfp_t gfp);
-extern unsigned long iommu_unmap_page(struct protection_domain *dom,
-                                      unsigned long bus_addr,
-                                      unsigned long page_size);
-extern u64 *fetch_pte(struct amd_io_pgtable *pgtable,
-                      unsigned long address,
-                      unsigned long *page_size);
extern void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
                                         u64 *root, int mode);
#endif
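With the prototypes gone from the header, callers reach the v1 page table only through the generic io_pgtable callbacks. A simplified sketch of the struct nesting this relies on (member names as used in the hunks below; unrelated fields elided, so this is an illustration rather than the driver's verbatim definitions):

struct amd_io_pgtable {
	/* ... */
	struct io_pgtable	iop;	/* generic layer; iop.ops holds map/unmap/iova_to_phys */
	int			mode;	/* PAGE_MODE_* of the v1 table */
};

struct protection_domain {
	/* ... */
	struct amd_io_pgtable	iop;	/* per-domain page table state */
};

/*
 * So "&domain->iop.iop.ops" hands out the callbacks, while helpers such as
 * io_pgtable_ops_to_data() and io_pgtable_ops_to_domain() are container_of()
 * wrappers that walk back from the ops pointer to the enclosing structures.
 */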
* This function checks if there is a PTE for a given dma address. If
* there is one, it returns the pointer to it.
*/
-u64 *fetch_pte(struct amd_io_pgtable *pgtable,
-               unsigned long address,
-               unsigned long *page_size)
+static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
+                      unsigned long address,
+                      unsigned long *page_size)
{
int level;
u64 *pte;
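With fetch_pte() now static, its callers all live inside the io_pgtable code. A hedged sketch of how its result is typically consumed, loosely modeled on the v1 iova_to_phys path (IOMMU_PTE_PRESENT() and PM_ADDR_MASK are assumed to be the driver's existing PTE helpers; SME masking and edge cases are omitted):

static phys_addr_t example_walk(struct amd_io_pgtable *pgtable,
				unsigned long iova)
{
	unsigned long pte_pgsize, offset_mask;
	u64 *pte;

	/* Returns the PTE covering iova and reports the page size it maps. */
	pte = fetch_pte(pgtable, iova, &pte_pgsize);
	if (!pte || !IOMMU_PTE_PRESENT(*pte))
		return 0;

	/* Keep the offset inside the (possibly huge) page this PTE covers. */
	offset_mask = pte_pgsize - 1;
	return (*pte & PM_ADDR_MASK & ~offset_mask) | (iova & offset_mask);
}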
* supporting all features of AMD IOMMU page tables like level skipping
* and full 64 bit address spaces.
*/
-int iommu_map_page(struct protection_domain *dom,
-                   unsigned long iova,
-                   unsigned long paddr,
-                   unsigned long size,
-                   int prot,
-                   gfp_t gfp)
+static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
+                             phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
+ struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
struct page *freelist = NULL;
bool updated = false;
u64 __pte, *pte;
return ret;
}
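The new signature matches the io_pgtable callback it is about to be wired into. For reference, the relevant io_pgtable_ops members as they looked around this series (before the later map_pages/unmap_pages conversion), roughly:

struct io_pgtable_ops {
	int	(*map)(struct io_pgtable_ops *ops, unsigned long iova,
		       phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
	size_t	(*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
			 size_t size, struct iommu_iotlb_gather *gather);
	phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
				    unsigned long iova);
};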
-unsigned long iommu_unmap_page(struct protection_domain *dom,
-                               unsigned long iova,
-                               unsigned long size)
+static unsigned long iommu_v1_unmap_page(struct io_pgtable_ops *ops,
+                                         unsigned long iova,
+                                         size_t size,
+                                         struct iommu_iotlb_gather *gather)
{
- struct io_pgtable_ops *ops = &dom->iop.iop.ops;
struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
unsigned long long unmapped;
unsigned long unmap_size;
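The unmap side likewise recovers the amd_io_pgtable from the ops pointer instead of taking a protection_domain. The unmapped/unmap_size locals drive the walk over the requested range; a hypothetical, simplified version of such a walk (a full one would also handle huge-PTE replication via PAGE_SIZE_PTE_COUNT and holes in the mapping):

static unsigned long example_clear_range(struct amd_io_pgtable *pgtable,
					 unsigned long iova, size_t size)
{
	unsigned long long unmapped = 0;
	unsigned long unmap_size;
	u64 *pte;

	while (unmapped < size) {
		/* Find whatever PTE (4K or huge) currently covers iova. */
		pte = fetch_pte(pgtable, iova, &unmap_size);
		if (!pte)
			break;
		*pte = 0ULL;
		/* Advance by the page size that PTE accounted for. */
		iova      = (iova & ~(unmap_size - 1)) + unmap_size;
		unmapped += unmap_size;
	}

	return unmapped;
}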
cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE,
cfg->tlb = &v1_flush_ops;
+ pgtable->iop.ops.map = iommu_v1_map_page;
+ pgtable->iop.ops.unmap = iommu_v1_unmap_page;
pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys;
return &pgtable->iop;
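These assignments are what later makes ops->map and ops->unmap non-NULL in the driver-facing paths below. A hedged sketch of how a domain would pick the ops up through the generic framework (AMD_IOMMU_V1 is the io_pgtable format id this series registers; the hypothetical helper and its local cfg are for illustration only, and error handling is trimmed):

static int example_init_pgtable(struct protection_domain *domain)
{
	struct io_pgtable_cfg cfg = {};
	struct io_pgtable_ops *pgtbl_ops;

	/* Calls back into v1_alloc_pgtable(), which fills in the ops above. */
	pgtbl_ops = alloc_io_pgtable_ops(AMD_IOMMU_V1, &cfg, domain);
	if (!pgtbl_ops)
		return -ENOMEM;

	return 0;
}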
gfp_t gfp)
{
struct protection_domain *domain = to_pdomain(dom);
+ struct io_pgtable_ops *ops = &domain->iop.iop.ops;
int prot = 0;
- int ret;
+ int ret = -EINVAL;
if (domain->iop.mode == PAGE_MODE_NONE)
return -EINVAL;
if (iommu_prot & IOMMU_WRITE)
prot |= IOMMU_PROT_IW;
- ret = iommu_map_page(domain, iova, paddr, page_size, prot, gfp);
-
- domain_flush_np_cache(domain, iova, page_size);
+ if (ops->map) {
+ ret = ops->map(ops, iova, paddr, page_size, prot, gfp);
+ domain_flush_np_cache(domain, iova, page_size);
+ }
return ret;
}
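For callers of the core API nothing changes: a map request still lands here and, through ops->map, in iommu_v1_map_page(). An illustrative caller (iommu_map() shown with its prototype from this era, before it grew a gfp argument; SZ_2M is just one of the sizes AMD_IOMMU_PGSIZES permits):

static int example_map_one(struct iommu_domain *dom, unsigned long iova,
			   phys_addr_t paddr)
{
	/*
	 * IOMMU_READ/IOMMU_WRITE are the core prot flags; amd_iommu_map()
	 * above turns them into the AMD PTE bits IOMMU_PROT_IR/IOMMU_PROT_IW.
	 */
	return iommu_map(dom, iova, paddr, SZ_2M, IOMMU_READ | IOMMU_WRITE);
}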
struct iommu_iotlb_gather *gather)
{
struct protection_domain *domain = to_pdomain(dom);
+ struct io_pgtable_ops *ops = &domain->iop.iop.ops;
if (domain->iop.mode == PAGE_MODE_NONE)
return 0;
- return iommu_unmap_page(domain, iova, page_size);
+ return (ops->unmap) ? ops->unmap(ops, iova, page_size, gather) : 0;
}
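The gather argument is forwarded untouched, so deferred-flush users keep working. An illustrative teardown through the core helpers (example_unmap_range() is hypothetical; the gather/sync calls are the standard iommu core API):

static void example_unmap_range(struct iommu_domain *dom, unsigned long iova,
				size_t size)
{
	struct iommu_iotlb_gather gather;

	iommu_iotlb_gather_init(&gather);
	/* Ends up in ops->unmap via amd_iommu_unmap() above. */
	iommu_unmap_fast(dom, iova, size, &gather);
	/* One IOTLB flush for the whole range rather than per-page flushes. */
	iommu_iotlb_sync(dom, &gather);
}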
static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,