iommu/amd: Introduce iommu_v1_map_page and iommu_v1_unmap_page
author    Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
          Tue, 15 Dec 2020 07:37:04 +0000 (01:37 -0600)
committer Joerg Roedel <jroedel@suse.de>
          Thu, 28 Jan 2021 15:51:18 +0000 (16:51 +0100)
These implement map and unmap for the AMD IOMMU v1 page table, which
will be used by the io-pgtable framework.

Also clean up unused extern function declarations.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Link: https://lore.kernel.org/r/20201215073705.123786-13-suravee.suthikulpanit@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/amd/amd_iommu.h
drivers/iommu/amd/io_pgtable.c
drivers/iommu/amd/iommu.c
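
For context, after this patch callers reach the v1 page table through the
generic struct io_pgtable_ops table rather than the old AMD-specific entry
points. Below is a minimal sketch of that calling convention, assuming the
v5.11-era io-pgtable signatures; example_map()/example_unmap() are
hypothetical helpers (not part of the patch), and "prot" uses the AMD
IOMMU_PROT_* encoding that amd_iommu_map() builds from IOMMU_READ/IOMMU_WRITE:

    #include <linux/io-pgtable.h>
    #include <linux/iommu.h>

    /*
     * Sketch only: once v1_alloc_pgtable() has installed
     * iommu_v1_map_page()/iommu_v1_unmap_page() in pgtable->iop.ops, a
     * caller holding the io_pgtable_ops pointer can drive the page table
     * without knowing the AMD-specific functions by name.
     */
    static int example_map(struct io_pgtable_ops *ops, unsigned long iova,
                           phys_addr_t paddr, size_t size, int prot)
    {
            if (!ops->map)          /* format provided no map callback */
                    return -EINVAL;

            /* Dispatches to iommu_v1_map_page() for the AMD v1 format. */
            return ops->map(ops, iova, paddr, size, prot, GFP_KERNEL);
    }

    static size_t example_unmap(struct io_pgtable_ops *ops, unsigned long iova,
                                size_t size, struct iommu_iotlb_gather *gather)
    {
            /* Dispatches to iommu_v1_unmap_page(); returns bytes unmapped. */
            return ops->unmap ? ops->unmap(ops, iova, size, gather) : 0;
    }

This mirrors the amd_iommu_map()/amd_iommu_unmap() changes in the last two
hunks below, including the NULL check on the callback before dispatching.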

diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 83ca822..3770b1a 100644
@@ -133,19 +133,6 @@ void amd_iommu_apply_ivrs_quirks(void);
 static inline void amd_iommu_apply_ivrs_quirks(void) { }
 #endif
 
-/* TODO: These are temporary and will be removed once fully transition */
-extern int iommu_map_page(struct protection_domain *dom,
-                         unsigned long bus_addr,
-                         unsigned long phys_addr,
-                         unsigned long page_size,
-                         int prot,
-                         gfp_t gfp);
-extern unsigned long iommu_unmap_page(struct protection_domain *dom,
-                                     unsigned long bus_addr,
-                                     unsigned long page_size);
-extern u64 *fetch_pte(struct amd_io_pgtable *pgtable,
-                     unsigned long address,
-                     unsigned long *page_size);
 extern void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
                                         u64 *root, int mode);
 #endif
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index b70eb79..1c4961e 100644
@@ -311,9 +311,9 @@ static u64 *alloc_pte(struct protection_domain *domain,
  * This function checks if there is a PTE for a given dma address. If
  * there is one, it returns the pointer to it.
  */
-u64 *fetch_pte(struct amd_io_pgtable *pgtable,
-              unsigned long address,
-              unsigned long *page_size)
+static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
+                     unsigned long address,
+                     unsigned long *page_size)
 {
        int level;
        u64 *pte;
@@ -386,13 +386,10 @@ static struct page *free_clear_pte(u64 *pte, u64 pteval, struct page *freelist)
  * supporting all features of AMD IOMMU page tables like level skipping
  * and full 64 bit address spaces.
  */
-int iommu_map_page(struct protection_domain *dom,
-                  unsigned long iova,
-                  unsigned long paddr,
-                  unsigned long size,
-                  int prot,
-                  gfp_t gfp)
+static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
+                         phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
+       struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
        struct page *freelist = NULL;
        bool updated = false;
        u64 __pte, *pte;
@@ -455,11 +452,11 @@ out:
        return ret;
 }
 
-unsigned long iommu_unmap_page(struct protection_domain *dom,
-                              unsigned long iova,
-                              unsigned long size)
+static unsigned long iommu_v1_unmap_page(struct io_pgtable_ops *ops,
+                                     unsigned long iova,
+                                     size_t size,
+                                     struct iommu_iotlb_gather *gather)
 {
-       struct io_pgtable_ops *ops = &dom->iop.iop.ops;
        struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
        unsigned long long unmapped;
        unsigned long unmap_size;
@@ -548,6 +545,8 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
        cfg->oas            = IOMMU_OUT_ADDR_BIT_SIZE,
        cfg->tlb            = &v1_flush_ops;
 
+       pgtable->iop.ops.map          = iommu_v1_map_page;
+       pgtable->iop.ops.unmap        = iommu_v1_unmap_page;
        pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys;
 
        return &pgtable->iop;
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 144ea91..256d38a 100644
@@ -2065,8 +2065,9 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
                         gfp_t gfp)
 {
        struct protection_domain *domain = to_pdomain(dom);
+       struct io_pgtable_ops *ops = &domain->iop.iop.ops;
        int prot = 0;
-       int ret;
+       int ret = -EINVAL;
 
        if (domain->iop.mode == PAGE_MODE_NONE)
                return -EINVAL;
@@ -2076,9 +2077,10 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
        if (iommu_prot & IOMMU_WRITE)
                prot |= IOMMU_PROT_IW;
 
-       ret = iommu_map_page(domain, iova, paddr, page_size, prot, gfp);
-
-       domain_flush_np_cache(domain, iova, page_size);
+       if (ops->map) {
+               ret = ops->map(ops, iova, paddr, page_size, prot, gfp);
+               domain_flush_np_cache(domain, iova, page_size);
+       }
 
        return ret;
 }
@@ -2088,11 +2090,12 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
                              struct iommu_iotlb_gather *gather)
 {
        struct protection_domain *domain = to_pdomain(dom);
+       struct io_pgtable_ops *ops = &domain->iop.iop.ops;
 
        if (domain->iop.mode == PAGE_MODE_NONE)
                return 0;
 
-       return iommu_unmap_page(domain, iova, page_size);
+       return (ops->unmap) ? ops->unmap(ops, iova, page_size, gather) : 0;
 }
 
 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,