iommu/amd: Refactor fetch_pte to use struct amd_io_pgtable
author: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Tue, 15 Dec 2020 07:37:02 +0000 (01:37 -0600)
committer: Joerg Roedel <jroedel@suse.de>
Thu, 28 Jan 2021 15:51:18 +0000 (16:51 +0100)
Simplify the fetch_pte() function by passing a struct amd_io_pgtable pointer directly, instead of deriving it from struct protection_domain inside the function. There is no functional change.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Link: https://lore.kernel.org/r/20201215073705.123786-11-suravee.suthikulpanit@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/amd/amd_iommu.h
drivers/iommu/amd/io_pgtable.c
drivers/iommu/amd/iommu.c

index 76276d9..83ca822 100644 (file)
@@ -143,7 +143,7 @@ extern int iommu_map_page(struct protection_domain *dom,
 extern unsigned long iommu_unmap_page(struct protection_domain *dom,
                                      unsigned long bus_addr,
                                      unsigned long page_size);
-extern u64 *fetch_pte(struct protection_domain *domain,
+extern u64 *fetch_pte(struct amd_io_pgtable *pgtable,
                      unsigned long address,
                      unsigned long *page_size);
 extern void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
index af6b7f1..d7924eb 100644 (file)
@@ -311,7 +311,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
  * This function checks if there is a PTE for a given dma address. If
  * there is one, it returns the pointer to it.
  */
-u64 *fetch_pte(struct protection_domain *domain,
+u64 *fetch_pte(struct amd_io_pgtable *pgtable,
               unsigned long address,
               unsigned long *page_size)
 {
@@ -320,11 +320,11 @@ u64 *fetch_pte(struct protection_domain *domain,
 
        *page_size = 0;
 
-       if (address > PM_LEVEL_SIZE(domain->iop.mode))
+       if (address > PM_LEVEL_SIZE(pgtable->mode))
                return NULL;
 
-       level      =  domain->iop.mode - 1;
-       pte        = &domain->iop.root[PM_LEVEL_INDEX(level, address)];
+       level      =  pgtable->mode - 1;
+       pte        = &pgtable->root[PM_LEVEL_INDEX(level, address)];
        *page_size =  PTE_LEVEL_PAGE_SIZE(level);
 
        while (level > 0) {
@@ -459,6 +459,8 @@ unsigned long iommu_unmap_page(struct protection_domain *dom,
                               unsigned long iova,
                               unsigned long size)
 {
+       struct io_pgtable_ops *ops = &dom->iop.iop.ops;
+       struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
        unsigned long long unmapped;
        unsigned long unmap_size;
        u64 *pte;
@@ -468,8 +470,7 @@ unsigned long iommu_unmap_page(struct protection_domain *dom,
        unmapped = 0;
 
        while (unmapped < size) {
-               pte = fetch_pte(dom, iova, &unmap_size);
-
+               pte = fetch_pte(pgtable, iova, &unmap_size);
                if (pte) {
                        int i, count;
 
index bba3d18..f1a4f53 100644 (file)
@@ -2099,13 +2099,15 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
                                          dma_addr_t iova)
 {
        struct protection_domain *domain = to_pdomain(dom);
+       struct io_pgtable_ops *ops = &domain->iop.iop.ops;
+       struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
        unsigned long offset_mask, pte_pgsize;
        u64 *pte, __pte;
 
        if (domain->iop.mode == PAGE_MODE_NONE)
                return iova;
 
-       pte = fetch_pte(domain, iova, &pte_pgsize);
+       pte = fetch_pte(pgtable, iova, &pte_pgsize);
 
        if (!pte || !IOMMU_PTE_PRESENT(*pte))
                return 0;