From fc65d0acaf23179b94de399c204328fa259acb90 Mon Sep 17 00:00:00 2001
From: Nadav Amit
Date: Fri, 23 Jul 2021 02:32:03 -0700
Subject: [PATCH] iommu/amd: Selective flush on unmap

A recent patch attempted to enable selective page flushes on AMD IOMMU
but neglected to adapt amd_iommu_iotlb_sync() to use the selective
flushes.

Adapt amd_iommu_iotlb_sync() to use selective flushes and change
amd_iommu_unmap() to collect the flushes. As a defensive measure, to
avoid potential issues such as those that the Intel IOMMU driver
encountered recently, flush the page-walk caches by always setting the
"pde" parameter. This can be removed later.

Cc: Joerg Roedel
Cc: Will Deacon
Cc: Jiajun Cao
Cc: Robin Murphy
Cc: Lu Baolu
Cc: iommu@lists.linux-foundation.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Nadav Amit
Link: https://lore.kernel.org/r/20210723093209.714328-2-namit@vmware.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/amd/iommu.c | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index a7d6d78..5e4b0ee 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2060,12 +2060,17 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
 {
 	struct protection_domain *domain = to_pdomain(dom);
 	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
+	size_t r;
 
 	if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
 	    (domain->iop.mode == PAGE_MODE_NONE))
 		return 0;
 
-	return (ops->unmap) ? ops->unmap(ops, iova, page_size, gather) : 0;
+	r = (ops->unmap) ? ops->unmap(ops, iova, page_size, gather) : 0;
+
+	iommu_iotlb_gather_add_page(dom, gather, iova, page_size);
+
+	return r;
 }
 
 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
@@ -2168,7 +2173,13 @@ static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
 static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
 				 struct iommu_iotlb_gather *gather)
 {
-	amd_iommu_flush_iotlb_all(domain);
+	struct protection_domain *dom = to_pdomain(domain);
+	unsigned long flags;
+
+	spin_lock_irqsave(&dom->lock, flags);
+	__domain_flush_pages(dom, gather->start, gather->end - gather->start, 1);
+	amd_iommu_domain_flush_complete(dom);
+	spin_unlock_irqrestore(&dom->lock, flags);
 }
 
 static int amd_iommu_def_domain_type(struct device *dev)
--
2.7.4
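
For reference, the sketch below shows how a caller of the core IOMMU API
ends up exercising the two callbacks changed above: unmaps are batched
into an iommu_iotlb_gather and one sync at the end flushes the
accumulated range. This is illustrative only and not part of the patch;
example_unmap_range() and its parameters are made-up names, while
iommu_iotlb_gather_init(), iommu_unmap_fast() and iommu_iotlb_sync() are
the existing core helpers.

/*
 * Illustrative sketch (not part of the patch): unmap a range page by
 * page, batching the TLB invalidations instead of flushing per page.
 */
#include <linux/iommu.h>

static void example_unmap_range(struct iommu_domain *domain,
				unsigned long iova, size_t size,
				size_t pgsize)
{
	struct iommu_iotlb_gather gather;
	unsigned long end = iova + size;

	iommu_iotlb_gather_init(&gather);

	/*
	 * Each iommu_unmap_fast() call reaches amd_iommu_unmap(), which
	 * now records the unmapped page in the gather via
	 * iommu_iotlb_gather_add_page() instead of flushing immediately.
	 */
	while (iova < end) {
		iommu_unmap_fast(domain, iova, pgsize, &gather);
		iova += pgsize;
	}

	/*
	 * A single sync reaches amd_iommu_iotlb_sync(), which now flushes
	 * only the gathered range (with "pde" set) rather than the whole
	 * domain.
	 */
	iommu_iotlb_sync(domain, &gather);
}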