iommu/amd: Add support for fast IOTLB flushing
author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Wed, 21 Feb 2018 07:19:45 +0000 (14:19 +0700)
committer Joerg Roedel <jroedel@suse.de>
Thu, 15 Mar 2018 12:37:34 +0000 (13:37 +0100)
Implement the fast IOTLB flushing interface: remove the TLB flush
from amd_iommu_unmap() so that unmaps can be batched, and flush once
through the new callbacks instead. Since the AMD IOMMU driver
currently flushes all IOTLB entries whenever more than one page is
unmapped, use the same handler for both iommu_ops.flush_iotlb_all()
and iommu_ops.iotlb_sync().
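
As a minimal sketch (not part of this patch), a caller can now unmap
without an implicit flush and pay for a single IOTLB flush per batch,
using the iommu_unmap_fast()/iommu_tlb_range_add()/iommu_tlb_sync()
helpers from include/linux/iommu.h; the function name below is
hypothetical:

	static void unmap_batched(struct iommu_domain *domain,
				  unsigned long iova, size_t size)
	{
		/* Unmap without flushing; stale IOTLB entries may remain. */
		size_t unmapped = iommu_unmap_fast(domain, iova, size);

		/* Record the range to flush; a no-op in this driver,
		 * which always flushes the whole TLB. */
		iommu_tlb_range_add(domain, iova, unmapped);

		/* One flush for the whole batch; this ends up in
		 * amd_iommu_flush_iotlb_all() via .iotlb_sync. */
		iommu_tlb_sync(domain);
	}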

Cc: Joerg Roedel <joro@8bytes.org>
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 14efeb3..997a947 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3056,9 +3056,6 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
        unmap_size = iommu_unmap_page(domain, iova, page_size);
        mutex_unlock(&domain->api_lock);
 
-       domain_flush_tlb_pde(domain);
-       domain_flush_complete(domain);
-
        return unmap_size;
 }
 
@@ -3176,6 +3173,19 @@ static bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
        return dev_data->defer_attach;
 }
 
+static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
+{
+       struct protection_domain *dom = to_pdomain(domain);
+
+       domain_flush_tlb_pde(dom);
+       domain_flush_complete(dom);
+}
+
+static void amd_iommu_iotlb_range_add(struct iommu_domain *domain,
+                                     unsigned long iova, size_t size)
+{
+}
+
 const struct iommu_ops amd_iommu_ops = {
        .capable = amd_iommu_capable,
        .domain_alloc = amd_iommu_domain_alloc,
@@ -3194,6 +3204,9 @@ const struct iommu_ops amd_iommu_ops = {
        .apply_resv_region = amd_iommu_apply_resv_region,
        .is_attach_deferred = amd_iommu_is_attach_deferred,
        .pgsize_bitmap  = AMD_IOMMU_PGSIZES,
+       .flush_iotlb_all = amd_iommu_flush_iotlb_all,
+       .iotlb_range_add = amd_iommu_iotlb_range_add,
+       .iotlb_sync = amd_iommu_flush_iotlb_all,
 };
 
 /*****************************************************************************
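
A note on the empty amd_iommu_iotlb_range_add() above: because the
driver falls back to flushing the whole TLB whenever more than one
page is involved, there is no per-range state worth accumulating, so
the callback is an intentional no-op. Purely for illustration (none
of the names below exist in this driver), a range-tracking variant
could accumulate a flush window and sync only that window later:

	/* Hypothetical per-domain bookkeeping, NOT in this patch. */
	struct flush_window {
		unsigned long start;	/* lowest queued IOVA */
		unsigned long end;	/* highest queued IOVA + size */
	};

	static void range_add_sketch(struct flush_window *w,
				     unsigned long iova, size_t size)
	{
		w->start = min(w->start, iova);
		w->end   = max(w->end, iova + size);
	}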