iommu/vt-d: Fix incorrect cache invalidation for mm notification
author Lu Baolu <baolu.lu@linux.intel.com>
Wed, 22 Nov 2023 03:26:07 +0000 (11:26 +0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 8 Dec 2023 07:52:18 +0000 (08:52 +0100)
commit e7ad6c2a4b1aa710db94060b716f53c812cef565 upstream.

Commit 6bbd42e2df8f ("mmu_notifiers: call invalidate_range() when
invalidating TLBs") moved the secondary TLB invalidations into the TLB
invalidation functions to ensure that all secondary TLB invalidations
happen at the same time as the CPU invalidation and added a flush-all
type of secondary TLB invalidation for the batched mode, where a range
of [0, -1UL) is used to indicate that the range extends to the end of the
the address space.
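
For reference, the batched caller side looks roughly like this (a sketch
only; the actual call site lives in the core mm/TLB code introduced by
6bbd42e2df8f, not in this patch):

	/* Batched mode: signal "flush the whole address space" to the
	 * secondary TLBs with the catch-all range [0, -1UL).
	 */
	mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0UL, -1UL);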

However, using an end address of -1UL caused an overflow in the Intel
IOMMU driver: when the end address was rounded up to the next page
boundary, the addition wrapped around and the computed page count came
out as zero. As a result, neither the IOTLB nor the device ATC was
invalidated correctly.
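
A worked sketch of the wraparound, assuming 4KiB pages (PAGE_SIZE ==
4096, VTD_PAGE_SHIFT == 12); the expression mirrors the page-count
computation in intel_arch_invalidate_secondary_tlbs() below:

	unsigned long start = 0, end = -1UL;	/* flush-all range */
	unsigned long pages = (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;
	/* -1UL + 0xfff wraps to 0xffe, so pages == 0: nothing is flushed */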

Add a flush-all helper function and call it when the invalidation range
is from 0 to -1UL, ensuring that the caches are invalidated in their
entirety.

Fixes: 6bbd42e2df8f ("mmu_notifiers: call invalidate_range() when invalidating TLBs")
Cc: stable@vger.kernel.org
Cc: Huang Ying <ying.huang@intel.com>
Cc: Alistair Popple <apopple@nvidia.com>
Tested-by: Luo Yuzhang <yuzhang.luo@intel.com> # QAT
Tested-by: Tony Zhu <tony.zhu@intel.com> # DSA
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Alistair Popple <apopple@nvidia.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Link: https://lore.kernel.org/r/20231117090933.75267-1-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 50a481c895b867202a4317afb7ef19a5666eafe5..ac12f76c1212ac5f8f3a835f9afc1a0f6737af14 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -216,6 +216,27 @@ static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
        rcu_read_unlock();
 }
 
+static void intel_flush_svm_all(struct intel_svm *svm)
+{
+       struct device_domain_info *info;
+       struct intel_svm_dev *sdev;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(sdev, &svm->devs, list) {
+               info = dev_iommu_priv_get(sdev->dev);
+
+               qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, 0, -1UL, 0);
+               if (info->ats_enabled) {
+                       qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
+                                                svm->pasid, sdev->qdep,
+                                                0, 64 - VTD_PAGE_SHIFT);
+                       quirk_extra_dev_tlb_flush(info, 0, 64 - VTD_PAGE_SHIFT,
+                                                 svm->pasid, sdev->qdep);
+               }
+       }
+       rcu_read_unlock();
+}
+
 /* Pages have been freed at this point */
 static void intel_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
                                        struct mm_struct *mm,
@@ -223,6 +244,11 @@ static void intel_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
 {
        struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
 
+       if (start == 0 && end == -1UL) {
+               intel_flush_svm_all(svm);
+               return;
+       }
+
        intel_flush_svm_range(svm, start,
                              (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
 }