iommu/vt-d: Add domain_flush_pasid_iotlb()
author	Lu Baolu <baolu.lu@linux.intel.com>
Wed, 9 Aug 2023 12:47:56 +0000 (20:47 +0800)
committer	Joerg Roedel <jroedel@suse.de>
Wed, 9 Aug 2023 15:44:37 +0000 (17:44 +0200)
The VT-d spec requires the PASID-based-IOTLB invalidation descriptor to
be used to invalidate the IOTLB and the paging-structure caches for a
first-stage page table. Add a generic helper to do this.
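
For reference, a minimal sketch of how such a PASID-based-IOTLB (P-IOTLB)
invalidation descriptor is built and queued is shown below. It is modeled
on qi_flush_piotlb() and the QI_EIOTLB_* descriptor macros, but it is an
illustration only and simplifies details of the real implementation:

/*
 * Illustrative sketch only: build a PASID-based-IOTLB invalidation
 * descriptor and submit it to the invalidation queue. Details are
 * simplified; see qi_flush_piotlb() for the actual implementation.
 */
static void example_flush_piotlb(struct intel_iommu *iommu, u16 did,
                                 u32 pasid, u64 addr, unsigned long npages,
                                 bool ih)
{
        struct qi_desc desc = {};

        if (npages == -1) {
                /* Non-global granularity: flush all entries for the PASID. */
                desc.qw0 = QI_EIOTLB_PASID(pasid) | QI_EIOTLB_DID(did) |
                           QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | QI_EIOTLB_TYPE;
        } else {
                /* Page-selective-within-PASID granularity. */
                int mask = ilog2(__roundup_pow_of_two(npages));

                desc.qw0 = QI_EIOTLB_PASID(pasid) | QI_EIOTLB_DID(did) |
                           QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) | QI_EIOTLB_TYPE;
                desc.qw1 = QI_EIOTLB_ADDR(addr) | QI_EIOTLB_IH(ih) |
                           QI_EIOTLB_AM(mask);
        }

        /* Submit one descriptor and wait for it to complete. */
        qi_submit_sync(iommu, &desc, 1, 0);
}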

RID2PASID is used if the domain has been attached to a physical device;
otherwise, the real PASIDs that the domain has been attached with are
used. The 'real' PASID attachment is handled in a subsequent change.
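
To illustrate that direction, here is a hedged sketch of how the helper
could iterate real PASID attachments in addition to RID2PASID. The
per-domain PASID list and its entry type are assumed names used only for
illustration, not the actual follow-up code:

/*
 * Hypothetical sketch of the follow-up: flush for every PASID the
 * domain has been attached with, plus RID2PASID when a physical
 * device is attached. The 'dev_pasids' list and 'struct
 * dev_pasid_info' are assumptions for illustration.
 */
static void domain_flush_pasid_iotlb_sketch(struct intel_iommu *iommu,
                                            struct dmar_domain *domain,
                                            u64 addr, unsigned long npages,
                                            bool ih)
{
        u16 did = domain_id_iommu(domain, iommu);
        struct dev_pasid_info *dev_pasid;
        unsigned long flags;

        spin_lock_irqsave(&domain->lock, flags);
        /* Flush for each real PASID attachment (assumed list). */
        list_for_each_entry(dev_pasid, &domain->dev_pasids, link_domain)
                qi_flush_piotlb(iommu, did, dev_pasid->pasid, addr, npages, ih);

        /* Flush for RID2PASID if any physical device is attached. */
        if (!list_empty(&domain->devices))
                qi_flush_piotlb(iommu, did, IOMMU_NO_PASID, addr, npages, ih);
        spin_unlock_irqrestore(&domain->lock, flags);
}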

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Jacob Pan <jacob.jun.pan@linux.intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Link: https://lore.kernel.org/r/20230802212427.1497170-4-jacob.jun.pan@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/intel/iommu.c

index ddff43d..ecb7e44 100644
@@ -1467,6 +1467,18 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
        spin_unlock_irqrestore(&domain->lock, flags);
 }
 
+static void domain_flush_pasid_iotlb(struct intel_iommu *iommu,
+                                    struct dmar_domain *domain, u64 addr,
+                                    unsigned long npages, bool ih)
+{
+       u16 did = domain_id_iommu(domain, iommu);
+       unsigned long flags;
+
+       spin_lock_irqsave(&domain->lock, flags);
+       qi_flush_piotlb(iommu, did, IOMMU_NO_PASID, addr, npages, ih);
+       spin_unlock_irqrestore(&domain->lock, flags);
+}
+
 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
                                  struct dmar_domain *domain,
                                  unsigned long pfn, unsigned int pages,
@@ -1484,7 +1496,7 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
                ih = 1 << 6;
 
        if (domain->use_first_level) {
-               qi_flush_piotlb(iommu, did, IOMMU_NO_PASID, addr, pages, ih);
+               domain_flush_pasid_iotlb(iommu, domain, addr, pages, ih);
        } else {
                unsigned long bitmask = aligned_pages - 1;
 
@@ -1554,7 +1566,7 @@ static void intel_flush_iotlb_all(struct iommu_domain *domain)
                u16 did = domain_id_iommu(dmar_domain, iommu);
 
                if (dmar_domain->use_first_level)
-                       qi_flush_piotlb(iommu, did, IOMMU_NO_PASID, 0, -1, 0);
+                       domain_flush_pasid_iotlb(iommu, dmar_domain, 0, -1, 0);
                else
                        iommu->flush.flush_iotlb(iommu, did, 0, 0,
                                                 DMA_TLB_DSI_FLUSH);