[platform/kernel/linux-starfive.git] / drivers/iommu/sun50i-iommu.c
index df871af..5b585ea 100644
@@ -93,6 +93,8 @@
 #define NUM_PT_ENTRIES                 256
 #define PT_SIZE                                (NUM_PT_ENTRIES * PT_ENTRY_SIZE)
 
+#define SPAGE_SIZE                     4096
+
 struct sun50i_iommu {
        struct iommu_device iommu;
 
@@ -295,6 +297,62 @@ static void sun50i_table_flush(struct sun50i_iommu_domain *sun50i_domain,
        dma_sync_single_for_device(iommu->dev, dma, size, DMA_TO_DEVICE);
 }
 
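+/*
+ * Invalidate the TLB entry covering a single page: write the address
+ * and a page-sized mask to the invalidation registers, kick the
+ * operation and poll until the hardware clears the enable bit.
+ */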
+static void sun50i_iommu_zap_iova(struct sun50i_iommu *iommu,
+                                 unsigned long iova)
+{
+       u32 reg;
+       int ret;
+
+       iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_REG, iova);
+       iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_MASK_REG, GENMASK(31, 12));
+       iommu_write(iommu, IOMMU_TLB_IVLD_ENABLE_REG,
+                   IOMMU_TLB_IVLD_ENABLE_ENABLE);
+
+       ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_IVLD_ENABLE_REG,
+                                       reg, !reg, 1, 2000);
+       if (ret)
+               dev_warn(iommu->dev, "TLB invalidation timed out!\n");
+}
+
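+/*
+ * Invalidate the page table walk (PTW) cache entry covering @iova,
+ * again polling until the hardware signals completion.
+ */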
+static void sun50i_iommu_zap_ptw_cache(struct sun50i_iommu *iommu,
+                                      unsigned long iova)
+{
+       u32 reg;
+       int ret;
+
+       iommu_write(iommu, IOMMU_PC_IVLD_ADDR_REG, iova);
+       iommu_write(iommu, IOMMU_PC_IVLD_ENABLE_REG,
+                   IOMMU_PC_IVLD_ENABLE_ENABLE);
+
+       ret = readl_poll_timeout_atomic(iommu->base + IOMMU_PC_IVLD_ENABLE_REG,
+                                       reg, !reg, 1, 2000);
+       if (ret)
+               dev_warn(iommu->dev, "PTW cache invalidation timed out!\n");
+}
+
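+/*
+ * Drop cached translations for a mapped range. TLB entries are zapped
+ * with 4 KiB granularity and PTW cache entries with 1 MiB granularity,
+ * around both ends of the range. Hardware auto-gating is disabled
+ * while the invalidation registers are accessed and re-enabled once
+ * the range has been zapped.
+ */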
+static void sun50i_iommu_zap_range(struct sun50i_iommu *iommu,
+                                  unsigned long iova, size_t size)
+{
+       assert_spin_locked(&iommu->iommu_lock);
+
+       iommu_write(iommu, IOMMU_AUTO_GATING_REG, 0);
+
+       sun50i_iommu_zap_iova(iommu, iova);
+       sun50i_iommu_zap_iova(iommu, iova + SPAGE_SIZE);
+       if (size > SPAGE_SIZE) {
+               sun50i_iommu_zap_iova(iommu, iova + size);
+               sun50i_iommu_zap_iova(iommu, iova + size + SPAGE_SIZE);
+       }
+       sun50i_iommu_zap_ptw_cache(iommu, iova);
+       sun50i_iommu_zap_ptw_cache(iommu, iova + SZ_1M);
+       if (size > SZ_1M) {
+               sun50i_iommu_zap_ptw_cache(iommu, iova + size);
+               sun50i_iommu_zap_ptw_cache(iommu, iova + size + SZ_1M);
+       }
+
+       iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE);
+}
+
 static int sun50i_iommu_flush_all_tlb(struct sun50i_iommu *iommu)
 {
        u32 reg;
@@ -344,6 +402,18 @@ static void sun50i_iommu_flush_iotlb_all(struct iommu_domain *domain)
        spin_unlock_irqrestore(&iommu->iommu_lock, flags);
 }
 
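+/*
+ * .iotlb_sync_map callback: called by the IOMMU core after a range has
+ * been mapped; drop any stale cached translations for it under the
+ * IOMMU lock.
+ */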
+static void sun50i_iommu_iotlb_sync_map(struct iommu_domain *domain,
+                                       unsigned long iova, size_t size)
+{
+       struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
+       struct sun50i_iommu *iommu = sun50i_domain->iommu;
+       unsigned long flags;
+
+       spin_lock_irqsave(&iommu->iommu_lock, flags);
+       sun50i_iommu_zap_range(iommu, iova, size);
+       spin_unlock_irqrestore(&iommu->iommu_lock, flags);
+}
+
 static void sun50i_iommu_iotlb_sync(struct iommu_domain *domain,
                                    struct iommu_iotlb_gather *gather)
 {
@@ -512,7 +582,7 @@ static u32 *sun50i_dte_get_page_table(struct sun50i_iommu_domain *sun50i_domain,
                sun50i_iommu_free_page_table(iommu, drop_pt);
        }
 
-       sun50i_table_flush(sun50i_domain, page_table, PT_SIZE);
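+       /* sun50i_table_flush() takes an entry count, not a size in bytes */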
+       sun50i_table_flush(sun50i_domain, page_table, NUM_PT_ENTRIES);
        sun50i_table_flush(sun50i_domain, dte_addr, 1);
 
        return page_table;
@@ -602,7 +672,6 @@ static struct iommu_domain *sun50i_iommu_domain_alloc(unsigned type)
        struct sun50i_iommu_domain *sun50i_domain;
 
        if (type != IOMMU_DOMAIN_DMA &&
-           type != IOMMU_DOMAIN_IDENTITY &&
            type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;
 
@@ -767,6 +836,7 @@ static const struct iommu_ops sun50i_iommu_ops = {
                .attach_dev     = sun50i_iommu_attach_device,
                .detach_dev     = sun50i_iommu_detach_device,
                .flush_iotlb_all = sun50i_iommu_flush_iotlb_all,
+               .iotlb_sync_map = sun50i_iommu_iotlb_sync_map,
                .iotlb_sync     = sun50i_iommu_iotlb_sync,
                .iova_to_phys   = sun50i_iommu_iova_to_phys,
                .map            = sun50i_iommu_map,
@@ -786,6 +856,8 @@ static void sun50i_iommu_report_fault(struct sun50i_iommu *iommu,
                report_iommu_fault(iommu->domain, iommu->dev, iova, prot);
        else
                dev_err(iommu->dev, "Page fault while iommu not attached to any domain?\n");
+
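+       /* Drop any translation cached for the faulting page */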
+       sun50i_iommu_zap_range(iommu, iova, SPAGE_SIZE);
 }
 
 static phys_addr_t sun50i_iommu_handle_pt_irq(struct sun50i_iommu *iommu,