Merge tag 'locking-core-2023-06-27' of git://git.kernel.org/pub/scm/linux/kernel...
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 1e9f85e..9ea4096 100644
@@ -845,6 +845,7 @@ amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { }
        (MMIO_STATUS_EVT_OVERFLOW_INT_MASK | \
         MMIO_STATUS_EVT_INT_MASK | \
         MMIO_STATUS_PPR_INT_MASK | \
+        MMIO_STATUS_GALOG_OVERFLOW_MASK | \
         MMIO_STATUS_GALOG_INT_MASK)
 
 irqreturn_t amd_iommu_int_thread(int irq, void *data)
@@ -868,10 +869,16 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data)
                }
 
 #ifdef CONFIG_IRQ_REMAP
-               if (status & MMIO_STATUS_GALOG_INT_MASK) {
+               if (status & (MMIO_STATUS_GALOG_INT_MASK |
+                             MMIO_STATUS_GALOG_OVERFLOW_MASK)) {
                        pr_devel("Processing IOMMU GA Log\n");
                        iommu_poll_ga_log(iommu);
                }
+
+               if (status & MMIO_STATUS_GALOG_OVERFLOW_MASK) {
+                       pr_info_ratelimited("IOMMU GA Log overflow\n");
+                       amd_iommu_restart_ga_log(iommu);
+               }
 #endif
 
                if (status & MMIO_STATUS_EVT_OVERFLOW_INT_MASK) {
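
Note: taken together, the two hunks above teach the threaded handler about
GA (guest vAPIC) log overflows. The first hunk adds the overflow bit to the
interrupt mask whose body is shown there (AMD_IOMMU_INT_MASK in the upstream
tree); that matters because amd_iommu_int_thread() loops until no masked bit
remains set in the status register. The second hunk drains the log on either
condition and only then re-arms logging via amd_iommu_restart_ga_log(). A
simplified sketch of that loop, not verbatim driver code (the function name
and the acknowledge write are paraphrased from the surrounding file):

irqreturn_t int_thread_sketch(int irq, void *data)
{
	struct amd_iommu *iommu = data;
	u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);

	while (status & AMD_IOMMU_INT_MASK) {
		/* Status bits are write-1-to-clear: acknowledge, then service. */
		writel(AMD_IOMMU_INT_MASK,
		       iommu->mmio_base + MMIO_STATUS_OFFSET);

		/* Drain before restarting so queued entries are not lost. */
		if (status & (MMIO_STATUS_GALOG_INT_MASK |
			      MMIO_STATUS_GALOG_OVERFLOW_MASK))
			iommu_poll_ga_log(iommu);
		if (status & MMIO_STATUS_GALOG_OVERFLOW_MASK)
			amd_iommu_restart_ga_log(iommu);

		/* Re-read: a bit raised meanwhile keeps the loop going. */
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
	}
	return IRQ_HANDLED;
}
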
@@ -2067,14 +2074,10 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
 {
        struct io_pgtable_ops *pgtbl_ops;
        struct protection_domain *domain;
-       int pgtable = amd_iommu_pgtable;
+       int pgtable;
        int mode = DEFAULT_PGTABLE_LEVEL;
        int ret;
 
-       domain = kzalloc(sizeof(*domain), GFP_KERNEL);
-       if (!domain)
-               return NULL;
-
        /*
         * Force IOMMU v1 page table when iommu=pt and
         * when allocating domain for pass-through devices.
@@ -2084,8 +2087,16 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
                mode = PAGE_MODE_NONE;
        } else if (type == IOMMU_DOMAIN_UNMANAGED) {
                pgtable = AMD_IOMMU_V1;
+       } else if (type == IOMMU_DOMAIN_DMA || type == IOMMU_DOMAIN_DMA_FQ) {
+               pgtable = amd_iommu_pgtable;
+       } else {
+               return NULL;
        }
 
+       domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+       if (!domain)
+               return NULL;
+
        switch (pgtable) {
        case AMD_IOMMU_V1:
                ret = protection_domain_init_v1(domain, mode);
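
Note: these two hunks reorder protection_domain_alloc() so the page-table
type is chosen, and unexpected domain types rejected, before any memory is
allocated. With the new `else { return NULL; }` arm, keeping the kzalloc()
first would have leaked `domain` on that path; allocating last leaves every
early exit cleanup-free. DMA domains (IOMMU_DOMAIN_DMA / IOMMU_DOMAIN_DMA_FQ)
now take the global amd_iommu_pgtable choice explicitly rather than through
the removed initializer. The shape of the fix, reduced to a sketch with a
hypothetical helper standing in for the if/else ladder:

static struct protection_domain *alloc_sketch(unsigned int type)
{
	int pgtable;

	/* pgtable_for_type() is hypothetical: it may reject `type`. */
	if (pgtable_for_type(type, &pgtable))
		return NULL;		/* nothing allocated yet, no leak */

	return kzalloc(sizeof(struct protection_domain), GFP_KERNEL);
}
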
@@ -2118,6 +2129,15 @@ out_err:
        return NULL;
 }
 
+static inline u64 dma_max_address(void)
+{
+       if (amd_iommu_pgtable == AMD_IOMMU_V1)
+               return ~0ULL;
+
+       /* V2 with 4/5 level page table */
+       return ((1ULL << PM_LEVEL_SHIFT(amd_iommu_gpt_level)) - 1);
+}
+
 static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
 {
        struct protection_domain *domain;
@@ -2134,7 +2154,7 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
                return NULL;
 
        domain->domain.geometry.aperture_start = 0;
-       domain->domain.geometry.aperture_end   = ~0ULL;
+       domain->domain.geometry.aperture_end   = dma_max_address();
        domain->domain.geometry.force_aperture = true;
 
        return &domain->domain;
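
Note: dma_max_address() caps the advertised aperture at what the configured
page table can translate. V1 page tables cover the whole 64-bit IOVA space,
hence ~0ULL; for V2 the limit tracks the 4- or 5-level table width. Assuming
PM_LEVEL_SHIFT(x) expands to (12 + ((x) * 9)) as in amd_iommu_types.h:

/*
 *   amd_iommu_gpt_level == 4:  (1ULL << 48) - 1 == 0x0000ffffffffffff
 *   amd_iommu_gpt_level == 5:  (1ULL << 57) - 1 == 0x01ffffffffffffff
 */

Publishing this through geometry.aperture_end in the hunk above keeps the
IOVA allocator from handing out addresses the V2 table cannot map.
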
@@ -2387,7 +2407,7 @@ static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
        unsigned long flags;
 
        spin_lock_irqsave(&dom->lock, flags);
-       domain_flush_pages(dom, gather->start, gather->end - gather->start, 1);
+       domain_flush_pages(dom, gather->start, gather->end - gather->start + 1, 1);
        amd_iommu_domain_flush_complete(dom);
        spin_unlock_irqrestore(&dom->lock, flags);
 }
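
Note: an off-by-one fix. struct iommu_iotlb_gather tracks an inclusive
range (iommu_iotlb_gather_add_range() records end = iova + size - 1), so
the number of bytes to flush is end - start + 1. The old expression was one
byte short and could leave the last page of the range stale in the IOTLB.
Worked example for a single 4 KiB page:

/*
 *   gather->start = 0x1000, gather->end = 0x1fff   (inclusive)
 *   old: 0x1fff - 0x1000     = 0x0fff   -> one byte short of the page
 *   new: 0x1fff - 0x1000 + 1 = 0x1000   -> exactly the 4 KiB page
 */
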
@@ -3491,8 +3511,7 @@ int amd_iommu_activate_guest_mode(void *data)
        struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
        u64 valid;
 
-       if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
-           !entry || entry->lo.fields_vapic.guest_mode)
+       if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) || !entry)
                return 0;
 
        valid = entry->lo.fields_vapic.valid;
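
Note: dropping the entry->lo.fields_vapic.guest_mode early-out lets
amd_iommu_activate_guest_mode() run again on an IRTE that is already in
guest mode, so updated ir_data (e.g. a new vector or GA tag) is actually
written out instead of being silently skipped. This is safe because the
rest of the function, paraphrased below from the surrounding upstream code
(not part of this hunk), zeroes the entry and rebuilds every guest-mode
field, preserving only the saved `valid` bit:

entry->lo.val = 0;
entry->hi.val = 0;

entry->lo.fields_vapic.valid       = valid;
entry->lo.fields_vapic.guest_mode  = 1;
entry->lo.fields_vapic.ga_log_intr = 1;
entry->hi.fields.ga_root_ptr       = ir_data->ga_root_ptr;
entry->hi.fields.vector            = ir_data->ga_vector;
entry->lo.fields_vapic.ga_tag      = ir_data->ga_tag;
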