Merge tag 'dma-mapping-5.20-2022-08-06' of git://git.infradead.org/users/hch/dma-mapping
[platform/kernel/linux-starfive.git] / drivers / iommu / dma-iommu.c
index 458fb67..376c4e3 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/iova.h>
 #include <linux/irq.h>
 #include <linux/list_sort.h>
+#include <linux/memremap.h>
 #include <linux/mm.h>
 #include <linux/mutex.h>
 #include <linux/pci.h>
@@ -1062,15 +1063,30 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
 
        for_each_sg(sg, s, nents, i) {
                /* Restore this segment's original unaligned fields first */
+               dma_addr_t s_dma_addr = sg_dma_address(s);
                unsigned int s_iova_off = sg_dma_address(s);
                unsigned int s_length = sg_dma_len(s);
                unsigned int s_iova_len = s->length;
 
-               s->offset += s_iova_off;
-               s->length = s_length;
                sg_dma_address(s) = DMA_MAPPING_ERROR;
                sg_dma_len(s) = 0;
 
+               if (sg_is_dma_bus_address(s)) {
+                       if (i > 0)
+                               cur = sg_next(cur);
+
+                       sg_dma_unmark_bus_address(s);
+                       sg_dma_address(cur) = s_dma_addr;
+                       sg_dma_len(cur) = s_length;
+                       sg_dma_mark_bus_address(cur);
+                       count++;
+                       cur_len = 0;
+                       continue;
+               }
+
+               s->offset += s_iova_off;
+               s->length = s_length;
+
                /*
                 * Now fill in the real DMA data. If...
                 * - there is a valid output segment to append to
@@ -1111,10 +1127,14 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
        int i;
 
        for_each_sg(sg, s, nents, i) {
-               if (sg_dma_address(s) != DMA_MAPPING_ERROR)
-                       s->offset += sg_dma_address(s);
-               if (sg_dma_len(s))
-                       s->length = sg_dma_len(s);
+               if (sg_is_dma_bus_address(s)) {
+                       sg_dma_unmark_bus_address(s);
+               } else {
+                       if (sg_dma_address(s) != DMA_MAPPING_ERROR)
+                               s->offset += sg_dma_address(s);
+                       if (sg_dma_len(s))
+                               s->length = sg_dma_len(s);
+               }
                sg_dma_address(s) = DMA_MAPPING_ERROR;
                sg_dma_len(s) = 0;
        }
@@ -1167,6 +1187,8 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
        struct iova_domain *iovad = &cookie->iovad;
        struct scatterlist *s, *prev = NULL;
        int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
+       struct pci_p2pdma_map_state p2pdma_state = {};
+       enum pci_p2pdma_map_type map;
        dma_addr_t iova;
        size_t iova_len = 0;
        unsigned long mask = dma_get_seg_boundary(dev);
@@ -1196,6 +1218,30 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
                size_t s_length = s->length;
                size_t pad_len = (mask - iova_len + 1) & mask;
 
+               if (is_pci_p2pdma_page(sg_page(s))) {
+                       map = pci_p2pdma_map_segment(&p2pdma_state, dev, s);
+                       switch (map) {
+                       case PCI_P2PDMA_MAP_BUS_ADDR:
+                               /*
+                                * iommu_map_sg() will skip this segment as
+                                * it is marked as a bus address,
+                                * __finalise_sg() will copy the dma address
+                                * into the output segment.
+                                */
+                               continue;
+                       case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
+                               /*
+                                * Mapping through host bridge should be
+                                * mapped with regular IOVAs, thus we
+                                * do nothing here and continue below.
+                                */
+                               break;
+                       default:
+                               ret = -EREMOTEIO;
+                               goto out_restore_sg;
+                       }
+               }
+
                sg_dma_address(s) = s_iova_off;
                sg_dma_len(s) = s_length;
                s->offset -= s_iova_off;
@@ -1224,6 +1270,9 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
                prev = s;
        }
 
+       if (!iova_len)
+               return __finalise_sg(dev, sg, nents, 0);
+
        iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
        if (!iova) {
                ret = -ENOMEM;
@@ -1245,7 +1294,7 @@ out_free_iova:
 out_restore_sg:
        __invalidate_sg(sg, nents);
 out:
-       if (ret != -ENOMEM)
+       if (ret != -ENOMEM && ret != -EREMOTEIO)
                return -EINVAL;
        return ret;
 }
@@ -1253,7 +1302,7 @@ out:
 static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
                int nents, enum dma_data_direction dir, unsigned long attrs)
 {
-       dma_addr_t start, end;
+       dma_addr_t end = 0, start;
        struct scatterlist *tmp;
        int i;
 
@@ -1267,16 +1316,37 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 
        /*
         * The scatterlist segments are mapped into a single
-        * contiguous IOVA allocation, so this is incredibly easy.
+        * contiguous IOVA allocation, the start and end points
+        * just have to be determined.
         */
-       start = sg_dma_address(sg);
-       for_each_sg(sg_next(sg), tmp, nents - 1, i) {
+       for_each_sg(sg, tmp, nents, i) {
+               if (sg_is_dma_bus_address(tmp)) {
+                       sg_dma_unmark_bus_address(tmp);
+                       continue;
+               }
+
                if (sg_dma_len(tmp) == 0)
                        break;
-               sg = tmp;
+
+               start = sg_dma_address(tmp);
+               break;
        }
-       end = sg_dma_address(sg) + sg_dma_len(sg);
-       __iommu_dma_unmap(dev, start, end - start);
+
+       nents -= i;
+       for_each_sg(tmp, tmp, nents, i) {
+               if (sg_is_dma_bus_address(tmp)) {
+                       sg_dma_unmark_bus_address(tmp);
+                       continue;
+               }
+
+               if (sg_dma_len(tmp) == 0)
+                       break;
+
+               end = sg_dma_address(tmp) + sg_dma_len(tmp);
+       }
+
+       if (end)
+               __iommu_dma_unmap(dev, start, end - start);
 }
 
 static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
@@ -1468,7 +1538,13 @@ static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
        return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
 }
 
+static size_t iommu_dma_opt_mapping_size(void)
+{
+       return iova_rcache_range();
+}
+
 static const struct dma_map_ops iommu_dma_ops = {
+       .flags                  = DMA_F_PCI_P2PDMA_SUPPORTED,
        .alloc                  = iommu_dma_alloc,
        .free                   = iommu_dma_free,
        .alloc_pages            = dma_common_alloc_pages,
@@ -1488,6 +1564,7 @@ static const struct dma_map_ops iommu_dma_ops = {
        .map_resource           = iommu_dma_map_resource,
        .unmap_resource         = iommu_dma_unmap_resource,
        .get_merge_boundary     = iommu_dma_get_merge_boundary,
+       .opt_mapping_size       = iommu_dma_opt_mapping_size,
 };
 
 /*