iommu/dma: Merge the CMA and alloc_pages allocation paths
author Christoph Hellwig <hch@lst.de>
Mon, 20 May 2019 07:29:39 +0000 (09:29 +0200)
committer Joerg Roedel <jroedel@suse.de>
Mon, 27 May 2019 15:31:11 +0000 (17:31 +0200)
Instead of having separate code paths for the non-blocking alloc_pages
and CMA allocations, merge them into one.  There is a slight
behavior change here in that we try the page allocator if CMA fails.
This matches what dma-direct and other iommu drivers do, and will later
be needed to use the dma-iommu code on architectures without DMA
remapping.
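
As a minimal sketch of the merged flow after this change (condensed from
the hunks below; the helper names are hypothetical, and the IOMMU mapping
step and its error handling are omitted):

	#include <linux/device.h>
	#include <linux/dma-contiguous.h>
	#include <linux/gfp.h>

	/* Try CMA first when the context may sleep, then fall back to the
	 * normal page allocator, matching dma-direct. */
	static struct page *alloc_buffer_sketch(struct device *dev,
						size_t size, gfp_t gfp)
	{
		struct page *page = NULL;

		if (gfpflags_allow_blocking(gfp))
			page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
							 get_order(size),
							 gfp & __GFP_NOWARN);
		if (!page)
			page = alloc_pages(gfp, get_order(size));
		return page;
	}

	/* The buffer may have come from either allocator, so try a CMA
	 * release first and fall back to __free_pages(). */
	static void free_buffer_sketch(struct device *dev, struct page *page,
				       size_t size)
	{
		if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
			__free_pages(page, get_order(size));
	}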

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/dma-iommu.c

index cffd308..ee7dcf0 100644
@@ -974,7 +974,7 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
        bool coherent = dev_is_dma_coherent(dev);
        int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
        size_t iosize = size;
-       struct page *page;
+       struct page *page = NULL;
        void *addr;
 
        size = PAGE_ALIGN(size);
@@ -984,35 +984,26 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
            !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
                return iommu_dma_alloc_remap(dev, iosize, handle, gfp, attrs);
 
-       if (!gfpflags_allow_blocking(gfp)) {
-               /*
-                * In atomic context we can't remap anything, so we'll only
-                * get the virtually contiguous buffer we need by way of a
-                * physically contiguous allocation.
-                */
-               if (coherent) {
-                       page = alloc_pages(gfp, get_order(size));
-                       addr = page ? page_address(page) : NULL;
-               } else {
-                       addr = dma_alloc_from_pool(size, &page, gfp);
-               }
+       if (!gfpflags_allow_blocking(gfp) && !coherent) {
+               addr = dma_alloc_from_pool(size, &page, gfp);
                if (!addr)
                        return NULL;
 
                *handle = __iommu_dma_map(dev, page_to_phys(page), iosize,
                                          ioprot);
                if (*handle == DMA_MAPPING_ERROR) {
-                       if (coherent)
-                               __free_pages(page, get_order(size));
-                       else
-                               dma_free_from_pool(addr, size);
+                       dma_free_from_pool(addr, size);
                        return NULL;
                }
                return addr;
        }
 
-       page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
-                                        get_order(size), gfp & __GFP_NOWARN);
+       if (gfpflags_allow_blocking(gfp))
+               page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
+                                                get_order(size),
+                                                gfp & __GFP_NOWARN);
+       if (!page)
+               page = alloc_pages(gfp, get_order(size));
        if (!page)
                return NULL;
 
@@ -1038,7 +1029,8 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 out_unmap:
        __iommu_dma_unmap(dev, *handle, iosize);
 out_free_pages:
-       dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+       if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+               __free_pages(page, get_order(size));
        return NULL;
 }