Merge tag 'dma-mapping-6.1-2022-10-10' of git://git.infradead.org/users/hch/dma-mapping
authorLinus Torvalds <torvalds@linux-foundation.org>
Mon, 10 Oct 2022 20:24:55 +0000 (13:24 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Mon, 10 Oct 2022 20:24:55 +0000 (13:24 -0700)
Pull dma-mapping updates from Christoph Hellwig:

 - fix a regression in the ARM dma-direct conversion (Christoph Hellwig)

 - use memcpy_{from,to}_page (Fabio M. De Francesco)

 - cleanup the swiotlb MAINTAINERS entry (Lukas Bulwahn)

 - make SG table pool allocation less fragile (Masahiro Yamada)

 - don't panic on swiotlb initialization failure (Robin Murphy)

* tag 'dma-mapping-6.1-2022-10-10' of git://git.infradead.org/users/hch/dma-mapping:
  ARM/dma-mapping: remove the dma_coherent member of struct dev_archdata
  ARM/dma-mapping: don't override ->dma_coherent when set from a bus notifier
  lib/sg_pool: change module_init(sg_pool_init) to subsys_initcall
  MAINTAINERS: merge SWIOTLB SUBSYSTEM into DMA MAPPING HELPERS
  swiotlb: don't panic!
  swiotlb: replace kmap_atomic() with memcpy_{from,to}_page()

MAINTAINERS
arch/arm/include/asm/device.h
arch/arm/mm/dma-mapping.c
kernel/dma/swiotlb.c
lib/sg_pool.c

index 4157710..7547ffc 100644 (file)
@@ -6171,6 +6171,7 @@ F:        include/asm-generic/dma-mapping.h
 F:     include/linux/dma-direct.h
 F:     include/linux/dma-mapping.h
 F:     include/linux/dma-map-ops.h
+F:     include/linux/swiotlb.h
 F:     kernel/dma/
 
 DMA MAPPING BENCHMARK
@@ -19749,16 +19750,6 @@ S:     Maintained
 F:     Documentation/admin-guide/svga.rst
 F:     arch/x86/boot/video*
 
-SWIOTLB SUBSYSTEM
-M:     Christoph Hellwig <hch@infradead.org>
-L:     iommu@lists.linux.dev
-S:     Supported
-W:     http://git.infradead.org/users/hch/dma-mapping.git
-T:     git git://git.infradead.org/users/hch/dma-mapping.git
-F:     arch/*/kernel/pci-swiotlb.c
-F:     include/linux/swiotlb.h
-F:     kernel/dma/swiotlb.c
-
 SWITCHDEV
 M:     Jiri Pirko <jiri@resnulli.us>
 M:     Ivan Vecera <ivecera@redhat.com>
@@ -22475,8 +22466,10 @@ M:     Stefano Stabellini <sstabellini@kernel.org>
 L:     xen-devel@lists.xenproject.org (moderated for non-subscribers)
 L:     iommu@lists.linux.dev
 S:     Supported
-F:     arch/x86/xen/*swiotlb*
-F:     drivers/xen/*swiotlb*
+F:     arch/*/include/asm/xen/swiotlb-xen.h
+F:     drivers/xen/swiotlb-xen.c
+F:     include/xen/arm/swiotlb-xen.h
+F:     include/xen/swiotlb-xen.h
 
 XFS FILESYSTEM
 C:     irc://irc.oftc.net/xfs
index 8754c0f..c6beb17 100644 (file)
@@ -9,7 +9,6 @@ struct dev_archdata {
 #ifdef CONFIG_ARM_DMA_USE_IOMMU
        struct dma_iommu_mapping        *mapping;
 #endif
-       unsigned int dma_coherent:1;
        unsigned int dma_ops_setup:1;
 };
 
index ef691a5..d790909 100644 (file)
@@ -1769,8 +1769,14 @@ static void arm_teardown_iommu_dma_ops(struct device *dev) { }
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                        const struct iommu_ops *iommu, bool coherent)
 {
-       dev->archdata.dma_coherent = coherent;
-       dev->dma_coherent = coherent;
+       /*
+        * Due to legacy code that sets the ->dma_coherent flag from a bus
+        * notifier we can't just assign coherent to the ->dma_coherent flag
+        * here, but instead have to make sure we only set but never clear it
+        * for now.
+        */
+       if (coherent)
+               dev->dma_coherent = true;
 
        /*
         * Don't override the dma_ops if they have already been set. Ideally
index 0ef6b12..339a990 100644 (file)
@@ -346,22 +346,27 @@ retry:
                memblock_free(tlb, PAGE_ALIGN(bytes));
 
                nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
-               if (nslabs < IO_TLB_MIN_SLABS)
-                       panic("%s: Failed to remap %zu bytes\n",
-                             __func__, bytes);
-               goto retry;
+               if (nslabs >= IO_TLB_MIN_SLABS)
+                       goto retry;
+
+               pr_warn("%s: Failed to remap %zu bytes\n", __func__, bytes);
+               return;
        }
 
        alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
        mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
-       if (!mem->slots)
-               panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
-                     __func__, alloc_size, PAGE_SIZE);
+       if (!mem->slots) {
+               pr_warn("%s: Failed to allocate %zu bytes align=0x%lx\n",
+                       __func__, alloc_size, PAGE_SIZE);
+               return;
+       }
 
        mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area),
                default_nareas), SMP_CACHE_BYTES);
-       if (!mem->areas)
-               panic("%s: Failed to allocate mem->areas.\n", __func__);
+       if (!mem->areas) {
+               pr_warn("%s: Failed to allocate mem->areas.\n", __func__);
+               return;
+       }
 
        swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, flags, false,
                                default_nareas);
@@ -545,9 +550,8 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
        }
 
        if (PageHighMem(pfn_to_page(pfn))) {
-               /* The buffer does not have a mapping.  Map it in and copy */
                unsigned int offset = orig_addr & ~PAGE_MASK;
-               char *buffer;
+               struct page *page;
                unsigned int sz = 0;
                unsigned long flags;
 
@@ -555,12 +559,11 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
                        sz = min_t(size_t, PAGE_SIZE - offset, size);
 
                        local_irq_save(flags);
-                       buffer = kmap_atomic(pfn_to_page(pfn));
+                       page = pfn_to_page(pfn);
                        if (dir == DMA_TO_DEVICE)
-                               memcpy(vaddr, buffer + offset, sz);
+                               memcpy_from_page(vaddr, page, offset, sz);
                        else
-                               memcpy(buffer + offset, vaddr, sz);
-                       kunmap_atomic(buffer);
+                               memcpy_to_page(page, offset, vaddr, sz);
                        local_irq_restore(flags);
 
                        size -= sz;
@@ -731,8 +734,11 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
        int index;
        phys_addr_t tlb_addr;
 
-       if (!mem || !mem->nslabs)
-               panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
+       if (!mem || !mem->nslabs) {
+               dev_warn_ratelimited(dev,
+                       "Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
+               return (phys_addr_t)DMA_MAPPING_ERROR;
+       }
 
        if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
                pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");
index a0b1a52..9bfe60c 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/scatterlist.h>
 #include <linux/mempool.h>
 #include <linux/slab.h>
@@ -177,16 +177,4 @@ cleanup_sdb:
        return -ENOMEM;
 }
 
-static __exit void sg_pool_exit(void)
-{
-       int i;
-
-       for (i = 0; i < SG_MEMPOOL_NR; i++) {
-               struct sg_pool *sgp = sg_pools + i;
-               mempool_destroy(sgp->pool);
-               kmem_cache_destroy(sgp->slab);
-       }
-}
-
-module_init(sg_pool_init);
-module_exit(sg_pool_exit);
+subsys_initcall(sg_pool_init);