From b4ebe6063204da58e48600b810a97c29ae9e5d12 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig <hch@lst.de>
Date: Thu, 20 Sep 2018 14:04:08 +0200
Subject: [PATCH] dma-direct: implement complete bus_dma_mask handling

Instead of rejecting devices with a too small bus_dma_mask, we can
handle them by taking the bus dma_mask into account for allocation and
bounce buffering decisions.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 include/linux/dma-direct.h |  3 ++-
 kernel/dma/direct.c        | 21 +++++++++++----------
 2 files changed, 13 insertions(+), 11 deletions(-)

diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index b79496d..fbca184 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -27,7 +27,8 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size
 	if (!dev->dma_mask)
 		return false;
 
-	return addr + size - 1 <= *dev->dma_mask;
+	return addr + size - 1 <=
+		min_not_zero(*dev->dma_mask, dev->bus_dma_mask);
 }
 #endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
 
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index e785483..60c433b 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -44,10 +44,11 @@ check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
 			return false;
 		}
 
-		if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
+		if (*dev->dma_mask >= DMA_BIT_MASK(32) || dev->bus_dma_mask) {
 			dev_err(dev,
-				"%s: overflow %pad+%zu of device mask %llx\n",
-				caller, &dma_addr, size, *dev->dma_mask);
+				"%s: overflow %pad+%zu of device mask %llx bus mask %llx\n",
+				caller, &dma_addr, size,
+				*dev->dma_mask, dev->bus_dma_mask);
 		}
 		return false;
 	}
@@ -66,12 +67,18 @@ u64 dma_direct_get_required_mask(struct device *dev)
 {
 	u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);
 
+	if (dev->bus_dma_mask && dev->bus_dma_mask < max_dma)
+		max_dma = dev->bus_dma_mask;
+
 	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
 }
 
 static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
 		u64 *phys_mask)
 {
+	if (dev->bus_dma_mask && dev->bus_dma_mask < dma_mask)
+		dma_mask = dev->bus_dma_mask;
+
 	if (force_dma_unencrypted())
 		*phys_mask = __dma_to_phys(dev, dma_mask);
 	else
@@ -88,7 +95,7 @@ static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
 static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 {
 	return phys_to_dma_direct(dev, phys) + size - 1 <=
-			dev->coherent_dma_mask;
+		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_mask);
 }
 
 void *dma_direct_alloc_pages(struct device *dev, size_t size,
@@ -292,12 +299,6 @@ int dma_direct_supported(struct device *dev, u64 mask)
 	if (mask < phys_to_dma(dev, DMA_BIT_MASK(32)))
 		return 0;
 #endif
-	/*
-	 * Upstream PCI/PCIe bridges or SoC interconnects may not carry
-	 * as many DMA address bits as the device itself supports.
-	 */
-	if (dev->bus_dma_mask && mask > dev->bus_dma_mask)
-		return 0;
 	return 1;
 }
 
-- 
2.7.4
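
For context, the addressing rule the patch adopts can be modeled in a few
lines of standalone userspace C. The sketch below is illustrative only and is
not part of the patch: struct fake_device, fake_dma_capable() and the
simplified min_not_zero() are stand-ins for the kernel's struct device,
dma_capable() and min_not_zero() helper. It shows the clamping behaviour: a
transfer is treated as directly addressable only when it fits under both the
device mask and, if set, the bus mask; anything else has to be bounce
buffered.

/*
 * Standalone model (not kernel code) of the mask clamping introduced by
 * this patch.  The effective limit for an address check is the smaller of
 * the device mask and the bus mask, where a bus mask of 0 means "no bus
 * limit".
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* Simplified min_not_zero(): ignore an operand that is zero. */
static uint64_t min_not_zero(uint64_t a, uint64_t b)
{
	if (a == 0)
		return b;
	if (b == 0)
		return a;
	return a < b ? a : b;
}

struct fake_device {
	uint64_t dma_mask;	/* capability of the device itself */
	uint64_t bus_dma_mask;	/* limit of the upstream bus, 0 = none */
};

/* Mirrors the reworked dma_capable(): both masks must cover the range. */
static int fake_dma_capable(const struct fake_device *dev,
			    uint64_t addr, size_t size)
{
	if (!dev->dma_mask)
		return 0;

	return addr + size - 1 <= min_not_zero(dev->dma_mask, dev->bus_dma_mask);
}

int main(void)
{
	/* 64-bit capable device behind a bus that only carries 32 bits. */
	struct fake_device dev = {
		.dma_mask = DMA_BIT_MASK(64),
		.bus_dma_mask = DMA_BIT_MASK(32),
	};

	/* Below 4 GiB the range fits; above 4 GiB it must be bounced. */
	printf("1G buffer ok: %d\n", fake_dma_capable(&dev, 1ULL << 30, 4096));
	printf("5G buffer ok: %d\n", fake_dma_capable(&dev, 5ULL << 30, 4096));
	return 0;
}

Running the sketch prints 1 for the sub-4G buffer and 0 for the buffer above
4 GiB, which is the behaviour the reworked dma_capable() gives a 64-bit
device sitting behind a 32-bit-limited bus.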