From 60695be2bb6b0623f8e53bd9949d582a83c6d44a Mon Sep 17 00:00:00 2001
From: Jacopo Mondi
Date: Fri, 13 Apr 2018 19:25:37 +0200
Subject: [PATCH] dma-mapping: postpone cpu addr translation on mmap

Postpone calling virt_to_page() translation on memory locations not
guaranteed to be backed by a struct page.

Try first to map memory from the device coherent memory pool, then
perform translation if that fails.

On some architectures, specifically SH when configured with the
SPARSEMEM memory model, assuming a struct page is always assigned to a
memory address leads to unexpected hangs during the virtual to page
address translation. This patch fixes that specific issue but applies
in the general case too.

Suggested-by: Laurent Pinchart
Signed-off-by: Jacopo Mondi
Reviewed-by: Robin Murphy
Signed-off-by: Christoph Hellwig
---
 drivers/base/dma-mapping.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 3b11835..d82566d 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -226,7 +226,6 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 #ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
 	unsigned long user_count = vma_pages(vma);
 	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
 	unsigned long off = vma->vm_pgoff;
 
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -234,12 +233,11 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
-	if (off < count && user_count <= (count - off)) {
+	if (off < count && user_count <= (count - off))
 		ret = remap_pfn_range(vma, vma->vm_start,
-				      pfn + off,
+				      page_to_pfn(virt_to_page(cpu_addr)) + off,
 				      user_count << PAGE_SHIFT,
 				      vma->vm_page_prot);
-	}
 #endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
 
 	return ret;
-- 
2.7.4
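
For reference, a sketch of how dma_common_mmap() reads with this patch
applied. Only the lines touched by the hunks above come straight from the
diff; the surrounding context (the full function signature and the initial
"int ret = -ENXIO;") is reconstructed from the drivers/base/dma-mapping.c
of that era and may differ slightly from the exact tree this applies to.

int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = -ENXIO;
#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/*
	 * The device coherent pool is tried first; memory from it is not
	 * guaranteed to be backed by a struct page, so no virtual-to-page
	 * translation happens on this path.
	 */
	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	/*
	 * virt_to_page() is only reached once the coherent-pool path has
	 * been ruled out, i.e. for memory expected to be page-backed.
	 */
	if (off < count && user_count <= (count - off))
		ret = remap_pfn_range(vma, vma->vm_start,
				      page_to_pfn(virt_to_page(cpu_addr)) + off,
				      user_count << PAGE_SHIFT,
				      vma->vm_page_prot);
#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */

	return ret;
}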