From: Christoph Hellwig <hch@lst.de>
Date: Mon, 8 Jun 2020 04:41:58 +0000 (-0700)
Subject: hexagon: use asm-generic/cacheflush.h
X-Git-Tag: v5.10.7~2398^2~19
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=af23eea561961108b22422750f07025d2fd91240;p=platform%2Fkernel%2Flinux-rpi.git

hexagon: use asm-generic/cacheflush.h

Hexagon needs almost no cache flushing routines of its own.  Rely on
asm-generic/cacheflush.h for the defaults.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Brian Cain <bcain@codeaurora.org>
Link: http://lkml.kernel.org/r/20200515143646.3857579-12-hch@lst.de
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
---

diff --git a/arch/hexagon/include/asm/cacheflush.h b/arch/hexagon/include/asm/cacheflush.h
index fb447de..6eff073 100644
--- a/arch/hexagon/include/asm/cacheflush.h
+++ b/arch/hexagon/include/asm/cacheflush.h
@@ -25,29 +25,17 @@
 #define LINESIZE	32
 #define LINEBITS	5
 
-#define flush_cache_all()			do { } while (0)
-#define flush_cache_mm(mm)			do { } while (0)
-#define flush_cache_dup_mm(mm)			do { } while (0)
-#define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page)			do { } while (0)
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
-#define flush_icache_page(vma, pg)		do { } while (0)
-#define flush_icache_user_range(vma, pg, adr, len)	do { } while (0)
-#define flush_cache_vmap(start, end)		do { } while (0)
-#define flush_cache_vunmap(start, end)		do { } while (0)
-
 /*
  * Flush Dcache range through current map.
  */
 extern void flush_dcache_range(unsigned long start, unsigned long end);
+#define flush_dcache_range flush_dcache_range
 
 /*
  * Flush Icache range through current map.
  */
 extern void flush_icache_range(unsigned long start, unsigned long end);
+#define flush_icache_range flush_icache_range
 
 /*
  * Memory-management related flushes are there to ensure in non-physically
@@ -78,6 +66,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 		       unsigned long vaddr, void *dst, void *src, int len);
+#define copy_to_user_page copy_to_user_page
 
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	memcpy(dst, src, len)
@@ -85,4 +74,6 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 extern void hexagon_inv_dcache_range(unsigned long start, unsigned long end);
 extern void hexagon_clean_dcache_range(unsigned long start, unsigned long end);
 
+#include <asm-generic/cacheflush.h>
+
 #endif
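
The convention the patch leans on is easy to miss in the diff: the architecture declares its own routine and then defines a macro carrying the same name (e.g. "#define flush_dcache_range flush_dcache_range") before including asm-generic/cacheflush.h, whose defaults are each wrapped in #ifndef and therefore only apply to the hooks the architecture has not claimed. The following is a stand-alone C sketch of that convention, not the kernel headers themselves; the "architecture"/"generic" sections, the printf and main() exist only for the demo.

/*
 * Stand-alone sketch (not kernel code) of the override convention used by
 * asm-generic/cacheflush.h: the generic layer supplies empty defaults for
 * every hook the architecture has not already claimed by defining a macro
 * that carries the hook's own name.
 */
#include <stdio.h>

/* "Architecture" side: provide flush_dcache_range and claim its name. */
static void flush_dcache_range(unsigned long start, unsigned long end)
{
	printf("arch flush_dcache_range(%#lx, %#lx)\n", start, end);
}
#define flush_dcache_range flush_dcache_range

/* "Generic" side: no-op defaults, compiled in only when unclaimed. */
#ifndef flush_cache_all
static inline void flush_cache_all(void)
{
	/* nothing to do by default */
}
#endif

#ifndef flush_dcache_range		/* skipped: the arch claimed it above */
static inline void flush_dcache_range(unsigned long start, unsigned long end)
{
}
#endif

int main(void)
{
	flush_cache_all();			/* falls through to the generic no-op */
	flush_dcache_range(0x1000, 0x2000);	/* resolves to the arch version */
	return 0;
}

Built with a plain C compiler, the sketch prints one line for the arch-claimed flush_dcache_range, while flush_cache_all quietly falls through to the generic no-op, which is what the removed do { } while (0) stubs used to spell out by hand.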