arm64: Do not enable uaccess for flush_icache_range
authorFuad Tabba <tabba@google.com>
Mon, 24 May 2021 08:29:47 +0000 (09:29 +0100)
committerWill Deacon <will@kernel.org>
Tue, 25 May 2021 18:27:48 +0000 (19:27 +0100)
__flush_icache_range works on kernel addresses, and doesn't need
uaccess. The existing uaccess enable is a side-effect of the current
implementation, which falls through into __flush_cache_user_range.

Instead of falling through to share the code, use a common macro for
the two where the caller specifies an optional fixup label if
user access is needed. If provided, this label would be used to
generate an extable entry.

Simplify the code to use dcache_by_line_op, instead of
replicating much of its functionality.

No functional change intended.
A performance improvement is possible due to the reduced number of
instructions.

Reported-by: Catalin Marinas <catalin.marinas@arm.com>
Reported-by: Will Deacon <will@kernel.org>
Reported-by: Mark Rutland <mark.rutland@arm.com>
Link: https://lore.kernel.org/linux-arch/20200511110014.lb9PEahJ4hVOYrbwIb_qUHXyNy9KQzNFdb_I3YlzY6A@z/
Link: https://lore.kernel.org/linux-arm-kernel/20210521121846.GB1040@C02TD0UTHF1T.local/
Signed-off-by: Fuad Tabba <tabba@google.com>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20210524083001.2586635-5-tabba@google.com
Signed-off-by: Will Deacon <will@kernel.org>
arch/arm64/mm/cache.S

index 2d881f3..7c54bcb 100644 (file)
 #include <asm/asm-uaccess.h>
 
 /*
+ *     __flush_cache_range(start,end) [fixup]
+ *
+ *     Ensure that the I and D caches are coherent within specified region.
+ *     This is typically used when code has been written to a memory region,
+ *     and will be executed.
+ *
+ *     - start   - virtual start address of region
+ *     - end     - virtual end address of region
+ *     - fixup   - optional label to branch to on user fault
+ */
+.macro __flush_cache_range, fixup
+alternative_if ARM64_HAS_CACHE_IDC
+       dsb     ishst
+       b       .Ldc_skip_\@
+alternative_else_nop_endif
+       mov     x2, x0
+       sub     x3, x1, x0
+       dcache_by_line_op cvau, ish, x2, x3, x4, x5, \fixup
+.Ldc_skip_\@:
+alternative_if ARM64_HAS_CACHE_DIC
+       isb
+       b       .Lic_skip_\@
+alternative_else_nop_endif
+       invalidate_icache_by_line x0, x1, x2, x3, \fixup
+.Lic_skip_\@:
+.endm
+
+/*
  *     flush_icache_range(start,end)
  *
  *     Ensure that the I and D caches are coherent within specified region.
@@ -25,7 +53,9 @@
  *     - end     - virtual end address of region
  */
 SYM_FUNC_START(__flush_icache_range)
-       /* FALLTHROUGH */
+       __flush_cache_range
+       ret
+SYM_FUNC_END(__flush_icache_range)
 
 /*
  *     __flush_cache_user_range(start,end)
@@ -39,34 +69,15 @@ SYM_FUNC_START(__flush_icache_range)
  */
 SYM_FUNC_START(__flush_cache_user_range)
        uaccess_ttbr0_enable x2, x3, x4
-alternative_if ARM64_HAS_CACHE_IDC
-       dsb     ishst
-       b       7f
-alternative_else_nop_endif
-       dcache_line_size x2, x3
-       sub     x3, x2, #1
-       bic     x4, x0, x3
-1:
-user_alt 9f, "dc cvau, x4",  "dc civac, x4",  ARM64_WORKAROUND_CLEAN_CACHE
-       add     x4, x4, x2
-       cmp     x4, x1
-       b.lo    1b
-       dsb     ish
 
-7:
-alternative_if ARM64_HAS_CACHE_DIC
-       isb
-       b       8f
-alternative_else_nop_endif
-       invalidate_icache_by_line x0, x1, x2, x3, 9f
-8:     mov     x0, #0
+       __flush_cache_range 2f
+       mov     x0, xzr
 1:
        uaccess_ttbr0_disable x1, x2
        ret
-9:
+2:
        mov     x0, #-EFAULT
        b       1b
-SYM_FUNC_END(__flush_icache_range)
 SYM_FUNC_END(__flush_cache_user_range)
 
 /*