From 207918461eb0aca720fddec5da79bc71c133b9f1 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Thu, 8 Oct 2015 20:02:03 +0100
Subject: [PATCH] arm64: use ENDPIPROC() to annotate position independent
 assembler routines

For more control over which functions are called with the MMU off or
with the UEFI 1:1 mapping active, annotate some assembler routines as
position independent. This is done by introducing ENDPIPROC(), which
replaces the ENDPROC() declaration of those routines.

Signed-off-by: Ard Biesheuvel
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/assembler.h | 11 +++++++++++
 arch/arm64/lib/memchr.S            |  2 +-
 arch/arm64/lib/memcmp.S            |  2 +-
 arch/arm64/lib/memcpy.S            |  2 +-
 arch/arm64/lib/memmove.S           |  2 +-
 arch/arm64/lib/memset.S            |  2 +-
 arch/arm64/lib/strcmp.S            |  2 +-
 arch/arm64/lib/strlen.S            |  2 +-
 arch/arm64/lib/strncmp.S           |  2 +-
 arch/arm64/mm/cache.S              | 10 +++++-----
 10 files changed, 24 insertions(+), 13 deletions(-)

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index b51f2cc..12eff92 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -193,4 +193,15 @@ lr	.req	x30		// link register
 	str	\src, [\tmp, :lo12:\sym]
 	.endm
 
+/*
+ * Annotate a function as position independent, i.e., safe to be called before
+ * the kernel virtual mapping is activated.
+ */
+#define ENDPIPROC(x)			\
+	.globl	__pi_##x;		\
+	.type	__pi_##x, %function;	\
+	.set	__pi_##x, x;		\
+	.size	__pi_##x, . - x;	\
+	ENDPROC(x)
+
 #endif	/* __ASM_ASSEMBLER_H */
diff --git a/arch/arm64/lib/memchr.S b/arch/arm64/lib/memchr.S
index 8636b75..4444c1d 100644
--- a/arch/arm64/lib/memchr.S
+++ b/arch/arm64/lib/memchr.S
@@ -41,4 +41,4 @@ ENTRY(memchr)
 	ret
 2:	mov	x0, #0
 	ret
-ENDPROC(memchr)
+ENDPIPROC(memchr)
diff --git a/arch/arm64/lib/memcmp.S b/arch/arm64/lib/memcmp.S
index 6ea0776..ffbdec0 100644
--- a/arch/arm64/lib/memcmp.S
+++ b/arch/arm64/lib/memcmp.S
@@ -255,4 +255,4 @@ CPU_LE( rev	data2, data2 )
 .Lret0:
 	mov	result, #0
 	ret
-ENDPROC(memcmp)
+ENDPIPROC(memcmp)
diff --git a/arch/arm64/lib/memcpy.S b/arch/arm64/lib/memcpy.S
index 173a1aa..36a6a62 100644
--- a/arch/arm64/lib/memcpy.S
+++ b/arch/arm64/lib/memcpy.S
@@ -71,4 +71,4 @@ ENTRY(memcpy)
 #include "copy_template.S"
 	ret
-ENDPROC(memcpy)
+ENDPIPROC(memcpy)
diff --git a/arch/arm64/lib/memmove.S b/arch/arm64/lib/memmove.S
index 57b19ea2..68e2f20 100644
--- a/arch/arm64/lib/memmove.S
+++ b/arch/arm64/lib/memmove.S
@@ -194,4 +194,4 @@ ENTRY(memmove)
 	tst	count, #0x3f
 	b.ne	.Ltail63
 	ret
-ENDPROC(memmove)
+ENDPIPROC(memmove)
diff --git a/arch/arm64/lib/memset.S b/arch/arm64/lib/memset.S
index 7c72dfd..29f405f 100644
--- a/arch/arm64/lib/memset.S
+++ b/arch/arm64/lib/memset.S
@@ -213,4 +213,4 @@ ENTRY(memset)
 	ands	count, count, zva_bits_x
 	b.ne	.Ltail_maybe_long
 	ret
-ENDPROC(memset)
+ENDPIPROC(memset)
diff --git a/arch/arm64/lib/strcmp.S b/arch/arm64/lib/strcmp.S
index 42f828b..471fe61 100644
--- a/arch/arm64/lib/strcmp.S
+++ b/arch/arm64/lib/strcmp.S
@@ -231,4 +231,4 @@ CPU_BE( orr	syndrome, diff, has_nul )
 	lsr	data1, data1, #56
 	sub	result, data1, data2, lsr #56
 	ret
-ENDPROC(strcmp)
+ENDPIPROC(strcmp)
diff --git a/arch/arm64/lib/strlen.S b/arch/arm64/lib/strlen.S
index 987b68b..55ccc8e 100644
--- a/arch/arm64/lib/strlen.S
+++ b/arch/arm64/lib/strlen.S
@@ -123,4 +123,4 @@ CPU_LE( lsr	tmp2, tmp2, tmp1 )	/* Shift (tmp1 & 63). */
 	csinv	data1, data1, xzr, le
 	csel	data2, data2, data2a, le
 	b	.Lrealigned
-ENDPROC(strlen)
+ENDPIPROC(strlen)
diff --git a/arch/arm64/lib/strncmp.S b/arch/arm64/lib/strncmp.S
index 0224cf5..e267044 100644
--- a/arch/arm64/lib/strncmp.S
+++ b/arch/arm64/lib/strncmp.S
@@ -307,4 +307,4 @@ CPU_BE( orr	syndrome, diff, has_nul )
 .Lret0:
 	mov	result, #0
 	ret
-ENDPROC(strncmp)
+ENDPIPROC(strncmp)
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index eb48d5d..cfa44a6 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -98,7 +98,7 @@ ENTRY(__flush_dcache_area)
 	b.lo	1b
 	dsb	sy
 	ret
-ENDPROC(__flush_dcache_area)
+ENDPIPROC(__flush_dcache_area)
 
 /*
  *	__inval_cache_range(start, end)
@@ -131,7 +131,7 @@ __dma_inv_range:
 	b.lo	2b
 	dsb	sy
 	ret
-ENDPROC(__inval_cache_range)
+ENDPIPROC(__inval_cache_range)
 ENDPROC(__dma_inv_range)
 
 /*
@@ -171,7 +171,7 @@ ENTRY(__dma_flush_range)
 	b.lo	1b
 	dsb	sy
 	ret
-ENDPROC(__dma_flush_range)
+ENDPIPROC(__dma_flush_range)
 
 /*
  *	__dma_map_area(start, size, dir)
@@ -184,7 +184,7 @@ ENTRY(__dma_map_area)
 	cmp	w2, #DMA_FROM_DEVICE
 	b.eq	__dma_inv_range
 	b	__dma_clean_range
-ENDPROC(__dma_map_area)
+ENDPIPROC(__dma_map_area)
 
 /*
  *	__dma_unmap_area(start, size, dir)
@@ -197,4 +197,4 @@ ENTRY(__dma_unmap_area)
 	cmp	w2, #DMA_TO_DEVICE
 	b.ne	__dma_inv_range
 	ret
-ENDPROC(__dma_unmap_area)
+ENDPIPROC(__dma_unmap_area)
-- 
2.7.4
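
A minimal sketch of how the new annotation is meant to be used, for
illustration only; the routine name "my_pi_routine" below is hypothetical
and not part of this patch, and the file is assumed to include
linux/linkage.h and asm/assembler.h as the lib/*.S files above do:

	/* hypothetical arch/arm64 assembler routine */
	ENTRY(my_pi_routine)
		add	x0, x0, x1	/* body uses no absolute virtual addresses */
		ret
	ENDPIPROC(my_pi_routine)

The ENDPIPROC() expansion keeps the ordinary ENDPROC() annotation for
my_pi_routine and additionally emits a global function alias
__pi_my_pi_routine with the same value and size, so code that runs with the
MMU off or under the UEFI 1:1 mapping can be pointed at the __pi_ alias
explicitly, while normal kernel code keeps calling the routine by its usual
name.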