crypto: x86/serpent: Remove redundant alignments
author Thomas Gleixner <tglx@linutronix.de>
Thu, 15 Sep 2022 11:10:55 +0000 (13:10 +0200)
committer Peter Zijlstra <peterz@infradead.org>
Mon, 17 Oct 2022 14:41:01 +0000 (16:41 +0200)
SYM_FUNC_START*() and friends already imply alignment; remove the
custom alignment hacks to make the code consistent. This prepares for
future function call ABI changes.

Also, now that the function alignment has been pushed to 16 bytes,
this custom 8-byte alignment is completely superfluous.
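
For reference, a sketch of the expansion (abridged from
include/linux/linkage.h; the exact __ALIGN spelling may differ by
tree):

    #define SYM_FUNC_START_LOCAL(name) \
            SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN)

    #define SYM_A_ALIGN     ALIGN    /* in asm, ALIGN is __ALIGN */
    #define __ALIGN         .balign CONFIG_FUNCTION_ALIGNMENT

With CONFIG_FUNCTION_ALIGNMENT at 16 on x86-64, every
SYM_FUNC_START*() already emits an alignment at least as strict as
the removed .align 8.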

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220915111144.558544791@infradead.org
arch/x86/crypto/serpent-avx-x86_64-asm_64.S
arch/x86/crypto/serpent-avx2-asm_64.S

diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
index 82f2313..97e2836 100644
--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
 #define write_blocks(x0, x1, x2, x3, t0, t1, t2) \
        transpose_4x4(x0, x1, x2, x3, t0, t1, t2)
 
-.align 8
 SYM_FUNC_START_LOCAL(__serpent_enc_blk8_avx)
        /* input:
         *      %rdi: ctx, CTX
@@ -604,7 +603,6 @@ SYM_FUNC_START_LOCAL(__serpent_enc_blk8_avx)
        RET;
 SYM_FUNC_END(__serpent_enc_blk8_avx)
 
-.align 8
 SYM_FUNC_START_LOCAL(__serpent_dec_blk8_avx)
        /* input:
         *      %rdi: ctx, CTX
diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
index 8ea34c9..6d60c50 100644
--- a/arch/x86/crypto/serpent-avx2-asm_64.S
+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
 #define write_blocks(x0, x1, x2, x3, t0, t1, t2) \
        transpose_4x4(x0, x1, x2, x3, t0, t1, t2)
 
-.align 8
 SYM_FUNC_START_LOCAL(__serpent_enc_blk16)
        /* input:
         *      %rdi: ctx, CTX
@@ -604,7 +603,6 @@ SYM_FUNC_START_LOCAL(__serpent_enc_blk16)
        RET;
 SYM_FUNC_END(__serpent_enc_blk16)
 
-.align 8
 SYM_FUNC_START_LOCAL(__serpent_dec_blk16)
        /* input:
         *      %rdi: ctx, CTX
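
A quick way to sanity-check the result (a hypothetical invocation;
adjust the object path to your build tree) is to dump the symbol
table of the assembled file and verify that each function address is
still a multiple of 0x10:

    $ objdump -t arch/x86/crypto/serpent-avx2-asm_64.o | grep serpent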