crypto: x86/chacha-ssse3 - use unaligned loads for state array
author Ard Biesheuvel <ardb@kernel.org>
Wed, 8 Jul 2020 09:11:18 +0000 (12:11 +0300)
committer Herbert Xu <herbert@gondor.apana.org.au>
Thu, 16 Jul 2020 11:49:04 +0000 (21:49 +1000)
Because the x86 port does not support allocating objects on the stack
with an alignment that exceeds 8 bytes, we have a rather ugly hack in
the x86 code for ChaCha to ensure that the state array is aligned to
16 bytes, allowing the SSSE3 implementation of the algorithm to use
aligned loads.

Given that the performance benefit of using aligned loads appears to
be limited (~0.25% for 1k blocks using tcrypt on a Core i7-8650U), and
the fact that this hack has leaked into generic ChaCha code, let's just
remove it.

Cc: Martin Willi <martin@strongswan.org>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Eric Biggers <ebiggers@kernel.org>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Martin Willi <martin@strongswan.org>
Reviewed-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
arch/x86/crypto/chacha-ssse3-x86_64.S
arch/x86/crypto/chacha_glue.c
include/crypto/chacha.h

diff --git a/arch/x86/crypto/chacha-ssse3-x86_64.S b/arch/x86/crypto/chacha-ssse3-x86_64.S
index a38ab25..ca1788b 100644
--- a/arch/x86/crypto/chacha-ssse3-x86_64.S
+++ b/arch/x86/crypto/chacha-ssse3-x86_64.S
@@ -120,10 +120,10 @@ SYM_FUNC_START(chacha_block_xor_ssse3)
        FRAME_BEGIN
 
        # x0..3 = s0..3
-       movdqa          0x00(%rdi),%xmm0
-       movdqa          0x10(%rdi),%xmm1
-       movdqa          0x20(%rdi),%xmm2
-       movdqa          0x30(%rdi),%xmm3
+       movdqu          0x00(%rdi),%xmm0
+       movdqu          0x10(%rdi),%xmm1
+       movdqu          0x20(%rdi),%xmm2
+       movdqu          0x30(%rdi),%xmm3
        movdqa          %xmm0,%xmm8
        movdqa          %xmm1,%xmm9
        movdqa          %xmm2,%xmm10
@@ -205,10 +205,10 @@ SYM_FUNC_START(hchacha_block_ssse3)
        # %edx: nrounds
        FRAME_BEGIN
 
-       movdqa          0x00(%rdi),%xmm0
-       movdqa          0x10(%rdi),%xmm1
-       movdqa          0x20(%rdi),%xmm2
-       movdqa          0x30(%rdi),%xmm3
+       movdqu          0x00(%rdi),%xmm0
+       movdqu          0x10(%rdi),%xmm1
+       movdqu          0x20(%rdi),%xmm2
+       movdqu          0x30(%rdi),%xmm3
 
        mov             %edx,%r8d
        call            chacha_permute
diff --git a/arch/x86/crypto/chacha_glue.c b/arch/x86/crypto/chacha_glue.c
index 2225009..e67a591 100644
--- a/arch/x86/crypto/chacha_glue.c
+++ b/arch/x86/crypto/chacha_glue.c
@@ -14,8 +14,6 @@
 #include <linux/module.h>
 #include <asm/simd.h>
 
-#define CHACHA_STATE_ALIGN 16
-
 asmlinkage void chacha_block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
                                       unsigned int len, int nrounds);
 asmlinkage void chacha_4block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
@@ -124,8 +122,6 @@ static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src,
 
 void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds)
 {
-       state = PTR_ALIGN(state, CHACHA_STATE_ALIGN);
-
        if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable()) {
                hchacha_block_generic(state, stream, nrounds);
        } else {
@@ -138,8 +134,6 @@ EXPORT_SYMBOL(hchacha_block_arch);
 
 void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv)
 {
-       state = PTR_ALIGN(state, CHACHA_STATE_ALIGN);
-
        chacha_init_generic(state, key, iv);
 }
 EXPORT_SYMBOL(chacha_init_arch);
@@ -147,8 +141,6 @@ EXPORT_SYMBOL(chacha_init_arch);
 void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
                       int nrounds)
 {
-       state = PTR_ALIGN(state, CHACHA_STATE_ALIGN);
-
        if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable() ||
            bytes <= CHACHA_BLOCK_SIZE)
                return chacha_crypt_generic(state, dst, src, bytes, nrounds);
@@ -170,15 +162,12 @@ EXPORT_SYMBOL(chacha_crypt_arch);
 static int chacha_simd_stream_xor(struct skcipher_request *req,
                                  const struct chacha_ctx *ctx, const u8 *iv)
 {
-       u32 *state, state_buf[16 + 2] __aligned(8);
+       u32 state[CHACHA_STATE_WORDS] __aligned(8);
        struct skcipher_walk walk;
        int err;
 
        err = skcipher_walk_virt(&walk, req, false);
 
-       BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
-       state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN);
-
        chacha_init_generic(state, ctx->key, iv);
 
        while (walk.nbytes > 0) {
@@ -217,12 +206,10 @@ static int xchacha_simd(struct skcipher_request *req)
 {
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
-       u32 *state, state_buf[16 + 2] __aligned(8);
+       u32 state[CHACHA_STATE_WORDS] __aligned(8);
        struct chacha_ctx subctx;
        u8 real_iv[16];
 
-       BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
-       state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN);
        chacha_init_generic(state, ctx->key, req->iv);
 
        if (req->cryptlen > CHACHA_BLOCK_SIZE && crypto_simd_usable()) {
diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h
index 2676f4f..3a1c72f 100644
--- a/include/crypto/chacha.h
+++ b/include/crypto/chacha.h
 #define CHACHA_BLOCK_SIZE      64
 #define CHACHAPOLY_IV_SIZE     12
 
-#ifdef CONFIG_X86_64
-#define CHACHA_STATE_WORDS     ((CHACHA_BLOCK_SIZE + 12) / sizeof(u32))
-#else
 #define CHACHA_STATE_WORDS     (CHACHA_BLOCK_SIZE / sizeof(u32))
-#endif
 
 /* 192-bit nonce, then 64-bit stream position */
 #define XCHACHA_IV_SIZE                32
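
The assembly change above replaces movdqa, whose memory operand must
be 16-byte aligned (a misaligned address raises #GP), with movdqu,
which accepts any alignment. For illustration only, the userspace
intrinsic equivalents of these instructions are _mm_load_si128() and
_mm_loadu_si128(); a minimal sketch (not part of the patch):

    #include <emmintrin.h>

    /* movdqa: p must be 16-byte aligned, otherwise the CPU faults */
    static inline __m128i load_aligned(const void *p)
    {
            return _mm_load_si128((const __m128i *)p);
    }

    /* movdqu: any alignment is accepted; on recent cores the cost of
     * a movdqu from an already-aligned address is negligible,
     * consistent with the ~0.25% delta quoted in the commit message */
    static inline __m128i load_unaligned(const void *p)
    {
            return _mm_loadu_si128((const __m128i *)p);
    }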