crypto: arm/aes - use native endianness for key schedule
author Ard Biesheuvel <ard.biesheuvel@linaro.org>
Tue, 2 Jul 2019 19:41:37 +0000 (21:41 +0200)
committer Herbert Xu <herbert@gondor.apana.org.au>
Fri, 26 Jul 2019 04:58:09 +0000 (14:58 +1000)
Align ARM's hw-instruction-based AES implementation with the other versions
that keep the key schedule in native endianness. This will allow us to
merge the various implementations going forward.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
arch/arm/crypto/aes-ce-core.S
arch/arm/crypto/aes-ce-glue.c
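
For illustration only (not part of the patch): a minimal user-space sketch of
what storing the key schedule in native endianness means on the C side. Each
4-byte group of the input key becomes one host-order u32 round-key word, which
the .32 NEON loads in the diff below then consume unchanged on both little-
and big-endian. load_le32() is a hypothetical stand-in for the kernel's
get_unaligned_le32() helper.

/*
 * Illustrative sketch, not kernel code: decode the raw AES key into
 * native-endian 32-bit words, mirroring the get_unaligned_le32() loop
 * added to ce_aes_expandkey() below.
 */
#include <stdint.h>
#include <stddef.h>

static uint32_t load_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static void key_to_native_words(uint32_t *key_enc, const uint8_t *in_key,
				size_t key_len)
{
	/* One host-order word per 4 key bytes, on both LE and BE hosts. */
	for (size_t i = 0; i < key_len / sizeof(uint32_t); i++)
		key_enc[i] = load_le32(in_key + i * sizeof(uint32_t));
}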

index caac519d62490d18104826d83e3f6a055bba3265..425000232d49765337d50d1d88defd8311618bcf 100644 (file)
 
        .macro          do_block, dround, fround
        cmp             r3, #12                 @ which key size?
-       vld1.8          {q10-q11}, [ip]!
+       vld1.32         {q10-q11}, [ip]!
        \dround         q8, q9
-       vld1.8          {q12-q13}, [ip]!
+       vld1.32         {q12-q13}, [ip]!
        \dround         q10, q11
-       vld1.8          {q10-q11}, [ip]!
+       vld1.32         {q10-q11}, [ip]!
        \dround         q12, q13
-       vld1.8          {q12-q13}, [ip]!
+       vld1.32         {q12-q13}, [ip]!
        \dround         q10, q11
        blo             0f                      @ AES-128: 10 rounds
-       vld1.8          {q10-q11}, [ip]!
+       vld1.32         {q10-q11}, [ip]!
        \dround         q12, q13
        beq             1f                      @ AES-192: 12 rounds
-       vld1.8          {q12-q13}, [ip]
+       vld1.32         {q12-q13}, [ip]
        \dround         q10, q11
 0:     \fround         q12, q13, q14
        bx              lr
@@ -149,8 +149,8 @@ ENDPROC(aes_decrypt_3x)
 
        .macro          prepare_key, rk, rounds
        add             ip, \rk, \rounds, lsl #4
-       vld1.8          {q8-q9}, [\rk]          @ load first 2 round keys
-       vld1.8          {q14}, [ip]             @ load last round key
+       vld1.32         {q8-q9}, [\rk]          @ load first 2 round keys
+       vld1.32         {q14}, [ip]             @ load last round key
        .endm
 
        /*
@@ -505,8 +505,8 @@ ENDPROC(ce_aes_sub)
         *                                        operation on round key *src
         */
 ENTRY(ce_aes_invert)
-       vld1.8          {q0}, [r1]
+       vld1.32         {q0}, [r1]
        aesimc.8        q0, q0
-       vst1.8          {q0}, [r0]
+       vst1.32         {q0}, [r0]
        bx              lr
 ENDPROC(ce_aes_invert)
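
A loose C analogy (not from the patch) for why the element size of these loads
matters once the schedule is stored as native u32 words: reading a word back
as a 32-bit quantity round-trips on any host, while reassembling its raw bytes
in memory order only matches on little-endian. Roughly, vld1.32/vst1.32 play
the role of the 32-bit access here and vld1.8 corresponds to the raw-byte view.

/*
 * Loose analogy, not kernel code: a round-key word written as a native
 * u32 reads back unchanged via a 32-bit access on any host, but
 * reassembling its raw bytes in memory order gives a byte-swapped
 * value on big-endian.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint32_t word = 0x01020304;	/* a round-key word, host order */
	uint8_t mem[4];
	uint32_t as_u32, as_le_bytes;

	memcpy(mem, &word, sizeof(mem));	/* what the C glue stores */

	memcpy(&as_u32, mem, sizeof(as_u32));	/* 32-bit view: equals word on LE and BE */
	as_le_bytes = (uint32_t)mem[0] | (uint32_t)mem[1] << 8 |
		      (uint32_t)mem[2] << 16 | (uint32_t)mem[3] << 24;
						/* raw-byte view: differs on BE */

	printf("%08x %08x %08x\n", word, as_u32, as_le_bytes);
	return 0;
}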
index e4139a0b0d75d4cc8bd47ec9328bddcb86e452e0..36d1a5301284174070c95fd2c95d3599caab7272 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <asm/hwcap.h>
 #include <asm/neon.h>
+#include <asm/unaligned.h>
 #include <crypto/aes.h>
 #include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
@@ -77,21 +78,17 @@ static int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
            key_len != AES_KEYSIZE_256)
                return -EINVAL;
 
-       memcpy(ctx->key_enc, in_key, key_len);
        ctx->key_length = key_len;
+       for (i = 0; i < kwords; i++)
+               ctx->key_enc[i] = get_unaligned_le32(in_key + i * sizeof(u32));
 
        kernel_neon_begin();
        for (i = 0; i < sizeof(rcon); i++) {
                u32 *rki = ctx->key_enc + (i * kwords);
                u32 *rko = rki + kwords;
 
-#ifndef CONFIG_CPU_BIG_ENDIAN
                rko[0] = ror32(ce_aes_sub(rki[kwords - 1]), 8);
                rko[0] = rko[0] ^ rki[0] ^ rcon[i];
-#else
-               rko[0] = rol32(ce_aes_sub(rki[kwords - 1]), 8);
-               rko[0] = rko[0] ^ rki[0] ^ (rcon[i] << 24);
-#endif
                rko[1] = rko[0] ^ rki[1];
                rko[2] = rko[1] ^ rki[2];
                rko[3] = rko[2] ^ rki[3];
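
For reference, a hedged sketch (not from the patch) of the AES-128 shape of
this loop once the big-endian special case is gone: with the round keys held
as native words decoded from the little-endian key bytes, the single ror32()
form is correct on either endianness. sub_word() is a hypothetical stand-in
for ce_aes_sub(), i.e. SubBytes applied to each byte of the word; it is only
declared here to keep the sketch short.

/*
 * Sketch only (AES-128 case, kwords == 4): one key-expansion step using
 * native-endian words, matching the unified path kept above.
 * sub_word() stands in for ce_aes_sub() and is not implemented here.
 */
#include <stdint.h>

uint32_t sub_word(uint32_t w);	/* AES SubBytes on each byte of w */

static inline uint32_t ror32_sketch(uint32_t x, unsigned int n)
{
	return (x >> n) | (x << (32 - n));
}

static void expand_step_128(const uint32_t rki[4], uint32_t rko[4],
			    uint32_t rcon_i)
{
	/* RotWord + SubWord on the last word of the previous round key. */
	rko[0] = ror32_sketch(sub_word(rki[3]), 8) ^ rki[0] ^ rcon_i;
	rko[1] = rko[0] ^ rki[1];
	rko[2] = rko[1] ^ rki[2];
	rko[3] = rko[2] ^ rki[3];
}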