crypto: arm/ghash - provide a synchronous version
author Ard Biesheuvel <ard.biesheuvel@linaro.org>
Tue, 2 Jul 2019 19:41:40 +0000 (21:41 +0200)
committer Herbert Xu <herbert@gondor.apana.org.au>
Fri, 26 Jul 2019 04:58:11 +0000 (14:58 +1000)
GHASH is used by the GCM mode, which is often instantiated in contexts
where only synchronous ciphers are permitted. So provide a synchronous
version of GHASH based on the existing code. This requires a non-SIMD
fallback to handle invocations from contexts where SIMD instructions
may not be used.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
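
For context, a minimal usage sketch (editor's illustration, not part of
the patch; the helper name is hypothetical). With this change, a
synchronous shash lookup of "ghash" can resolve to the NEON-backed
"ghash-ce-sync" at priority 299 (one notch below the async wrapper's
300) instead of falling through to the generic C implementation:

#include <crypto/ghash.h>
#include <crypto/hash.h>
#include <linux/err.h>

static int ghash_digest_example(const u8 key[GHASH_BLOCK_SIZE],
				const u8 *data, unsigned int len,
				u8 out[GHASH_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("ghash", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, key, GHASH_BLOCK_SIZE);
	if (!err) {
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		err = crypto_shash_digest(desc, data, len, out);
		shash_desc_zero(desc);
	}

	crypto_free_shash(tfm);
	return err;
}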
diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c
index 52d472a..bb906b5 100644
--- a/arch/arm/crypto/ghash-ce-glue.c
+++ b/arch/arm/crypto/ghash-ce-glue.c
@@ -9,6 +9,7 @@
 #include <asm/neon.h>
 #include <asm/simd.h>
 #include <asm/unaligned.h>
+#include <crypto/b128ops.h>
 #include <crypto/cryptd.h>
 #include <crypto/internal/hash.h>
 #include <crypto/internal/simd.h>
@@ -30,6 +31,8 @@ struct ghash_key {
        u64     h2[2];
        u64     h3[2];
        u64     h4[2];
+
+       be128   k;
 };
 
 struct ghash_desc_ctx {
@@ -62,6 +65,36 @@ static int ghash_init(struct shash_desc *desc)
        return 0;
 }
 
+static void ghash_do_update(int blocks, u64 dg[], const char *src,
+                           struct ghash_key *key, const char *head)
+{
+       if (likely(crypto_simd_usable())) {
+               kernel_neon_begin();
+               pmull_ghash_update(blocks, dg, src, key, head);
+               kernel_neon_end();
+       } else {
+               be128 dst = { cpu_to_be64(dg[1]), cpu_to_be64(dg[0]) };
+
+               do {
+                       const u8 *in = src;
+
+                       if (head) {
+                               in = head;
+                               blocks++;
+                               head = NULL;
+                       } else {
+                               src += GHASH_BLOCK_SIZE;
+                       }
+
+                       crypto_xor((u8 *)&dst, in, GHASH_BLOCK_SIZE);
+                       gf128mul_lle(&dst, &key->k);
+               } while (--blocks);
+
+               dg[0] = be64_to_cpu(dst.b);
+               dg[1] = be64_to_cpu(dst.a);
+       }
+}
+
 static int ghash_update(struct shash_desc *desc, const u8 *src,
                        unsigned int len)
 {
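
Editor's note on the fallback above: GHASH chains Y_0 = 0,
Y_i = (Y_{i-1} ^ X_i) * H over 16-byte blocks in GF(2^128), and
gf128mul_lle() is the kernel's generic multiply for the bit/byte
convention GHASH uses. The dg[] array keeps the running digest with its
64-bit halves swapped relative to be128 (dg[1] is the high word), hence
the crossed conversions on entry and exit, matching the swapped
put_unaligned_be64() calls in ghash_final(). Stripped of the
buffered-head handling, the loop is equivalent to this sketch:

#include <crypto/algapi.h>	/* crypto_xor() */
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>

static void ghash_blocks_sw(be128 *y, const be128 *h,
			    const u8 *src, int blocks)
{
	while (blocks--) {
		crypto_xor((u8 *)y, src, sizeof(*y));	/* Y ^= X_i */
		gf128mul_lle(y, h);			/* Y *= H   */
		src += sizeof(*y);
	}
}

The real ghash_do_update() additionally consumes the "head" block
first, so a partially filled buffer left over from a previous update is
folded in before the new full blocks.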
@@ -85,10 +118,8 @@ static int ghash_update(struct shash_desc *desc, const u8 *src,
                blocks = len / GHASH_BLOCK_SIZE;
                len %= GHASH_BLOCK_SIZE;
 
-               kernel_neon_begin();
-               pmull_ghash_update(blocks, ctx->digest, src, key,
-                                  partial ? ctx->buf : NULL);
-               kernel_neon_end();
+               ghash_do_update(blocks, ctx->digest, src, key,
+                               partial ? ctx->buf : NULL);
                src += blocks * GHASH_BLOCK_SIZE;
                partial = 0;
        }
@@ -106,9 +137,7 @@ static int ghash_final(struct shash_desc *desc, u8 *dst)
                struct ghash_key *key = crypto_shash_ctx(desc->tfm);
 
                memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial);
-               kernel_neon_begin();
-               pmull_ghash_update(1, ctx->digest, ctx->buf, key, NULL);
-               kernel_neon_end();
+               ghash_do_update(1, ctx->digest, ctx->buf, key, NULL);
        }
        put_unaligned_be64(ctx->digest[1], dst);
        put_unaligned_be64(ctx->digest[0], dst + 8);
@@ -132,24 +161,25 @@ static int ghash_setkey(struct crypto_shash *tfm,
                        const u8 *inkey, unsigned int keylen)
 {
        struct ghash_key *key = crypto_shash_ctx(tfm);
-       be128 h, k;
+       be128 h;
 
        if (keylen != GHASH_BLOCK_SIZE) {
                crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }
 
-       memcpy(&k, inkey, GHASH_BLOCK_SIZE);
-       ghash_reflect(key->h, &k);
+       /* needed for the fallback */
+       memcpy(&key->k, inkey, GHASH_BLOCK_SIZE);
+       ghash_reflect(key->h, &key->k);
 
-       h = k;
-       gf128mul_lle(&h, &k);
+       h = key->k;
+       gf128mul_lle(&h, &key->k);
        ghash_reflect(key->h2, &h);
 
-       gf128mul_lle(&h, &k);
+       gf128mul_lle(&h, &key->k);
        ghash_reflect(key->h3, &h);
 
-       gf128mul_lle(&h, &k);
+       gf128mul_lle(&h, &key->k);
        ghash_reflect(key->h4, &h);
 
        return 0;
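
Editor's aside on the setkey change: the raw hash key now survives in
key->k so the scalar fallback can multiply by it, while h..h4 still
receive the reflected powers H^1..H^4 that the PMULL code consumes. The
repeated multiply-and-reflect steps above amount to this hypothetical
loop (ghash_reflect() is the file's existing helper):

static void ghash_setkey_powers(struct ghash_key *key)
{
	u64 *reflected[] = { key->h, key->h2, key->h3, key->h4 };
	be128 h = key->k;
	int i;

	ghash_reflect(reflected[0], &key->k);	/* H^1 */
	for (i = 1; i < 4; i++) {
		gf128mul_lle(&h, &key->k);	/* h = H^(i+1) */
		ghash_reflect(reflected[i], &h);
	}
}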
@@ -162,15 +192,13 @@ static struct shash_alg ghash_alg = {
        .final                  = ghash_final,
        .setkey                 = ghash_setkey,
        .descsize               = sizeof(struct ghash_desc_ctx),
-       .base                   = {
-               .cra_name       = "__ghash",
-               .cra_driver_name = "__driver-ghash-ce",
-               .cra_priority   = 0,
-               .cra_flags      = CRYPTO_ALG_INTERNAL,
-               .cra_blocksize  = GHASH_BLOCK_SIZE,
-               .cra_ctxsize    = sizeof(struct ghash_key),
-               .cra_module     = THIS_MODULE,
-       },
+
+       .base.cra_name          = "ghash",
+       .base.cra_driver_name   = "ghash-ce-sync",
+       .base.cra_priority      = 300 - 1,
+       .base.cra_blocksize     = GHASH_BLOCK_SIZE,
+       .base.cra_ctxsize       = sizeof(struct ghash_key),
+       .base.cra_module        = THIS_MODULE,
 };
 
 static int ghash_async_init(struct ahash_request *req)
@@ -285,9 +313,7 @@ static int ghash_async_init_tfm(struct crypto_tfm *tfm)
        struct cryptd_ahash *cryptd_tfm;
        struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm);
 
-       cryptd_tfm = cryptd_alloc_ahash("__driver-ghash-ce",
-                                       CRYPTO_ALG_INTERNAL,
-                                       CRYPTO_ALG_INTERNAL);
+       cryptd_tfm = cryptd_alloc_ahash("ghash-ce-sync", 0, 0);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);
        ctx->cryptd_tfm = cryptd_tfm;
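
Since the shash is no longer flagged CRYPTO_ALG_INTERNAL, cryptd can
now instantiate it with empty type/mask bits, and it is visible to
ordinary algorithm lookups as well. A hypothetical probe (editor's
illustration):

#include <crypto/hash.h>

static bool ghash_ce_sync_available(void)
{
	/* true once the sync shash can be wrapped as an ahash */
	return crypto_has_ahash("ghash-ce-sync", 0, 0);
}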