From: Linus Torvalds
Date: Tue, 23 Jun 2015 04:04:48 +0000 (-0700)
Subject: Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
X-Git-Tag: v4.2-rc1~160
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=44d21c3f3a2ef2f58b18bda64c52c99e723f3f4a;p=platform%2Fkernel%2Flinux-exynos.git

Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto update from Herbert Xu:
 "Here is the crypto update for 4.2:

  API:
   - Convert RNG interface to new style.
   - New AEAD interface with one SG list for AD and plain/cipher text.
     All external AEAD users have been converted.
   - New asymmetric key interface (akcipher).

  Algorithms:
   - Chacha20, Poly1305 and RFC7539 support.
   - New RSA implementation.
   - Jitter RNG.
   - DRBG is now seeded with both /dev/random and the Jitter RNG.  If
     the kernel pool isn't ready yet, the DRBG will be reseeded once
     it is.
   - DRBG is now the default crypto API RNG, replacing krng.
   - 842 compression (previously part of the powerpc nx driver).

  Drivers:
   - Accelerated SHA-512 for arm64.
   - New Marvell CESA driver that supports DMA and more algorithms.
   - Updated powerpc nx 842 support.
   - Added support for SEC1 hardware to talitos"

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (292 commits)
  crypto: marvell/cesa - remove COMPILE_TEST dependency
  crypto: algif_aead - Temporarily disable all AEAD algorithms
  crypto: af_alg - Forbid the use internal algorithms
  crypto: echainiv - Only hold RNG during initialisation
  crypto: seqiv - Add compatibility support without RNG
  crypto: eseqiv - Offer normal cipher functionality without RNG
  crypto: chainiv - Offer normal cipher functionality without RNG
  crypto: user - Add CRYPTO_MSG_DELRNG
  crypto: user - Move cryptouser.h to uapi
  crypto: rng - Do not free default RNG when it becomes unused
  crypto: skcipher - Allow givencrypt to be NULL
  crypto: sahara - propagate the error on clk_disable_unprepare() failure
  crypto: rsa - fix invalid select for AKCIPHER
  crypto: picoxcell - Update to the current clk API
  crypto: nx - Check for bogus firmware properties
  crypto: marvell/cesa - add DT bindings documentation
  crypto: marvell/cesa - add support for Kirkwood and Dove SoCs
  crypto: marvell/cesa - add support for Orion SoCs
  crypto: marvell/cesa - add allhwsupport module parameter
  crypto: marvell/cesa - add support for all armada SoCs
  ...
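A note on the AEAD item above: the new interface carries the associated
data and the plain/cipher text in one scatterlist, with the AD at the
front, and the AD length is declared via aead_request_set_ad().  What
follows is a minimal sketch of the converted calling pattern, not code
from this merge; the "gcm(aes)" algorithm choice, the in-place buffer
layout, the function name and all lengths are illustrative assumptions.

    #include <crypto/aead.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    /*
     * Hypothetical example: in-place AEAD encryption using the 4.2-style
     * single-SG-list layout: [ AD | plaintext | room for 16-byte tag ].
     */
    static int aead_one_sg_example(u8 *buf, unsigned int assoclen,
                                   unsigned int datalen, u8 *iv,
                                   const u8 *key, unsigned int keylen)
    {
            struct crypto_aead *tfm;
            struct aead_request *req;
            struct scatterlist sg;
            int err;

            /* Mask out async implementations so the call completes inline. */
            tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_aead_setkey(tfm, key, keylen);
            if (!err)
                    err = crypto_aead_setauthsize(tfm, 16);
            if (err)
                    goto out_free_tfm;

            req = aead_request_alloc(tfm, GFP_KERNEL);
            if (!req) {
                    err = -ENOMEM;
                    goto out_free_tfm;
            }

            /* One SG list covers the AD, the plaintext and the tag space. */
            sg_init_one(&sg, buf, assoclen + datalen + 16);

            aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      NULL, NULL);
            aead_request_set_ad(req, assoclen);
            aead_request_set_crypt(req, &sg, &sg, datalen, iv);

            err = crypto_aead_encrypt(req);

            aead_request_free(req);
    out_free_tfm:
            crypto_free_aead(tfm);
            return err;
    }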
---
44d21c3f3a2ef2f58b18bda64c52c99e723f3f4a
diff --cc drivers/crypto/vmx/aes.c
index a9064e3,023e5f0..e79e567
--- a/drivers/crypto/vmx/aes.c
+++ b/drivers/crypto/vmx/aes.c
@@@ -73,53 -76,47 +76,53 @@@ static void p8_aes_exit(struct crypto_t
  }

  static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
-                        unsigned int keylen)
+                         unsigned int keylen)
  {
-       int ret;
-       struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
-
-       preempt_disable();
-       pagefault_disable();
-       enable_kernel_altivec();
-       ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
-       ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
-       pagefault_enable();
-       preempt_enable();
-
-       ret += crypto_cipher_setkey(ctx->fallback, key, keylen);
-       return ret;
+       int ret;
+       struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
++      preempt_disable();
+       pagefault_disable();
+       enable_kernel_altivec();
+       ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+       ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+       pagefault_enable();
++      preempt_enable();
+
+       ret += crypto_cipher_setkey(ctx->fallback, key, keylen);
+       return ret;
  }

  static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
  {
-       struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
-
-       if (in_interrupt()) {
-               crypto_cipher_encrypt_one(ctx->fallback, dst, src);
-       } else {
-               preempt_disable();
-               pagefault_disable();
-               enable_kernel_altivec();
-               aes_p8_encrypt(src, dst, &ctx->enc_key);
-               pagefault_enable();
-               preempt_enable();
-       }
+       struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       if (in_interrupt()) {
+               crypto_cipher_encrypt_one(ctx->fallback, dst, src);
+       } else {
++              preempt_disable();
+               pagefault_disable();
+               enable_kernel_altivec();
+               aes_p8_encrypt(src, dst, &ctx->enc_key);
+               pagefault_enable();
++              preempt_enable();
+       }
  }

  static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
  {
-       struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
-
-       if (in_interrupt()) {
-               crypto_cipher_decrypt_one(ctx->fallback, dst, src);
-       } else {
-               preempt_disable();
-               pagefault_disable();
-               enable_kernel_altivec();
-               aes_p8_decrypt(src, dst, &ctx->dec_key);
-               pagefault_enable();
-               preempt_enable();
-       }
+       struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       if (in_interrupt()) {
+               crypto_cipher_decrypt_one(ctx->fallback, dst, src);
+       } else {
++              preempt_disable();
+               pagefault_disable();
+               enable_kernel_altivec();
+               aes_p8_decrypt(src, dst, &ctx->dec_key);
+               pagefault_enable();
++              preempt_enable();
+       }
  }

  struct crypto_alg p8_aes_alg = {
diff --cc drivers/crypto/vmx/aes_cbc.c
index 477284a,7120ab2..7299995
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@@ -74,95 -77,95 +77,101 @@@ static void p8_aes_cbc_exit(struct cryp
  }

  static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
-                            unsigned int keylen)
+                             unsigned int keylen)
  {
-       int ret;
-       struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
-
-       preempt_disable();
-       pagefault_disable();
-       enable_kernel_altivec();
-       ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
-       ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
-       pagefault_enable();
-       preempt_enable();
-
-       ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
-       return ret;
+       int ret;
+       struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+
++      preempt_disable();
+       pagefault_disable();
+       enable_kernel_altivec();
+       ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+       ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+       pagefault_enable();
++      preempt_enable();
+
+       ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
+       return ret;
  }

  static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
-                             struct scatterlist *dst, struct scatterlist *src,
-                             unsigned int nbytes)
+                              struct scatterlist *dst,
+                              struct scatterlist *src, unsigned int nbytes)
  {
-       int ret;
-       struct blkcipher_walk walk;
-       struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(
-                       crypto_blkcipher_tfm(desc->tfm));
-       struct blkcipher_desc fallback_desc = {
-               .tfm = ctx->fallback,
-               .info = desc->info,
-               .flags = desc->flags
-       };
-
-       if (in_interrupt()) {
-               ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes);
-       } else {
-               preempt_disable();
-               pagefault_disable();
-               enable_kernel_altivec();
-
-               blkcipher_walk_init(&walk, dst, src, nbytes);
-               ret = blkcipher_walk_virt(desc, &walk);
-               while ((nbytes = walk.nbytes)) {
-                       aes_p8_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
-                               nbytes & AES_BLOCK_MASK, &ctx->enc_key, walk.iv, 1);
+       int ret;
+       struct blkcipher_walk walk;
+       struct p8_aes_cbc_ctx *ctx =
+               crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
+       struct blkcipher_desc fallback_desc = {
+               .tfm = ctx->fallback,
+               .info = desc->info,
+               .flags = desc->flags
+       };
+
+       if (in_interrupt()) {
+               ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src,
+                                              nbytes);
+       } else {
++              preempt_disable();
+               pagefault_disable();
+               enable_kernel_altivec();
+
+               blkcipher_walk_init(&walk, dst, src, nbytes);
+               ret = blkcipher_walk_virt(desc, &walk);
+               while ((nbytes = walk.nbytes)) {
+                       aes_p8_cbc_encrypt(walk.src.virt.addr,
+                                          walk.dst.virt.addr,
+                                          nbytes & AES_BLOCK_MASK,
+                                          &ctx->enc_key, walk.iv, 1);
                        nbytes &= AES_BLOCK_SIZE - 1;
                        ret = blkcipher_walk_done(desc, &walk, nbytes);
-               }
+               }
-               pagefault_enable();
-               preempt_enable();
-       }
+               pagefault_enable();
++              preempt_enable();
+       }
-       return ret;
+       return ret;
  }

  static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
-                             struct scatterlist *dst, struct scatterlist *src,
-                             unsigned int nbytes)
+                              struct scatterlist *dst,
+                              struct scatterlist *src, unsigned int nbytes)
  {
-       int ret;
-       struct blkcipher_walk walk;
-       struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(
-                       crypto_blkcipher_tfm(desc->tfm));
-       struct blkcipher_desc fallback_desc = {
-               .tfm = ctx->fallback,
-               .info = desc->info,
-               .flags = desc->flags
-       };
-
-       if (in_interrupt()) {
-               ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src, nbytes);
-       } else {
-               preempt_disable();
-               pagefault_disable();
-               enable_kernel_altivec();
-
-               blkcipher_walk_init(&walk, dst, src, nbytes);
-               ret = blkcipher_walk_virt(desc, &walk);
-               while ((nbytes = walk.nbytes)) {
-                       aes_p8_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
-                               nbytes & AES_BLOCK_MASK, &ctx->dec_key, walk.iv, 0);
+       int ret;
+       struct blkcipher_walk walk;
+       struct p8_aes_cbc_ctx *ctx =
+               crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
+       struct blkcipher_desc fallback_desc = {
+               .tfm = ctx->fallback,
+               .info = desc->info,
+               .flags = desc->flags
+       };
+
+       if (in_interrupt()) {
+               ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src,
+                                              nbytes);
+       } else {
++              preempt_disable();
+               pagefault_disable();
+               enable_kernel_altivec();
+
+               blkcipher_walk_init(&walk, dst, src, nbytes);
+               ret = blkcipher_walk_virt(desc, &walk);
+               while ((nbytes = walk.nbytes)) {
+                       aes_p8_cbc_encrypt(walk.src.virt.addr,
+                                          walk.dst.virt.addr,
+                                          nbytes & AES_BLOCK_MASK,
+                                          &ctx->dec_key, walk.iv, 0);
                        nbytes &= AES_BLOCK_SIZE - 1;
                        ret = blkcipher_walk_done(desc, &walk, nbytes);
                }
-               pagefault_enable();
-               preempt_enable();
-       }
+               pagefault_enable();
++              preempt_enable();
+       }
-       return ret;
+       return ret;
  }
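Both CBC paths above rely on the same blkcipher walk idiom:
blkcipher_walk_virt() maps successive scatterlist chunks into kernel
virtual addresses, the cipher consumes whole AES blocks from each chunk,
and blkcipher_walk_done() is told how many bytes (the sub-block
remainder) are still outstanding.  Distilled from the hunks above into a
standalone sketch of the 4.2-era API; process_blocks() is a hypothetical
placeholder standing in for aes_p8_cbc_encrypt():

    #include <crypto/aes.h>
    #include <crypto/algapi.h>

    /* Mirrors the driver-local define: mask down to whole AES blocks. */
    #define AES_BLOCK_MASK (~(AES_BLOCK_SIZE - 1))

    /* Placeholder for the real per-chunk cipher routine. */
    static void process_blocks(u8 *src, u8 *dst, unsigned int nbytes,
                               u8 *iv);

    static int walk_sketch(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
    {
            struct blkcipher_walk walk;
            int ret;

            blkcipher_walk_init(&walk, dst, src, nbytes);
            ret = blkcipher_walk_virt(desc, &walk);
            while ((nbytes = walk.nbytes)) {
                    /* Handle only whole blocks in this pass. */
                    process_blocks(walk.src.virt.addr, walk.dst.virt.addr,
                                   nbytes & AES_BLOCK_MASK, walk.iv);
                    /* Any partial block stays pending for the walk. */
                    nbytes &= AES_BLOCK_SIZE - 1;
                    ret = blkcipher_walk_done(desc, &walk, nbytes);
            }
            return ret;
    }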
diff --cc drivers/crypto/vmx/ghash.c
index f255ec4,4c3a8f7..b5e2900
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@@ -107,98 -109,92 +109,100 @@@ static int p8_ghash_init(struct shash_d
  }

  static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
-                          unsigned int keylen)
+                           unsigned int keylen)
  {
-       struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(tfm));
-
-       if (keylen != GHASH_KEY_LEN)
-               return -EINVAL;
-
-       preempt_disable();
-       pagefault_disable();
-       enable_kernel_altivec();
-       enable_kernel_fp();
-       gcm_init_p8(ctx->htable, (const u64 *) key);
-       pagefault_enable();
-       preempt_enable();
-       return crypto_shash_setkey(ctx->fallback, key, keylen);
+       struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(tfm));
+
+       if (keylen != GHASH_KEY_LEN)
+               return -EINVAL;
+
++      preempt_disable();
+       pagefault_disable();
+       enable_kernel_altivec();
+       enable_kernel_fp();
+       gcm_init_p8(ctx->htable, (const u64 *) key);
+       pagefault_enable();
++      preempt_enable();
+       return crypto_shash_setkey(ctx->fallback, key, keylen);
  }

  static int p8_ghash_update(struct shash_desc *desc,
-                          const u8 *src, unsigned int srclen)
+                           const u8 *src, unsigned int srclen)
  {
-       unsigned int len;
-       struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
-       struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
-
-       if (IN_INTERRUPT) {
-               return crypto_shash_update(&dctx->fallback_desc, src, srclen);
-       } else {
-               if (dctx->bytes) {
-                       if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
-                               memcpy(dctx->buffer + dctx->bytes, src, srclen);
-                               dctx->bytes += srclen;
-                               return 0;
-                       }
-                       memcpy(dctx->buffer + dctx->bytes, src,
-                               GHASH_DIGEST_SIZE - dctx->bytes);
-                       preempt_disable();
-                       pagefault_disable();
-                       enable_kernel_altivec();
-                       enable_kernel_fp();
-                       gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
-                               GHASH_DIGEST_SIZE);
-                       pagefault_enable();
-                       preempt_enable();
-                       src += GHASH_DIGEST_SIZE - dctx->bytes;
-                       srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
-                       dctx->bytes = 0;
-               }
-               len = srclen & ~(GHASH_DIGEST_SIZE - 1);
-               if (len) {
-                       preempt_disable();
-                       pagefault_disable();
-                       enable_kernel_altivec();
-                       enable_kernel_fp();
-                       gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
-                       pagefault_enable();
-                       preempt_enable();
-                       src += len;
-                       srclen -= len;
-               }
-               if (srclen) {
-                       memcpy(dctx->buffer, src, srclen);
-                       dctx->bytes = srclen;
-               }
-               return 0;
-       }
+       unsigned int len;
+       struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
+       struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+       if (IN_INTERRUPT) {
+               return crypto_shash_update(&dctx->fallback_desc, src,
+                                          srclen);
+       } else {
+               if (dctx->bytes) {
+                       if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
+                               memcpy(dctx->buffer + dctx->bytes, src,
+                                      srclen);
+                               dctx->bytes += srclen;
+                               return 0;
+                       }
+                       memcpy(dctx->buffer + dctx->bytes, src,
+                              GHASH_DIGEST_SIZE - dctx->bytes);
++                      preempt_disable();
+                       pagefault_disable();
+                       enable_kernel_altivec();
+                       enable_kernel_fp();
+                       gcm_ghash_p8(dctx->shash, ctx->htable,
+                                    dctx->buffer, GHASH_DIGEST_SIZE);
+                       pagefault_enable();
++                      preempt_enable();
+                       src += GHASH_DIGEST_SIZE - dctx->bytes;
+                       srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
+                       dctx->bytes = 0;
+               }
+               len = srclen & ~(GHASH_DIGEST_SIZE - 1);
+               if (len) {
++                      preempt_disable();
+                       pagefault_disable();
+                       enable_kernel_altivec();
+                       enable_kernel_fp();
+                       gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
+                       pagefault_enable();
++                      preempt_enable();
+                       src += len;
+                       srclen -= len;
+               }
+               if (srclen) {
+                       memcpy(dctx->buffer, src, srclen);
+                       dctx->bytes = srclen;
+               }
+               return 0;
+       }
  }

  static int p8_ghash_final(struct shash_desc *desc, u8 *out)
  {
-       int i;
-       struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
-       struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
-
-       if (IN_INTERRUPT) {
-               return crypto_shash_final(&dctx->fallback_desc, out);
-       } else {
-               if (dctx->bytes) {
-                       for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
-                               dctx->buffer[i] = 0;
-                       preempt_disable();
-                       pagefault_disable();
-                       enable_kernel_altivec();
-                       enable_kernel_fp();
-                       gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
-                               GHASH_DIGEST_SIZE);
-                       pagefault_enable();
-                       preempt_enable();
-                       dctx->bytes = 0;
-               }
-               memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
-               return 0;
-       }
+       int i;
+       struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
+       struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+       if (IN_INTERRUPT) {
+               return crypto_shash_final(&dctx->fallback_desc, out);
+       } else {
+               if (dctx->bytes) {
+                       for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
+                               dctx->buffer[i] = 0;
++                      preempt_disable();
+                       pagefault_disable();
+                       enable_kernel_altivec();
+                       enable_kernel_fp();
+                       gcm_ghash_p8(dctx->shash, ctx->htable,
+                                    dctx->buffer, GHASH_DIGEST_SIZE);
+                       pagefault_enable();
++                      preempt_enable();
+                       dctx->bytes = 0;
+               }
+               memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
+               return 0;
+       }
  }

  struct shash_alg p8_ghash_alg = {
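The pattern repeated in every `++` hunk above is the substance of this
merge resolution: on powerpc, enable_kernel_altivec() (plus
enable_kernel_fp() in the GHASH case) hands the current CPU's vector/FP
unit to kernel code, so the sequence must not be preempted or service a
page fault midway.  The resolution keeps the crypto tree's reindented
code while threading preempt_disable()/preempt_enable() around the
existing pagefault_disable()/pagefault_enable() pairs.  Distilled from
the hunks above, the guard has this shape, with in_interrupt() callers
routed to the generic software fallback instead:

    if (in_interrupt()) {
            /* Vector state can't be borrowed here; use the fallback tfm. */
            crypto_cipher_encrypt_one(ctx->fallback, dst, src);
    } else {
            preempt_disable();      /* keep the task on this CPU */
            pagefault_disable();    /* no faults while VMX is borrowed */
            enable_kernel_altivec();
            /* ... AltiVec/VSX-accelerated work ... */
            pagefault_enable();
            preempt_enable();
    }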