/* ====================================================================
 * Copyright (c) 2001-2011 The OpenSSL Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    openssl-core@openssl.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ==================================================================== */
#include <stdlib.h>
#include <string.h>

#include <openssl/aead.h>
#include <openssl/aes.h>
#include <openssl/cipher.h>
#include <openssl/cpu.h>
#include <openssl/err.h>
#include <openssl/mem.h>
#include <openssl/modes.h>
#include <openssl/obj.h>
#include <openssl/rand.h>

#include "internal.h"
#include "../modes/internal.h"
typedef struct {
  union {
    double align;
    AES_KEY ks;
  } ks;
  block128_f block;
  union {
    cbc128_f cbc;
    ctr128_f ctr;
  } stream;
} EVP_AES_KEY;

typedef struct {
  union {
    double align;
    AES_KEY ks;
  } ks;        /* AES key schedule to use */
  int key_set; /* Set if key initialised */
  int iv_set;  /* Set if an iv is set */
  GCM128_CONTEXT gcm;
  uint8_t *iv; /* Temporary IV store */
  int ivlen;   /* IV length */
  int taglen;
  int iv_gen; /* It is OK to generate IVs */
  ctr128_f ctr;
} EVP_AES_GCM_CTX;
#if !defined(OPENSSL_NO_ASM) && \
    (defined(OPENSSL_X86_64) || defined(OPENSSL_X86))
#define VPAES
extern unsigned int OPENSSL_ia32cap_P[];

static char vpaes_capable(void) {
  /* Bit 41 (bit 9 of ECX from CPUID(1)) is the SSSE3 feature flag, which the
   * vector-permute AES implementation requires. */
  return (OPENSSL_ia32cap_P[1] & (1 << (41 - 32))) != 0;
}

#if defined(OPENSSL_X86_64)
#define BSAES
static char bsaes_capable(void) {
  return vpaes_capable();
}
#endif

#elif !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM)
#include "../arm_arch.h"
#if __ARM_ARCH__ >= 7
#define BSAES
static char bsaes_capable(void) {
  return CRYPTO_is_NEON_capable();
}
#endif /* __ARM_ARCH__ >= 7 */
#endif /* OPENSSL_ARM */
#if defined(BSAES)
/* On platforms where BSAES gets defined (just above), these functions are
 * provided by asm. */
void bsaes_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length,
                       const AES_KEY *key, uint8_t ivec[16], int enc);
void bsaes_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, size_t len,
                                const AES_KEY *key, const uint8_t ivec[16]);
#else
static char bsaes_capable(void) {
  return 0;
}

/* On other platforms, bsaes_capable() will always return false and so the
 * following will never be called. */
void bsaes_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length,
                       const AES_KEY *key, uint8_t ivec[16], int enc) {
  abort();
}

void bsaes_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, size_t len,
                                const AES_KEY *key, const uint8_t ivec[16]) {
  abort();
}
#endif
#if defined(VPAES)
/* On platforms where VPAES gets defined (just above), these functions are
 * provided by asm. */
int vpaes_set_encrypt_key(const uint8_t *userKey, int bits, AES_KEY *key);
int vpaes_set_decrypt_key(const uint8_t *userKey, int bits, AES_KEY *key);

void vpaes_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key);
void vpaes_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key);

void vpaes_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length,
                       const AES_KEY *key, uint8_t *ivec, int enc);
#else
static char vpaes_capable(void) {
  return 0;
}

/* On other platforms, vpaes_capable() will always return false and so the
 * following will never be called. */
int vpaes_set_encrypt_key(const uint8_t *userKey, int bits, AES_KEY *key) {
  abort();
}

int vpaes_set_decrypt_key(const uint8_t *userKey, int bits, AES_KEY *key) {
  abort();
}

void vpaes_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
  abort();
}

void vpaes_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
  abort();
}

void vpaes_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length,
                       const AES_KEY *key, uint8_t *ivec, int enc) {
  abort();
}
#endif
#if !defined(OPENSSL_NO_ASM) && \
    (defined(OPENSSL_X86_64) || defined(OPENSSL_X86))
int aesni_set_encrypt_key(const uint8_t *userKey, int bits, AES_KEY *key);
int aesni_set_decrypt_key(const uint8_t *userKey, int bits, AES_KEY *key);

void aesni_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key);
void aesni_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key);

void aesni_ecb_encrypt(const uint8_t *in, uint8_t *out, size_t length,
                       const AES_KEY *key, int enc);
void aesni_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length,
                       const AES_KEY *key, uint8_t *ivec, int enc);

void aesni_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, size_t blocks,
                                const void *key, const uint8_t *ivec);

#if defined(OPENSSL_X86_64)
size_t aesni_gcm_encrypt(const uint8_t *in, uint8_t *out, size_t len,
                         const void *key, uint8_t ivec[16], uint64_t *Xi);
#define AES_gcm_encrypt aesni_gcm_encrypt
size_t aesni_gcm_decrypt(const uint8_t *in, uint8_t *out, size_t len,
                         const void *key, uint8_t ivec[16], uint64_t *Xi);
#define AES_gcm_decrypt aesni_gcm_decrypt

void gcm_ghash_avx(uint64_t Xi[2], const u128 Htable[16], const uint8_t *in,
                   size_t len);

/* AES_GCM_ASM is true iff the stitched AES-GCM assembly may be used: both the
 * CTR function and the GHASH function must be the AVX implementations that
 * the assembly expects. */
#define AES_GCM_ASM(gctx) \
  (gctx->ctr == aesni_ctr32_encrypt_blocks && gctx->gcm.ghash == gcm_ghash_avx)
#endif /* OPENSSL_X86_64 */

#else

/* On other platforms, aesni_capable() will always return false and so the
 * following will never be called. */
void aesni_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
  abort();
}

int aesni_set_encrypt_key(const uint8_t *userKey, int bits, AES_KEY *key) {
  abort();
}

void aesni_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, size_t blocks,
                                const void *key, const uint8_t *ivec) {
  abort();
}

#endif
static int aes_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key,
                        const uint8_t *iv, int enc) {
  int ret, mode;
  EVP_AES_KEY *dat = (EVP_AES_KEY *)ctx->cipher_data;

  mode = ctx->cipher->flags & EVP_CIPH_MODE_MASK;
  if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE) && !enc) {
    if (bsaes_capable() && mode == EVP_CIPH_CBC_MODE) {
      ret = AES_set_decrypt_key(key, ctx->key_len * 8, &dat->ks.ks);
      dat->block = (block128_f)AES_decrypt;
      dat->stream.cbc = (cbc128_f)bsaes_cbc_encrypt;
    } else if (vpaes_capable()) {
      ret = vpaes_set_decrypt_key(key, ctx->key_len * 8, &dat->ks.ks);
      dat->block = (block128_f)vpaes_decrypt;
      dat->stream.cbc =
          mode == EVP_CIPH_CBC_MODE ? (cbc128_f)vpaes_cbc_encrypt : NULL;
    } else {
      ret = AES_set_decrypt_key(key, ctx->key_len * 8, &dat->ks.ks);
      dat->block = (block128_f)AES_decrypt;
      dat->stream.cbc =
          mode == EVP_CIPH_CBC_MODE ? (cbc128_f)AES_cbc_encrypt : NULL;
    }
  } else if (bsaes_capable() && mode == EVP_CIPH_CTR_MODE) {
    ret = AES_set_encrypt_key(key, ctx->key_len * 8, &dat->ks.ks);
    dat->block = (block128_f)AES_encrypt;
    dat->stream.ctr = (ctr128_f)bsaes_ctr32_encrypt_blocks;
  } else if (vpaes_capable()) {
    ret = vpaes_set_encrypt_key(key, ctx->key_len * 8, &dat->ks.ks);
    dat->block = (block128_f)vpaes_encrypt;
    dat->stream.cbc =
        mode == EVP_CIPH_CBC_MODE ? (cbc128_f)vpaes_cbc_encrypt : NULL;
  } else {
    ret = AES_set_encrypt_key(key, ctx->key_len * 8, &dat->ks.ks);
    dat->block = (block128_f)AES_encrypt;
    dat->stream.cbc =
        mode == EVP_CIPH_CBC_MODE ? (cbc128_f)AES_cbc_encrypt : NULL;
  }

  if (ret < 0) {
    OPENSSL_PUT_ERROR(CIPHER, aes_init_key, CIPHER_R_AES_KEY_SETUP_FAILED);
    return 0;
  }

  return 1;
}
static int aes_cbc_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out,
                          const uint8_t *in, size_t len) {
  EVP_AES_KEY *dat = (EVP_AES_KEY *)ctx->cipher_data;

  if (dat->stream.cbc) {
    (*dat->stream.cbc)(in, out, len, &dat->ks, ctx->iv, ctx->encrypt);
  } else if (ctx->encrypt) {
    CRYPTO_cbc128_encrypt(in, out, len, &dat->ks, ctx->iv, dat->block);
  } else {
    CRYPTO_cbc128_decrypt(in, out, len, &dat->ks, ctx->iv, dat->block);
  }

  return 1;
}
static int aes_ecb_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out,
                          const uint8_t *in, size_t len) {
  size_t bl = ctx->cipher->block_size;
  size_t i;
  EVP_AES_KEY *dat = (EVP_AES_KEY *)ctx->cipher_data;

  if (len < bl) {
    return 1;
  }

  /* |len| is reduced by one block so the bound |i <= len| covers exactly the
   * complete blocks in the input. */
  for (i = 0, len -= bl; i <= len; i += bl) {
    (*dat->block)(in + i, out + i, &dat->ks);
  }

  return 1;
}
static int aes_ctr_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out,
                          const uint8_t *in, size_t len) {
  unsigned int num = ctx->num;
  EVP_AES_KEY *dat = (EVP_AES_KEY *)ctx->cipher_data;

  if (dat->stream.ctr) {
    CRYPTO_ctr128_encrypt_ctr32(in, out, len, &dat->ks, ctx->iv, ctx->buf, &num,
                                dat->stream.ctr);
  } else {
    CRYPTO_ctr128_encrypt(in, out, len, &dat->ks, ctx->iv, ctx->buf, &num,
                          dat->block);
  }
  ctx->num = (size_t)num;
  return 1;
}
static ctr128_f aes_gcm_set_key(AES_KEY *aes_key, GCM128_CONTEXT *gcm_ctx,
                                const uint8_t *key, size_t key_len) {
  if (bsaes_capable()) {
    AES_set_encrypt_key(key, key_len * 8, aes_key);
    CRYPTO_gcm128_init(gcm_ctx, aes_key, (block128_f)AES_encrypt);
    return (ctr128_f)bsaes_ctr32_encrypt_blocks;
  }

  if (vpaes_capable()) {
    vpaes_set_encrypt_key(key, key_len * 8, aes_key);
    CRYPTO_gcm128_init(gcm_ctx, aes_key, (block128_f)vpaes_encrypt);
    return NULL;
  }

  AES_set_encrypt_key(key, key_len * 8, aes_key);
  CRYPTO_gcm128_init(gcm_ctx, aes_key, (block128_f)AES_encrypt);
  return NULL;
}
static int aes_gcm_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key,
                            const uint8_t *iv, int enc) {
  EVP_AES_GCM_CTX *gctx = ctx->cipher_data;

  if (!iv && !key) {
    return 1;
  }

  if (key) {
    gctx->ctr = aes_gcm_set_key(&gctx->ks.ks, &gctx->gcm, key, ctx->key_len);
    /* If we have an IV, set it directly; otherwise use the saved IV. */
    if (iv == NULL && gctx->iv_set) {
      iv = gctx->iv;
    }
    if (iv) {
      CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
      gctx->iv_set = 1;
    }
    gctx->key_set = 1;
  } else {
    /* If the key is set, use the IV; otherwise copy it for later. */
    if (gctx->key_set) {
      CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
    } else {
      memcpy(gctx->iv, iv, gctx->ivlen);
    }
    gctx->iv_set = 1;
    gctx->iv_gen = 0;
  }

  return 1;
}
static int aes_gcm_cleanup(EVP_CIPHER_CTX *c) {
  EVP_AES_GCM_CTX *gctx = c->cipher_data;
  OPENSSL_cleanse(&gctx->gcm, sizeof(gctx->gcm));
  if (gctx->iv != c->iv) {
    OPENSSL_free(gctx->iv);
  }
  return 1;
}
/* ctr64_inc increments the 64-bit, big-endian counter in |counter| by one,
 * propagating the carry from the least-significant byte upwards. */
static void ctr64_inc(uint8_t *counter) {
  int n = 8;
  uint8_t c;

  do {
    --n;
    c = counter[n];
    ++c;
    counter[n] = c;
    if (c) {
      return;
    }
  } while (n);
}
static int aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr) {
  EVP_AES_GCM_CTX *gctx = c->cipher_data;

  switch (type) {
    case EVP_CTRL_INIT:
      gctx->key_set = 0;
      gctx->iv_set = 0;
      gctx->ivlen = c->cipher->iv_len;
      gctx->iv = c->iv;
      gctx->taglen = -1;
      gctx->iv_gen = 0;
      return 1;

    case EVP_CTRL_GCM_SET_IVLEN:
      if (arg <= 0) {
        return 0;
      }

      /* Allocate memory for IV if needed */
      if (arg > EVP_MAX_IV_LENGTH && arg > gctx->ivlen) {
        if (gctx->iv != c->iv) {
          OPENSSL_free(gctx->iv);
        }
        gctx->iv = OPENSSL_malloc(arg);
        if (!gctx->iv) {
          return 0;
        }
      }
      gctx->ivlen = arg;
      return 1;

    case EVP_CTRL_GCM_SET_TAG:
      if (arg <= 0 || arg > 16 || c->encrypt) {
        return 0;
      }
      memcpy(c->buf, ptr, arg);
      gctx->taglen = arg;
      return 1;

    case EVP_CTRL_GCM_GET_TAG:
      if (arg <= 0 || arg > 16 || !c->encrypt || gctx->taglen < 0) {
        return 0;
      }
      memcpy(ptr, c->buf, arg);
      return 1;

    case EVP_CTRL_GCM_SET_IV_FIXED:
      /* Special case: -1 length restores whole IV */
      if (arg == -1) {
        memcpy(gctx->iv, ptr, gctx->ivlen);
        gctx->iv_gen = 1;
        return 1;
      }
      /* Fixed field must be at least 4 bytes and the invocation field at
       * least 8 bytes. */
      if (arg < 4 || (gctx->ivlen - arg) < 8) {
        return 0;
      }
      if (arg) {
        memcpy(gctx->iv, ptr, arg);
      }
      if (c->encrypt &&
          RAND_pseudo_bytes(gctx->iv + arg, gctx->ivlen - arg) <= 0) {
        return 0;
      }
      gctx->iv_gen = 1;
      return 1;

    case EVP_CTRL_GCM_IV_GEN:
      if (gctx->iv_gen == 0 || gctx->key_set == 0) {
        return 0;
      }
      CRYPTO_gcm128_setiv(&gctx->gcm, gctx->iv, gctx->ivlen);
      if (arg <= 0 || arg > gctx->ivlen) {
        arg = gctx->ivlen;
      }
      memcpy(ptr, gctx->iv + gctx->ivlen - arg, arg);
      /* The invocation field will be at least 8 bytes in size, so there is
       * no need to check for wrap around or to increment more than the last
       * 8 bytes. */
      ctr64_inc(gctx->iv + gctx->ivlen - 8);
      gctx->iv_set = 1;
      return 1;

    case EVP_CTRL_GCM_SET_IV_INV:
      if (gctx->iv_gen == 0 || gctx->key_set == 0 || c->encrypt) {
        return 0;
      }
      memcpy(gctx->iv + gctx->ivlen - arg, ptr, arg);
      CRYPTO_gcm128_setiv(&gctx->gcm, gctx->iv, gctx->ivlen);
      gctx->iv_set = 1;
      return 1;

    case EVP_CTRL_COPY: {
      EVP_CIPHER_CTX *out = ptr;
      EVP_AES_GCM_CTX *gctx_out = out->cipher_data;

      if (gctx->gcm.key) {
        if (gctx->gcm.key != &gctx->ks) {
          return 0;
        }
        gctx_out->gcm.key = &gctx_out->ks;
      }

      if (gctx->iv == c->iv) {
        gctx_out->iv = out->iv;
      } else {
        gctx_out->iv = OPENSSL_malloc(gctx->ivlen);
        if (!gctx_out->iv) {
          return 0;
        }
        memcpy(gctx_out->iv, gctx->iv, gctx->ivlen);
      }
      return 1;
    }

    default:
      return -1;
  }
}
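/* A note on the IV controls above (illustrative; not from the original file):
 * with the default 12-byte IV, EVP_CTRL_GCM_SET_IV_FIXED splits the nonce as
 *
 *     | 4-byte fixed field | 8-byte invocation field |
 *
 * The fixed field is caller-supplied; on the encrypt side the invocation
 * field is seeded with RAND_pseudo_bytes() and then stepped with ctr64_inc()
 * on every EVP_CTRL_GCM_IV_GEN, so a nonce is never repeated under one key. */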
static int aes_gcm_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in,
                          size_t len) {
  EVP_AES_GCM_CTX *gctx = ctx->cipher_data;

  /* If not set up, return error */
  if (!gctx->key_set) {
    return -1;
  }
  if (!gctx->iv_set) {
    return -1;
  }

  if (in) {
    if (out == NULL) {
      /* A NULL output buffer means |in| is additional authenticated data. */
      if (!CRYPTO_gcm128_aad(&gctx->gcm, in, len)) {
        return -1;
      }
    } else if (ctx->encrypt) {
      if (gctx->ctr) {
        size_t bulk = 0;
#if defined(AES_GCM_ASM)
        if (len >= 32 && AES_GCM_ASM(gctx)) {
          /* |res| is the number of bytes needed to reach a 16-byte block
           * boundary before handing off to the stitched assembly. */
          size_t res = (16 - gctx->gcm.mres) % 16;

          if (!CRYPTO_gcm128_encrypt(&gctx->gcm, in, out, res)) {
            return -1;
          }

          bulk = AES_gcm_encrypt(in + res, out + res, len - res, gctx->gcm.key,
                                 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
          gctx->gcm.len.u[1] += bulk;
          bulk += res;
        }
#endif
        if (!CRYPTO_gcm128_encrypt_ctr32(&gctx->gcm, in + bulk, out + bulk,
                                         len - bulk, gctx->ctr)) {
          return -1;
        }
      } else {
        size_t bulk = 0;
        if (!CRYPTO_gcm128_encrypt(&gctx->gcm, in + bulk, out + bulk,
                                   len - bulk)) {
          return -1;
        }
      }
    } else {
      if (gctx->ctr) {
        size_t bulk = 0;
#if defined(AES_GCM_ASM)
        if (len >= 16 && AES_GCM_ASM(gctx)) {
          size_t res = (16 - gctx->gcm.mres) % 16;

          if (!CRYPTO_gcm128_decrypt(&gctx->gcm, in, out, res)) {
            return -1;
          }

          bulk = AES_gcm_decrypt(in + res, out + res, len - res, gctx->gcm.key,
                                 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
          gctx->gcm.len.u[1] += bulk;
          bulk += res;
        }
#endif
        if (!CRYPTO_gcm128_decrypt_ctr32(&gctx->gcm, in + bulk, out + bulk,
                                         len - bulk, gctx->ctr)) {
          return -1;
        }
      } else {
        size_t bulk = 0;
        if (!CRYPTO_gcm128_decrypt(&gctx->gcm, in + bulk, out + bulk,
                                   len - bulk)) {
          return -1;
        }
      }
    }
    return len;
  } else {
    if (!ctx->encrypt) {
      if (gctx->taglen < 0 ||
          !CRYPTO_gcm128_finish(&gctx->gcm, ctx->buf, gctx->taglen)) {
        return -1;
      }
      gctx->iv_set = 0;
      return 0;
    }
    CRYPTO_gcm128_tag(&gctx->gcm, ctx->buf, 16);
    gctx->taglen = 16;
    /* Don't reuse the IV */
    gctx->iv_set = 0;
    return 0;
  }
}
static const EVP_CIPHER aes_128_cbc = {
    NID_aes_128_cbc, 16 /* block_size */, 16 /* key_size */,
    16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_CBC_MODE,
    NULL /* app_data */, aes_init_key, aes_cbc_cipher,
    NULL /* cleanup */, NULL /* ctrl */};

static const EVP_CIPHER aes_128_ctr = {
    NID_aes_128_ctr, 1 /* block_size */, 16 /* key_size */,
    16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_CTR_MODE,
    NULL /* app_data */, aes_init_key, aes_ctr_cipher,
    NULL /* cleanup */, NULL /* ctrl */};

static const EVP_CIPHER aes_128_ecb = {
    NID_aes_128_ecb, 16 /* block_size */, 16 /* key_size */,
    0 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_ECB_MODE,
    NULL /* app_data */, aes_init_key, aes_ecb_cipher,
    NULL /* cleanup */, NULL /* ctrl */};

static const EVP_CIPHER aes_128_gcm = {
    NID_aes_128_gcm, 1 /* block_size */, 16 /* key_size */, 12 /* iv_len */,
    sizeof(EVP_AES_GCM_CTX),
    EVP_CIPH_GCM_MODE | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER |
        EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT | EVP_CIPH_CUSTOM_COPY |
        EVP_CIPH_FLAG_AEAD_CIPHER,
    NULL /* app_data */, aes_gcm_init_key, aes_gcm_cipher, aes_gcm_cleanup,
    aes_gcm_ctrl};

static const EVP_CIPHER aes_256_cbc = {
    NID_aes_256_cbc, 16 /* block_size */, 32 /* key_size */,
    16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_CBC_MODE,
    NULL /* app_data */, aes_init_key, aes_cbc_cipher,
    NULL /* cleanup */, NULL /* ctrl */};

static const EVP_CIPHER aes_256_ctr = {
    NID_aes_256_ctr, 1 /* block_size */, 32 /* key_size */,
    16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_CTR_MODE,
    NULL /* app_data */, aes_init_key, aes_ctr_cipher,
    NULL /* cleanup */, NULL /* ctrl */};

static const EVP_CIPHER aes_256_ecb = {
    NID_aes_256_ecb, 16 /* block_size */, 32 /* key_size */,
    0 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_ECB_MODE,
    NULL /* app_data */, aes_init_key, aes_ecb_cipher,
    NULL /* cleanup */, NULL /* ctrl */};

static const EVP_CIPHER aes_256_gcm = {
    NID_aes_256_gcm, 1 /* block_size */, 32 /* key_size */, 12 /* iv_len */,
    sizeof(EVP_AES_GCM_CTX),
    EVP_CIPH_GCM_MODE | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER |
        EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT | EVP_CIPH_CUSTOM_COPY |
        EVP_CIPH_FLAG_AEAD_CIPHER,
    NULL /* app_data */, aes_gcm_init_key, aes_gcm_cipher, aes_gcm_cleanup,
    aes_gcm_ctrl};
#if !defined(OPENSSL_NO_ASM) && \
    (defined(OPENSSL_X86_64) || defined(OPENSSL_X86))

/* AES-NI section. */

static char aesni_capable(void) {
  /* Bit 57 (bit 25 of ECX from CPUID(1)) is the AES-NI feature flag. */
  return (OPENSSL_ia32cap_P[1] & (1 << (57 - 32))) != 0;
}

static int aesni_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key,
                          const uint8_t *iv, int enc) {
  int ret, mode;
  EVP_AES_KEY *dat = (EVP_AES_KEY *)ctx->cipher_data;

  mode = ctx->cipher->flags & EVP_CIPH_MODE_MASK;
  if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE) && !enc) {
    ret = aesni_set_decrypt_key(key, ctx->key_len * 8, ctx->cipher_data);
    dat->block = (block128_f)aesni_decrypt;
    dat->stream.cbc =
        mode == EVP_CIPH_CBC_MODE ? (cbc128_f)aesni_cbc_encrypt : NULL;
  } else {
    ret = aesni_set_encrypt_key(key, ctx->key_len * 8, ctx->cipher_data);
    dat->block = (block128_f)aesni_encrypt;
    if (mode == EVP_CIPH_CBC_MODE) {
      dat->stream.cbc = (cbc128_f)aesni_cbc_encrypt;
    } else if (mode == EVP_CIPH_CTR_MODE) {
      dat->stream.ctr = (ctr128_f)aesni_ctr32_encrypt_blocks;
    } else {
      dat->stream.cbc = NULL;
    }
  }

  if (ret < 0) {
    OPENSSL_PUT_ERROR(CIPHER, aesni_init_key, CIPHER_R_AES_KEY_SETUP_FAILED);
    return 0;
  }

  return 1;
}
static int aesni_cbc_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out,
                            const uint8_t *in, size_t len) {
  aesni_cbc_encrypt(in, out, len, ctx->cipher_data, ctx->iv, ctx->encrypt);
  return 1;
}

static int aesni_ecb_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out,
                            const uint8_t *in, size_t len) {
  size_t bl = ctx->cipher->block_size;

  if (len < bl) {
    return 1;
  }

  aesni_ecb_encrypt(in, out, len, ctx->cipher_data, ctx->encrypt);
  return 1;
}
static int aesni_gcm_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key,
                              const uint8_t *iv, int enc) {
  EVP_AES_GCM_CTX *gctx = ctx->cipher_data;

  if (!iv && !key) {
    return 1;
  }

  if (key) {
    aesni_set_encrypt_key(key, ctx->key_len * 8, &gctx->ks.ks);
    CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks, (block128_f)aesni_encrypt);
    gctx->ctr = (ctr128_f)aesni_ctr32_encrypt_blocks;
    /* If we have an IV, set it directly; otherwise use the saved IV. */
    if (iv == NULL && gctx->iv_set) {
      iv = gctx->iv;
    }
    if (iv) {
      CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
      gctx->iv_set = 1;
    }
    gctx->key_set = 1;
  } else {
    /* If the key is set, use the IV; otherwise copy it for later. */
    if (gctx->key_set) {
      CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
    } else {
      memcpy(gctx->iv, iv, gctx->ivlen);
    }
    gctx->iv_set = 1;
    gctx->iv_gen = 0;
  }

  return 1;
}
static const EVP_CIPHER aesni_128_cbc = {
    NID_aes_128_cbc, 16 /* block_size */, 16 /* key_size */,
    16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_CBC_MODE,
    NULL /* app_data */, aesni_init_key, aesni_cbc_cipher,
    NULL /* cleanup */, NULL /* ctrl */};

static const EVP_CIPHER aesni_128_ctr = {
    NID_aes_128_ctr, 1 /* block_size */, 16 /* key_size */,
    16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_CTR_MODE,
    NULL /* app_data */, aesni_init_key, aes_ctr_cipher,
    NULL /* cleanup */, NULL /* ctrl */};

static const EVP_CIPHER aesni_128_ecb = {
    NID_aes_128_ecb, 16 /* block_size */, 16 /* key_size */,
    0 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_ECB_MODE,
    NULL /* app_data */, aesni_init_key, aesni_ecb_cipher,
    NULL /* cleanup */, NULL /* ctrl */};

static const EVP_CIPHER aesni_128_gcm = {
    NID_aes_128_gcm, 1 /* block_size */, 16 /* key_size */, 12 /* iv_len */,
    sizeof(EVP_AES_GCM_CTX),
    EVP_CIPH_GCM_MODE | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER |
        EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT | EVP_CIPH_CUSTOM_COPY |
        EVP_CIPH_FLAG_AEAD_CIPHER,
    NULL /* app_data */, aesni_gcm_init_key, aes_gcm_cipher, aes_gcm_cleanup,
    aes_gcm_ctrl};

static const EVP_CIPHER aesni_256_cbc = {
    NID_aes_256_cbc, 16 /* block_size */, 32 /* key_size */,
    16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_CBC_MODE,
    NULL /* app_data */, aesni_init_key, aesni_cbc_cipher,
    NULL /* cleanup */, NULL /* ctrl */};

static const EVP_CIPHER aesni_256_ctr = {
    NID_aes_256_ctr, 1 /* block_size */, 32 /* key_size */,
    16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_CTR_MODE,
    NULL /* app_data */, aesni_init_key, aes_ctr_cipher,
    NULL /* cleanup */, NULL /* ctrl */};

static const EVP_CIPHER aesni_256_ecb = {
    NID_aes_256_ecb, 16 /* block_size */, 32 /* key_size */,
    0 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_ECB_MODE,
    NULL /* app_data */, aesni_init_key, aesni_ecb_cipher,
    NULL /* cleanup */, NULL /* ctrl */};

static const EVP_CIPHER aesni_256_gcm = {
    NID_aes_256_gcm, 1 /* block_size */, 32 /* key_size */, 12 /* iv_len */,
    sizeof(EVP_AES_GCM_CTX),
    EVP_CIPH_GCM_MODE | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER |
        EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT | EVP_CIPH_CUSTOM_COPY |
        EVP_CIPH_FLAG_AEAD_CIPHER,
    NULL /* app_data */, aesni_gcm_init_key, aes_gcm_cipher, aes_gcm_cleanup,
    aes_gcm_ctrl};
#define EVP_CIPHER_FUNCTION(keybits, mode)             \
  const EVP_CIPHER *EVP_aes_##keybits##_##mode(void) { \
    if (aesni_capable()) {                             \
      return &aesni_##keybits##_##mode;                \
    } else {                                           \
      return &aes_##keybits##_##mode;                  \
    }                                                  \
  }

#else /* ^^^ OPENSSL_X86_64 || OPENSSL_X86 */

static char aesni_capable(void) {
  return 0;
}

#define EVP_CIPHER_FUNCTION(keybits, mode)             \
  const EVP_CIPHER *EVP_aes_##keybits##_##mode(void) { \
    return &aes_##keybits##_##mode;                    \
  }

#endif

EVP_CIPHER_FUNCTION(128, cbc)
EVP_CIPHER_FUNCTION(128, ctr)
EVP_CIPHER_FUNCTION(128, ecb)
EVP_CIPHER_FUNCTION(128, gcm)

EVP_CIPHER_FUNCTION(256, cbc)
EVP_CIPHER_FUNCTION(256, ctr)
EVP_CIPHER_FUNCTION(256, ecb)
EVP_CIPHER_FUNCTION(256, gcm)
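/* Hedged usage sketch (not part of the original file): sealing one message
 * with the AES-128-GCM EVP_CIPHER defined above, via the generic EVP
 * interface from <openssl/cipher.h>. |example_evp_gcm_encrypt| and its
 * parameters are hypothetical names; error handling is collapsed into a
 * single |ok| flag for brevity. */
#if 0 /* example only */
static int example_evp_gcm_encrypt(const uint8_t key[16],
                                   const uint8_t nonce[12],
                                   const uint8_t *aad, int aad_len,
                                   const uint8_t *msg, int msg_len,
                                   uint8_t *out /* msg_len bytes */,
                                   uint8_t tag[16]) {
  EVP_CIPHER_CTX ctx;
  int len, ok;

  EVP_CIPHER_CTX_init(&ctx);
  ok = EVP_EncryptInit_ex(&ctx, EVP_aes_128_gcm(), NULL, key, nonce) &&
       /* A NULL output buffer feeds |aad| to CRYPTO_gcm128_aad(). */
       EVP_EncryptUpdate(&ctx, NULL, &len, aad, aad_len) &&
       EVP_EncryptUpdate(&ctx, out, &len, msg, msg_len) &&
       EVP_EncryptFinal_ex(&ctx, out + len, &len) &&
       /* EVP_CTRL_GCM_GET_TAG copies the tag computed by aes_gcm_cipher(). */
       EVP_CIPHER_CTX_ctrl(&ctx, EVP_CTRL_GCM_GET_TAG, 16, tag);
  EVP_CIPHER_CTX_cleanup(&ctx);
  return ok;
}
#endif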
#define EVP_AEAD_AES_GCM_TAG_LEN 16

struct aead_aes_gcm_ctx {
  union {
    double align;
    AES_KEY ks;
  } ks;
  GCM128_CONTEXT gcm;
  ctr128_f ctr;
  uint8_t tag_len;
};
static int aead_aes_gcm_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                             size_t key_len, size_t tag_len) {
  struct aead_aes_gcm_ctx *gcm_ctx;
  const size_t key_bits = key_len * 8;

  if (key_bits != 128 && key_bits != 256) {
    OPENSSL_PUT_ERROR(CIPHER, aead_aes_gcm_init, CIPHER_R_BAD_KEY_LENGTH);
    return 0; /* EVP_AEAD_CTX_init should catch this. */
  }

  if (tag_len == EVP_AEAD_DEFAULT_TAG_LENGTH) {
    tag_len = EVP_AEAD_AES_GCM_TAG_LEN;
  }

  if (tag_len > EVP_AEAD_AES_GCM_TAG_LEN) {
    OPENSSL_PUT_ERROR(CIPHER, aead_aes_gcm_init, CIPHER_R_TAG_TOO_LARGE);
    return 0;
  }

  gcm_ctx = OPENSSL_malloc(sizeof(struct aead_aes_gcm_ctx));
  if (gcm_ctx == NULL) {
    return 0;
  }

  if (aesni_capable()) {
    aesni_set_encrypt_key(key, key_len * 8, &gcm_ctx->ks.ks);
    CRYPTO_gcm128_init(&gcm_ctx->gcm, &gcm_ctx->ks.ks,
                       (block128_f)aesni_encrypt);
    gcm_ctx->ctr = (ctr128_f)aesni_ctr32_encrypt_blocks;
  } else {
    gcm_ctx->ctr =
        aes_gcm_set_key(&gcm_ctx->ks.ks, &gcm_ctx->gcm, key, key_len);
  }

  gcm_ctx->tag_len = tag_len;
  ctx->aead_state = gcm_ctx;

  return 1;
}
static void aead_aes_gcm_cleanup(EVP_AEAD_CTX *ctx) {
  struct aead_aes_gcm_ctx *gcm_ctx = ctx->aead_state;
  OPENSSL_cleanse(gcm_ctx, sizeof(struct aead_aes_gcm_ctx));
  OPENSSL_free(gcm_ctx);
}
static int aead_aes_gcm_seal(const EVP_AEAD_CTX *ctx, uint8_t *out,
                             size_t *out_len, size_t max_out_len,
                             const uint8_t *nonce, size_t nonce_len,
                             const uint8_t *in, size_t in_len,
                             const uint8_t *ad, size_t ad_len) {
  size_t bulk = 0;
  const struct aead_aes_gcm_ctx *gcm_ctx = ctx->aead_state;
  GCM128_CONTEXT gcm;

  if (in_len + gcm_ctx->tag_len < in_len) {
    OPENSSL_PUT_ERROR(CIPHER, aead_aes_gcm_seal, CIPHER_R_TOO_LARGE);
    return 0;
  }

  if (max_out_len < in_len + gcm_ctx->tag_len) {
    OPENSSL_PUT_ERROR(CIPHER, aead_aes_gcm_seal, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  memcpy(&gcm, &gcm_ctx->gcm, sizeof(gcm));
  CRYPTO_gcm128_setiv(&gcm, nonce, nonce_len);

  if (ad_len > 0 && !CRYPTO_gcm128_aad(&gcm, ad, ad_len)) {
    return 0;
  }

  if (gcm_ctx->ctr) {
    if (!CRYPTO_gcm128_encrypt_ctr32(&gcm, in + bulk, out + bulk,
                                     in_len - bulk, gcm_ctx->ctr)) {
      return 0;
    }
  } else {
    if (!CRYPTO_gcm128_encrypt(&gcm, in + bulk, out + bulk, in_len - bulk)) {
      return 0;
    }
  }

  CRYPTO_gcm128_tag(&gcm, out + in_len, gcm_ctx->tag_len);
  *out_len = in_len + gcm_ctx->tag_len;
  return 1;
}
static int aead_aes_gcm_open(const EVP_AEAD_CTX *ctx, uint8_t *out,
                             size_t *out_len, size_t max_out_len,
                             const uint8_t *nonce, size_t nonce_len,
                             const uint8_t *in, size_t in_len,
                             const uint8_t *ad, size_t ad_len) {
  size_t bulk = 0;
  const struct aead_aes_gcm_ctx *gcm_ctx = ctx->aead_state;
  uint8_t tag[EVP_AEAD_AES_GCM_TAG_LEN];
  size_t plaintext_len;
  GCM128_CONTEXT gcm;

  if (in_len < gcm_ctx->tag_len) {
    OPENSSL_PUT_ERROR(CIPHER, aead_aes_gcm_open, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  plaintext_len = in_len - gcm_ctx->tag_len;

  if (max_out_len < plaintext_len) {
    OPENSSL_PUT_ERROR(CIPHER, aead_aes_gcm_open, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  memcpy(&gcm, &gcm_ctx->gcm, sizeof(gcm));
  CRYPTO_gcm128_setiv(&gcm, nonce, nonce_len);

  if (!CRYPTO_gcm128_aad(&gcm, ad, ad_len)) {
    return 0;
  }

  if (gcm_ctx->ctr) {
    if (!CRYPTO_gcm128_decrypt_ctr32(&gcm, in + bulk, out + bulk,
                                     in_len - bulk - gcm_ctx->tag_len,
                                     gcm_ctx->ctr)) {
      return 0;
    }
  } else {
    if (!CRYPTO_gcm128_decrypt(&gcm, in + bulk, out + bulk,
                               in_len - bulk - gcm_ctx->tag_len)) {
      return 0;
    }
  }

  CRYPTO_gcm128_tag(&gcm, tag, gcm_ctx->tag_len);
  if (CRYPTO_memcmp(tag, in + plaintext_len, gcm_ctx->tag_len) != 0) {
    OPENSSL_PUT_ERROR(CIPHER, aead_aes_gcm_open, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  *out_len = plaintext_len;
  return 1;
}
static const EVP_AEAD aead_aes_128_gcm = {
    16,                       /* key len */
    12,                       /* nonce len */
    EVP_AEAD_AES_GCM_TAG_LEN, /* overhead */
    EVP_AEAD_AES_GCM_TAG_LEN, /* max tag length */
    aead_aes_gcm_init, aead_aes_gcm_cleanup,
    aead_aes_gcm_seal, aead_aes_gcm_open,
};

static const EVP_AEAD aead_aes_256_gcm = {
    32,                       /* key len */
    12,                       /* nonce len */
    EVP_AEAD_AES_GCM_TAG_LEN, /* overhead */
    EVP_AEAD_AES_GCM_TAG_LEN, /* max tag length */
    aead_aes_gcm_init, aead_aes_gcm_cleanup,
    aead_aes_gcm_seal, aead_aes_gcm_open,
};

const EVP_AEAD *EVP_aead_aes_128_gcm(void) { return &aead_aes_128_gcm; }

const EVP_AEAD *EVP_aead_aes_256_gcm(void) { return &aead_aes_256_gcm; }
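/* Hedged usage sketch (not part of the original file): the same operation
 * through the one-shot AEAD interface, which is the intended consumer of the
 * tables above. The signatures mirror aead_aes_gcm_seal() in this file;
 * |example_aead_seal| is a hypothetical name. */
#if 0 /* example only */
static int example_aead_seal(void) {
  static const uint8_t key[16] = {0};
  static const uint8_t nonce[12] = {0};
  uint8_t in[64] = {0}, sealed[64 + EVP_AEAD_AES_GCM_TAG_LEN];
  size_t sealed_len;
  EVP_AEAD_CTX ctx;

  if (!EVP_AEAD_CTX_init(&ctx, EVP_aead_aes_128_gcm(), key, sizeof(key),
                         EVP_AEAD_DEFAULT_TAG_LENGTH, NULL)) {
    return 0;
  }
  /* Output is |in_len| plus tag_len bytes; aead_aes_gcm_seal() verifies that
   * |max_out_len| has room for both. */
  if (!EVP_AEAD_CTX_seal(&ctx, sealed, &sealed_len, sizeof(sealed), nonce,
                         sizeof(nonce), in, sizeof(in), NULL, 0)) {
    EVP_AEAD_CTX_cleanup(&ctx);
    return 0;
  }
  EVP_AEAD_CTX_cleanup(&ctx);
  return 1;
}
#endif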
/* AES Key Wrap is specified in
 * http://csrc.nist.gov/groups/ST/toolkit/documents/kms/key-wrap.pdf
 * or https://tools.ietf.org/html/rfc3394 */

struct aead_aes_key_wrap_ctx {
  uint8_t key[32];
  unsigned key_bits;
};
static int aead_aes_key_wrap_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                  size_t key_len, size_t tag_len) {
  struct aead_aes_key_wrap_ctx *kw_ctx;
  const size_t key_bits = key_len * 8;

  if (key_bits != 128 && key_bits != 256) {
    OPENSSL_PUT_ERROR(CIPHER, aead_aes_key_wrap_init, CIPHER_R_BAD_KEY_LENGTH);
    return 0; /* EVP_AEAD_CTX_init should catch this. */
  }

  if (tag_len == EVP_AEAD_DEFAULT_TAG_LENGTH) {
    tag_len = 8;
  }

  if (tag_len != 8) {
    OPENSSL_PUT_ERROR(CIPHER, aead_aes_key_wrap_init,
                      CIPHER_R_UNSUPPORTED_TAG_SIZE);
    return 0;
  }

  kw_ctx = OPENSSL_malloc(sizeof(struct aead_aes_key_wrap_ctx));
  if (kw_ctx == NULL) {
    OPENSSL_PUT_ERROR(CIPHER, aead_aes_key_wrap_init, ERR_R_MALLOC_FAILURE);
    return 0;
  }

  memcpy(kw_ctx->key, key, key_len);
  kw_ctx->key_bits = key_bits;

  ctx->aead_state = kw_ctx;
  return 1;
}
static void aead_aes_key_wrap_cleanup(EVP_AEAD_CTX *ctx) {
  struct aead_aes_key_wrap_ctx *kw_ctx = ctx->aead_state;
  OPENSSL_cleanse(kw_ctx, sizeof(struct aead_aes_key_wrap_ctx));
  OPENSSL_free(kw_ctx);
}

/* kDefaultAESKeyWrapNonce is the default nonce value given in section 2.2.3.1
 * of RFC 3394. */
static const uint8_t kDefaultAESKeyWrapNonce[8] = {0xa6, 0xa6, 0xa6, 0xa6,
                                                   0xa6, 0xa6, 0xa6, 0xa6};
static int aead_aes_key_wrap_seal(const EVP_AEAD_CTX *ctx, uint8_t *out,
                                  size_t *out_len, size_t max_out_len,
                                  const uint8_t *nonce, size_t nonce_len,
                                  const uint8_t *in, size_t in_len,
                                  const uint8_t *ad, size_t ad_len) {
  const struct aead_aes_key_wrap_ctx *kw_ctx = ctx->aead_state;
  union {
    double align;
    AES_KEY ks;
  } ks;
  /* Variables in this function match up with the variables in the second half
   * of section 2.2.1. */
  unsigned i, j, n;
  uint8_t A[AES_BLOCK_SIZE];

  if (ad_len != 0) {
    OPENSSL_PUT_ERROR(CIPHER, aead_aes_key_wrap_seal,
                      CIPHER_R_UNSUPPORTED_AD_SIZE);
    return 0;
  }

  if (nonce_len == 0) {
    nonce = kDefaultAESKeyWrapNonce;
    nonce_len = sizeof(kDefaultAESKeyWrapNonce);
  }

  if (nonce_len != 8) {
    OPENSSL_PUT_ERROR(CIPHER, aead_aes_key_wrap_seal,
                      CIPHER_R_UNSUPPORTED_NONCE_SIZE);
    return 0;
  }

  if (in_len % 8 != 0) {
    OPENSSL_PUT_ERROR(CIPHER, aead_aes_key_wrap_seal,
                      CIPHER_R_UNSUPPORTED_INPUT_SIZE);
    return 0;
  }

  /* The code below only handles a 32-bit |t|, thus 6*|n| must be less than
   * 2^32, where |n| is |in_len| / 8. So |in_len| < 4/3 * 2^32 and we
   * conservatively cap it to 2^32-16 to stop 32-bit platforms complaining
   * that a comparison is always true. */
  if (in_len > 0xfffffff0) {
    OPENSSL_PUT_ERROR(CIPHER, aead_aes_key_wrap_seal, CIPHER_R_TOO_LARGE);
    return 0;
  }

  n = in_len / 8;

  if (n < 2) {
    OPENSSL_PUT_ERROR(CIPHER, aead_aes_key_wrap_seal,
                      CIPHER_R_UNSUPPORTED_INPUT_SIZE);
    return 0;
  }

  if (in_len + 8 < in_len) {
    OPENSSL_PUT_ERROR(CIPHER, aead_aes_key_wrap_seal, CIPHER_R_TOO_LARGE);
    return 0;
  }

  if (max_out_len < in_len + 8) {
    OPENSSL_PUT_ERROR(CIPHER, aead_aes_key_wrap_seal,
                      CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (AES_set_encrypt_key(kw_ctx->key, kw_ctx->key_bits, &ks.ks) < 0) {
    OPENSSL_PUT_ERROR(CIPHER, aead_aes_key_wrap_seal,
                      CIPHER_R_AES_KEY_SETUP_FAILED);
    return 0;
  }

  memmove(out + 8, in, in_len);
  memcpy(A, nonce, 8);

  for (j = 0; j < 6; j++) {
    for (i = 1; i <= n; i++) {
      uint32_t t;

      memcpy(A + 8, out + 8 * i, 8);
      AES_encrypt(A, A, &ks.ks);
      t = n * j + i;
      /* XOR the 32-bit step counter |t| into the low-order bytes of A. */
      A[7] ^= t & 0xff;
      A[6] ^= (t >> 8) & 0xff;
      A[5] ^= (t >> 16) & 0xff;
      A[4] ^= (t >> 24) & 0xff;
      memcpy(out + 8 * i, A + 8, 8);
    }
  }

  memcpy(out, A, 8);
  *out_len = in_len + 8;
  return 1;
}
static int aead_aes_key_wrap_open(const EVP_AEAD_CTX *ctx, uint8_t *out,
                                  size_t *out_len, size_t max_out_len,
                                  const uint8_t *nonce, size_t nonce_len,
                                  const uint8_t *in, size_t in_len,
                                  const uint8_t *ad, size_t ad_len) {
  const struct aead_aes_key_wrap_ctx *kw_ctx = ctx->aead_state;
  union {
    double align;
    AES_KEY ks;
  } ks;
  /* Variables in this function match up with the variables in the second half
   * of section 2.2.1. */
  unsigned i, j, n;
  uint8_t A[AES_BLOCK_SIZE];

  if (ad_len != 0) {
    OPENSSL_PUT_ERROR(CIPHER, aead_aes_key_wrap_open,
                      CIPHER_R_UNSUPPORTED_AD_SIZE);
    return 0;
  }

  if (nonce_len == 0) {
    nonce = kDefaultAESKeyWrapNonce;
    nonce_len = sizeof(kDefaultAESKeyWrapNonce);
  }

  if (nonce_len != 8) {
    OPENSSL_PUT_ERROR(CIPHER, aead_aes_key_wrap_open,
                      CIPHER_R_UNSUPPORTED_NONCE_SIZE);
    return 0;
  }

  if (in_len % 8 != 0) {
    OPENSSL_PUT_ERROR(CIPHER, aead_aes_key_wrap_open,
                      CIPHER_R_UNSUPPORTED_INPUT_SIZE);
    return 0;
  }

  /* The code below only handles a 32-bit |t|, thus 6*|n| must be less than
   * 2^32, where |n| is |in_len| / 8. So |in_len| < 4/3 * 2^32 and we
   * conservatively cap it to 2^32-8 to stop 32-bit platforms complaining
   * that a comparison is always true. */
  if (in_len > 0xfffffff8) {
    OPENSSL_PUT_ERROR(CIPHER, aead_aes_key_wrap_open, CIPHER_R_TOO_LARGE);
    return 0;
  }

  if (in_len < 24) {
    OPENSSL_PUT_ERROR(CIPHER, aead_aes_key_wrap_open, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  n = (in_len / 8) - 1;

  if (max_out_len < in_len - 8) {
    OPENSSL_PUT_ERROR(CIPHER, aead_aes_key_wrap_open,
                      CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (AES_set_decrypt_key(kw_ctx->key, kw_ctx->key_bits, &ks.ks) < 0) {
    OPENSSL_PUT_ERROR(CIPHER, aead_aes_key_wrap_open,
                      CIPHER_R_AES_KEY_SETUP_FAILED);
    return 0;
  }

  memcpy(A, in, 8);
  memmove(out, in + 8, in_len - 8);

  /* |j| is unsigned, so the condition "j < 6" runs this loop for
   * j = 5, 4, ..., 0 and stops once |j| wraps around past zero. */
  for (j = 5; j < 6; j--) {
    for (i = n; i > 0; i--) {
      uint32_t t;

      t = n * j + i;
      A[7] ^= t & 0xff;
      A[6] ^= (t >> 8) & 0xff;
      A[5] ^= (t >> 16) & 0xff;
      A[4] ^= (t >> 24) & 0xff;
      memcpy(A + 8, out + 8 * (i - 1), 8);
      AES_decrypt(A, A, &ks.ks);
      memcpy(out + 8 * (i - 1), A + 8, 8);
    }
  }

  if (CRYPTO_memcmp(A, nonce, 8) != 0) {
    OPENSSL_PUT_ERROR(CIPHER, aead_aes_key_wrap_open, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  *out_len = in_len - 8;
  return 1;
}
static const EVP_AEAD aead_aes_128_key_wrap = {
    16, /* key len */
    8,  /* nonce len */
    8,  /* overhead */
    8,  /* max tag length */
    aead_aes_key_wrap_init, aead_aes_key_wrap_cleanup,
    aead_aes_key_wrap_seal, aead_aes_key_wrap_open,
};

static const EVP_AEAD aead_aes_256_key_wrap = {
    32, /* key len */
    8,  /* nonce len */
    8,  /* overhead */
    8,  /* max tag length */
    aead_aes_key_wrap_init, aead_aes_key_wrap_cleanup,
    aead_aes_key_wrap_seal, aead_aes_key_wrap_open,
};

const EVP_AEAD *EVP_aead_aes_128_key_wrap(void) {
  return &aead_aes_128_key_wrap;
}

const EVP_AEAD *EVP_aead_aes_256_key_wrap(void) {
  return &aead_aes_256_key_wrap;
}
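/* Hedged usage sketch (not part of the original file): wrapping one key under
 * another with the AEAD wrapper above. Per the checks in
 * aead_aes_key_wrap_seal(), the input must be a multiple of eight bytes and
 * at least sixteen, the output is the input plus eight bytes of integrity
 * check value, and a zero-length nonce selects kDefaultAESKeyWrapNonce.
 * |example_wrap| is a hypothetical name. */
#if 0 /* example only */
static int example_wrap(void) {
  static const uint8_t kek[16] = {0};          /* key-encryption key */
  static const uint8_t key_to_wrap[16] = {0};  /* key material to protect */
  uint8_t wrapped[16 + 8];
  size_t wrapped_len;
  EVP_AEAD_CTX ctx;

  if (!EVP_AEAD_CTX_init(&ctx, EVP_aead_aes_128_key_wrap(), kek, sizeof(kek),
                         EVP_AEAD_DEFAULT_TAG_LENGTH, NULL)) {
    return 0;
  }
  if (!EVP_AEAD_CTX_seal(&ctx, wrapped, &wrapped_len, sizeof(wrapped),
                         NULL, 0 /* default RFC 3394 nonce */, key_to_wrap,
                         sizeof(key_to_wrap), NULL, 0)) {
    EVP_AEAD_CTX_cleanup(&ctx);
    return 0;
  }
  EVP_AEAD_CTX_cleanup(&ctx);
  return 1;
}
#endif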
int EVP_has_aes_hardware(void) {
#if defined(OPENSSL_X86) || defined(OPENSSL_X86_64)
  return aesni_capable() && crypto_gcm_clmul_enabled();
#else
  return 0;
#endif
}