/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code, the real AES implementation is in intel-aes_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <asm/cpu_device_id.h>
#include <asm/i387.h>
#include <asm/crypto/aes.h>
#include <asm/crypto/ablk_helper.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

/* Optional mode wrappers are only exposed when the generic templates
 * they stack on are available in this kernel configuration. */
#if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE)
#define HAS_CTR
#endif

#if defined(CONFIG_CRYPTO_LRW) || defined(CONFIG_CRYPTO_LRW_MODULE)
#define HAS_LRW
#endif

#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
#define HAS_PCBC
#endif

#if defined(CONFIG_CRYPTO_XTS) || defined(CONFIG_CRYPTO_XTS_MODULE)
#define HAS_XTS
#endif
56 /* This data is stored at the end of the crypto_tfm struct.
57 * It's a type of per "session" data storage location.
58 * This needs to be 16 byte aligned.
60 struct aesni_rfc4106_gcm_ctx {
62 struct crypto_aes_ctx aes_key_expanded;
64 struct cryptd_aead *cryptd_tfm;
67 struct aesni_gcm_set_hash_subkey_result {
69 struct completion completion;
72 struct aesni_hash_subkey_req_data {
74 struct aesni_gcm_set_hash_subkey_result result;
75 struct scatterlist sg;
78 #define AESNI_ALIGN (16)
79 #define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1))
80 #define RFC4106_HASH_SUBKEY_SIZE 16
82 asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
83 unsigned int key_len);
84 asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
86 asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
88 asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
89 const u8 *in, unsigned int len);
90 asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
91 const u8 *in, unsigned int len);
92 asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
93 const u8 *in, unsigned int len, u8 *iv);
94 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
95 const u8 *in, unsigned int len, u8 *iv);
97 int crypto_fpu_init(void);
98 void crypto_fpu_exit(void);
101 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
102 const u8 *in, unsigned int len, u8 *iv);
104 /* asmlinkage void aesni_gcm_enc()
105 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
106 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
107 * const u8 *in, Plaintext input
108 * unsigned long plaintext_len, Length of data in bytes for encryption.
109 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
110 * concatenated with 8 byte Initialisation Vector (from IPSec ESP
111 * Payload) concatenated with 0x00000001. 16-byte aligned pointer.
112 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
113 * const u8 *aad, Additional Authentication Data (AAD)
114 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
115 * is going to be 8 or 12 bytes
116 * u8 *auth_tag, Authenticated Tag output.
117 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
118 * Valid values are 16 (most likely), 12 or 8.
120 asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
121 const u8 *in, unsigned long plaintext_len, u8 *iv,
122 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
123 u8 *auth_tag, unsigned long auth_tag_len);
125 /* asmlinkage void aesni_gcm_dec()
126 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
127 * u8 *out, Plaintext output. Decrypt in-place is allowed.
128 * const u8 *in, Ciphertext input
129 * unsigned long ciphertext_len, Length of data in bytes for decryption.
130 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
131 * concatenated with 8 byte Initialisation Vector (from IPSec ESP
132 * Payload) concatenated with 0x00000001. 16-byte aligned pointer.
133 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
134 * const u8 *aad, Additional Authentication Data (AAD)
135 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
136 * to be 8 or 12 bytes
137 * u8 *auth_tag, Authenticated Tag output.
138 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
139 * Valid values are 16 (most likely), 12 or 8.
141 asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
142 const u8 *in, unsigned long ciphertext_len, u8 *iv,
143 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
144 u8 *auth_tag, unsigned long auth_tag_len);
147 aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
150 (struct aesni_rfc4106_gcm_ctx *)
152 crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN);
156 static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
158 unsigned long addr = (unsigned long)raw_ctx;
159 unsigned long align = AESNI_ALIGN;
161 if (align <= crypto_tfm_ctx_alignment())
163 return (struct crypto_aes_ctx *)ALIGN(addr, align);
166 static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
167 const u8 *in_key, unsigned int key_len)
169 struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
170 u32 *flags = &tfm->crt_flags;
173 if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
174 key_len != AES_KEYSIZE_256) {
175 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
179 if (!irq_fpu_usable())
180 err = crypto_aes_expand_key(ctx, in_key, key_len);
183 err = aesni_set_key(ctx, in_key, key_len);
190 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
191 unsigned int key_len)
193 return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
196 static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
198 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
200 if (!irq_fpu_usable())
201 crypto_aes_encrypt_x86(ctx, dst, src);
204 aesni_enc(ctx, dst, src);
209 static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
211 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
213 if (!irq_fpu_usable())
214 crypto_aes_decrypt_x86(ctx, dst, src);
217 aesni_dec(ctx, dst, src);
222 static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
224 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
226 aesni_enc(ctx, dst, src);
229 static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
231 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
233 aesni_dec(ctx, dst, src);
236 static int ecb_encrypt(struct blkcipher_desc *desc,
237 struct scatterlist *dst, struct scatterlist *src,
240 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
241 struct blkcipher_walk walk;
244 blkcipher_walk_init(&walk, dst, src, nbytes);
245 err = blkcipher_walk_virt(desc, &walk);
246 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
249 while ((nbytes = walk.nbytes)) {
250 aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
251 nbytes & AES_BLOCK_MASK);
252 nbytes &= AES_BLOCK_SIZE - 1;
253 err = blkcipher_walk_done(desc, &walk, nbytes);
260 static int ecb_decrypt(struct blkcipher_desc *desc,
261 struct scatterlist *dst, struct scatterlist *src,
264 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
265 struct blkcipher_walk walk;
268 blkcipher_walk_init(&walk, dst, src, nbytes);
269 err = blkcipher_walk_virt(desc, &walk);
270 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
273 while ((nbytes = walk.nbytes)) {
274 aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
275 nbytes & AES_BLOCK_MASK);
276 nbytes &= AES_BLOCK_SIZE - 1;
277 err = blkcipher_walk_done(desc, &walk, nbytes);
284 static int cbc_encrypt(struct blkcipher_desc *desc,
285 struct scatterlist *dst, struct scatterlist *src,
288 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
289 struct blkcipher_walk walk;
292 blkcipher_walk_init(&walk, dst, src, nbytes);
293 err = blkcipher_walk_virt(desc, &walk);
294 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
297 while ((nbytes = walk.nbytes)) {
298 aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
299 nbytes & AES_BLOCK_MASK, walk.iv);
300 nbytes &= AES_BLOCK_SIZE - 1;
301 err = blkcipher_walk_done(desc, &walk, nbytes);
308 static int cbc_decrypt(struct blkcipher_desc *desc,
309 struct scatterlist *dst, struct scatterlist *src,
312 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
313 struct blkcipher_walk walk;
316 blkcipher_walk_init(&walk, dst, src, nbytes);
317 err = blkcipher_walk_virt(desc, &walk);
318 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
321 while ((nbytes = walk.nbytes)) {
322 aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
323 nbytes & AES_BLOCK_MASK, walk.iv);
324 nbytes &= AES_BLOCK_SIZE - 1;
325 err = blkcipher_walk_done(desc, &walk, nbytes);
333 static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
334 struct blkcipher_walk *walk)
336 u8 *ctrblk = walk->iv;
337 u8 keystream[AES_BLOCK_SIZE];
338 u8 *src = walk->src.virt.addr;
339 u8 *dst = walk->dst.virt.addr;
340 unsigned int nbytes = walk->nbytes;
342 aesni_enc(ctx, keystream, ctrblk);
343 crypto_xor(keystream, src, nbytes);
344 memcpy(dst, keystream, nbytes);
345 crypto_inc(ctrblk, AES_BLOCK_SIZE);
348 static int ctr_crypt(struct blkcipher_desc *desc,
349 struct scatterlist *dst, struct scatterlist *src,
352 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
353 struct blkcipher_walk walk;
356 blkcipher_walk_init(&walk, dst, src, nbytes);
357 err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
358 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
361 while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
362 aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
363 nbytes & AES_BLOCK_MASK, walk.iv);
364 nbytes &= AES_BLOCK_SIZE - 1;
365 err = blkcipher_walk_done(desc, &walk, nbytes);
368 ctr_crypt_final(ctx, &walk);
369 err = blkcipher_walk_done(desc, &walk, 0);
/* Async wrapper init: bind to the internal synchronous ECB driver. */
static int ablk_ecb_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
}
/* Async wrapper init: bind to the internal synchronous CBC driver. */
static int ablk_cbc_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
}
/* Async wrapper init: bind to the internal synchronous CTR driver. */
static int ablk_ctr_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
}
/* Async wrapper init: stack the generic rfc3686 template on our CTR. */
static int ablk_rfc3686_ctr_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "rfc3686(__driver-ctr-aes-aesni)");
}
/* Async wrapper init: generic LRW over the FPU-wrapped raw cipher. */
static int ablk_lrw_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "fpu(lrw(__driver-aes-aesni))");
}
/* Async wrapper init: generic PCBC over the FPU-wrapped raw cipher. */
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
}
/* Async wrapper init: generic XTS over the FPU-wrapped raw cipher. */
static int ablk_xts_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "fpu(xts(__driver-aes-aesni))");
}
423 static int rfc4106_init(struct crypto_tfm *tfm)
425 struct cryptd_aead *cryptd_tfm;
426 struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
427 PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
428 struct crypto_aead *cryptd_child;
429 struct aesni_rfc4106_gcm_ctx *child_ctx;
430 cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
431 if (IS_ERR(cryptd_tfm))
432 return PTR_ERR(cryptd_tfm);
434 cryptd_child = cryptd_aead_child(cryptd_tfm);
435 child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
436 memcpy(child_ctx, ctx, sizeof(*ctx));
437 ctx->cryptd_tfm = cryptd_tfm;
438 tfm->crt_aead.reqsize = sizeof(struct aead_request)
439 + crypto_aead_reqsize(&cryptd_tfm->base);
443 static void rfc4106_exit(struct crypto_tfm *tfm)
445 struct aesni_rfc4106_gcm_ctx *ctx =
446 (struct aesni_rfc4106_gcm_ctx *)
447 PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
448 if (!IS_ERR(ctx->cryptd_tfm))
449 cryptd_free_aead(ctx->cryptd_tfm);
454 rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
456 struct aesni_gcm_set_hash_subkey_result *result = req->data;
458 if (err == -EINPROGRESS)
461 complete(&result->completion);
465 rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
467 struct crypto_ablkcipher *ctr_tfm;
468 struct ablkcipher_request *req;
470 struct aesni_hash_subkey_req_data *req_data;
472 ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
474 return PTR_ERR(ctr_tfm);
476 crypto_ablkcipher_clear_flags(ctr_tfm, ~0);
478 ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
480 goto out_free_ablkcipher;
483 req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
485 goto out_free_ablkcipher;
487 req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
489 goto out_free_request;
491 memset(req_data->iv, 0, sizeof(req_data->iv));
493 /* Clear the data in the hash sub key container to zero.*/
494 /* We want to cipher all zeros to create the hash sub key. */
495 memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
497 init_completion(&req_data->result.completion);
498 sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
499 ablkcipher_request_set_tfm(req, ctr_tfm);
500 ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
501 CRYPTO_TFM_REQ_MAY_BACKLOG,
502 rfc4106_set_hash_subkey_done,
505 ablkcipher_request_set_crypt(req, &req_data->sg,
506 &req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);
508 ret = crypto_ablkcipher_encrypt(req);
509 if (ret == -EINPROGRESS || ret == -EBUSY) {
510 ret = wait_for_completion_interruptible
511 (&req_data->result.completion);
513 ret = req_data->result.err;
517 ablkcipher_request_free(req);
519 crypto_free_ablkcipher(ctr_tfm);
523 static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
524 unsigned int key_len)
527 struct crypto_tfm *tfm = crypto_aead_tfm(parent);
528 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
529 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
530 struct aesni_rfc4106_gcm_ctx *child_ctx =
531 aesni_rfc4106_gcm_ctx_get(cryptd_child);
532 u8 *new_key_mem = NULL;
535 crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
538 /*Account for 4 byte nonce at the end.*/
540 if (key_len != AES_KEYSIZE_128) {
541 crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
545 memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
546 /*This must be on a 16 byte boundary!*/
547 if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
550 if ((unsigned long)key % AESNI_ALIGN) {
551 /*key is not aligned: use an auxuliar aligned pointer*/
552 new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
556 new_key_mem = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
557 memcpy(new_key_mem, key, key_len);
561 if (!irq_fpu_usable())
562 ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
566 ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
569 /*This must be on a 16 byte boundary!*/
570 if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
574 ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
575 memcpy(child_ctx, ctx, sizeof(*ctx));
581 /* This is the Integrity Check Value (aka the authentication tag length and can
582 * be 8, 12 or 16 bytes long. */
583 static int rfc4106_set_authsize(struct crypto_aead *parent,
584 unsigned int authsize)
586 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
587 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
597 crypto_aead_crt(parent)->authsize = authsize;
598 crypto_aead_crt(cryptd_child)->authsize = authsize;
602 static int rfc4106_encrypt(struct aead_request *req)
605 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
606 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
608 if (!irq_fpu_usable()) {
609 struct aead_request *cryptd_req =
610 (struct aead_request *) aead_request_ctx(req);
611 memcpy(cryptd_req, req, sizeof(*req));
612 aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
613 return crypto_aead_encrypt(cryptd_req);
615 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
617 ret = cryptd_child->base.crt_aead.encrypt(req);
623 static int rfc4106_decrypt(struct aead_request *req)
626 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
627 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
629 if (!irq_fpu_usable()) {
630 struct aead_request *cryptd_req =
631 (struct aead_request *) aead_request_ctx(req);
632 memcpy(cryptd_req, req, sizeof(*req));
633 aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
634 return crypto_aead_decrypt(cryptd_req);
636 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
638 ret = cryptd_child->base.crt_aead.decrypt(req);
644 static int __driver_rfc4106_encrypt(struct aead_request *req)
646 u8 one_entry_in_sg = 0;
647 u8 *src, *dst, *assoc;
648 __be32 counter = cpu_to_be32(1);
649 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
650 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
651 void *aes_ctx = &(ctx->aes_key_expanded);
652 unsigned long auth_tag_len = crypto_aead_authsize(tfm);
653 u8 iv_tab[16+AESNI_ALIGN];
654 u8* iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
655 struct scatter_walk src_sg_walk;
656 struct scatter_walk assoc_sg_walk;
657 struct scatter_walk dst_sg_walk;
660 /* Assuming we are supporting rfc4106 64-bit extended */
661 /* sequence numbers We need to have the AAD length equal */
662 /* to 8 or 12 bytes */
663 if (unlikely(req->assoclen != 8 && req->assoclen != 12))
666 for (i = 0; i < 4; i++)
667 *(iv+i) = ctx->nonce[i];
668 for (i = 0; i < 8; i++)
669 *(iv+4+i) = req->iv[i];
670 *((__be32 *)(iv+12)) = counter;
672 if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
674 scatterwalk_start(&src_sg_walk, req->src);
675 scatterwalk_start(&assoc_sg_walk, req->assoc);
676 src = scatterwalk_map(&src_sg_walk);
677 assoc = scatterwalk_map(&assoc_sg_walk);
679 if (unlikely(req->src != req->dst)) {
680 scatterwalk_start(&dst_sg_walk, req->dst);
681 dst = scatterwalk_map(&dst_sg_walk);
685 /* Allocate memory for src, dst, assoc */
686 src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
690 assoc = (src + req->cryptlen + auth_tag_len);
691 scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
692 scatterwalk_map_and_copy(assoc, req->assoc, 0,
697 aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
698 ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
699 + ((unsigned long)req->cryptlen), auth_tag_len);
701 /* The authTag (aka the Integrity Check Value) needs to be written
702 * back to the packet. */
703 if (one_entry_in_sg) {
704 if (unlikely(req->src != req->dst)) {
705 scatterwalk_unmap(dst);
706 scatterwalk_done(&dst_sg_walk, 0, 0);
708 scatterwalk_unmap(src);
709 scatterwalk_unmap(assoc);
710 scatterwalk_done(&src_sg_walk, 0, 0);
711 scatterwalk_done(&assoc_sg_walk, 0, 0);
713 scatterwalk_map_and_copy(dst, req->dst, 0,
714 req->cryptlen + auth_tag_len, 1);
720 static int __driver_rfc4106_decrypt(struct aead_request *req)
722 u8 one_entry_in_sg = 0;
723 u8 *src, *dst, *assoc;
724 unsigned long tempCipherLen = 0;
725 __be32 counter = cpu_to_be32(1);
727 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
728 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
729 void *aes_ctx = &(ctx->aes_key_expanded);
730 unsigned long auth_tag_len = crypto_aead_authsize(tfm);
731 u8 iv_and_authTag[32+AESNI_ALIGN];
732 u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
733 u8 *authTag = iv + 16;
734 struct scatter_walk src_sg_walk;
735 struct scatter_walk assoc_sg_walk;
736 struct scatter_walk dst_sg_walk;
739 if (unlikely((req->cryptlen < auth_tag_len) ||
740 (req->assoclen != 8 && req->assoclen != 12)))
742 /* Assuming we are supporting rfc4106 64-bit extended */
743 /* sequence numbers We need to have the AAD length */
744 /* equal to 8 or 12 bytes */
746 tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
748 for (i = 0; i < 4; i++)
749 *(iv+i) = ctx->nonce[i];
750 for (i = 0; i < 8; i++)
751 *(iv+4+i) = req->iv[i];
752 *((__be32 *)(iv+12)) = counter;
754 if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
756 scatterwalk_start(&src_sg_walk, req->src);
757 scatterwalk_start(&assoc_sg_walk, req->assoc);
758 src = scatterwalk_map(&src_sg_walk);
759 assoc = scatterwalk_map(&assoc_sg_walk);
761 if (unlikely(req->src != req->dst)) {
762 scatterwalk_start(&dst_sg_walk, req->dst);
763 dst = scatterwalk_map(&dst_sg_walk);
767 /* Allocate memory for src, dst, assoc */
768 src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
771 assoc = (src + req->cryptlen + auth_tag_len);
772 scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
773 scatterwalk_map_and_copy(assoc, req->assoc, 0,
778 aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv,
779 ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
780 authTag, auth_tag_len);
782 /* Compare generated tag with passed in tag. */
783 retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ?
786 if (one_entry_in_sg) {
787 if (unlikely(req->src != req->dst)) {
788 scatterwalk_unmap(dst);
789 scatterwalk_done(&dst_sg_walk, 0, 0);
791 scatterwalk_unmap(src);
792 scatterwalk_unmap(assoc);
793 scatterwalk_done(&src_sg_walk, 0, 0);
794 scatterwalk_done(&assoc_sg_walk, 0, 0);
796 scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
803 static struct crypto_alg aesni_algs[] = { {
805 .cra_driver_name = "aes-aesni",
807 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
808 .cra_blocksize = AES_BLOCK_SIZE,
809 .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
812 .cra_module = THIS_MODULE,
815 .cia_min_keysize = AES_MIN_KEY_SIZE,
816 .cia_max_keysize = AES_MAX_KEY_SIZE,
817 .cia_setkey = aes_set_key,
818 .cia_encrypt = aes_encrypt,
819 .cia_decrypt = aes_decrypt
823 .cra_name = "__aes-aesni",
824 .cra_driver_name = "__driver-aes-aesni",
826 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
827 .cra_blocksize = AES_BLOCK_SIZE,
828 .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
831 .cra_module = THIS_MODULE,
834 .cia_min_keysize = AES_MIN_KEY_SIZE,
835 .cia_max_keysize = AES_MAX_KEY_SIZE,
836 .cia_setkey = aes_set_key,
837 .cia_encrypt = __aes_encrypt,
838 .cia_decrypt = __aes_decrypt
842 .cra_name = "__ecb-aes-aesni",
843 .cra_driver_name = "__driver-ecb-aes-aesni",
845 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
846 .cra_blocksize = AES_BLOCK_SIZE,
847 .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
850 .cra_type = &crypto_blkcipher_type,
851 .cra_module = THIS_MODULE,
854 .min_keysize = AES_MIN_KEY_SIZE,
855 .max_keysize = AES_MAX_KEY_SIZE,
856 .setkey = aes_set_key,
857 .encrypt = ecb_encrypt,
858 .decrypt = ecb_decrypt,
862 .cra_name = "__cbc-aes-aesni",
863 .cra_driver_name = "__driver-cbc-aes-aesni",
865 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
866 .cra_blocksize = AES_BLOCK_SIZE,
867 .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
870 .cra_type = &crypto_blkcipher_type,
871 .cra_module = THIS_MODULE,
874 .min_keysize = AES_MIN_KEY_SIZE,
875 .max_keysize = AES_MAX_KEY_SIZE,
876 .setkey = aes_set_key,
877 .encrypt = cbc_encrypt,
878 .decrypt = cbc_decrypt,
882 .cra_name = "ecb(aes)",
883 .cra_driver_name = "ecb-aes-aesni",
885 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
886 .cra_blocksize = AES_BLOCK_SIZE,
887 .cra_ctxsize = sizeof(struct async_helper_ctx),
889 .cra_type = &crypto_ablkcipher_type,
890 .cra_module = THIS_MODULE,
891 .cra_init = ablk_ecb_init,
892 .cra_exit = ablk_exit,
895 .min_keysize = AES_MIN_KEY_SIZE,
896 .max_keysize = AES_MAX_KEY_SIZE,
897 .setkey = ablk_set_key,
898 .encrypt = ablk_encrypt,
899 .decrypt = ablk_decrypt,
903 .cra_name = "cbc(aes)",
904 .cra_driver_name = "cbc-aes-aesni",
906 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
907 .cra_blocksize = AES_BLOCK_SIZE,
908 .cra_ctxsize = sizeof(struct async_helper_ctx),
910 .cra_type = &crypto_ablkcipher_type,
911 .cra_module = THIS_MODULE,
912 .cra_init = ablk_cbc_init,
913 .cra_exit = ablk_exit,
916 .min_keysize = AES_MIN_KEY_SIZE,
917 .max_keysize = AES_MAX_KEY_SIZE,
918 .ivsize = AES_BLOCK_SIZE,
919 .setkey = ablk_set_key,
920 .encrypt = ablk_encrypt,
921 .decrypt = ablk_decrypt,
926 .cra_name = "__ctr-aes-aesni",
927 .cra_driver_name = "__driver-ctr-aes-aesni",
929 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
931 .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
934 .cra_type = &crypto_blkcipher_type,
935 .cra_module = THIS_MODULE,
938 .min_keysize = AES_MIN_KEY_SIZE,
939 .max_keysize = AES_MAX_KEY_SIZE,
940 .ivsize = AES_BLOCK_SIZE,
941 .setkey = aes_set_key,
942 .encrypt = ctr_crypt,
943 .decrypt = ctr_crypt,
947 .cra_name = "ctr(aes)",
948 .cra_driver_name = "ctr-aes-aesni",
950 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
952 .cra_ctxsize = sizeof(struct async_helper_ctx),
954 .cra_type = &crypto_ablkcipher_type,
955 .cra_module = THIS_MODULE,
956 .cra_init = ablk_ctr_init,
957 .cra_exit = ablk_exit,
960 .min_keysize = AES_MIN_KEY_SIZE,
961 .max_keysize = AES_MAX_KEY_SIZE,
962 .ivsize = AES_BLOCK_SIZE,
963 .setkey = ablk_set_key,
964 .encrypt = ablk_encrypt,
965 .decrypt = ablk_encrypt,
970 .cra_name = "__gcm-aes-aesni",
971 .cra_driver_name = "__driver-gcm-aes-aesni",
973 .cra_flags = CRYPTO_ALG_TYPE_AEAD,
975 .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) +
978 .cra_type = &crypto_aead_type,
979 .cra_module = THIS_MODULE,
982 .encrypt = __driver_rfc4106_encrypt,
983 .decrypt = __driver_rfc4106_decrypt,
987 .cra_name = "rfc4106(gcm(aes))",
988 .cra_driver_name = "rfc4106-gcm-aesni",
990 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
992 .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) +
995 .cra_type = &crypto_nivaead_type,
996 .cra_module = THIS_MODULE,
997 .cra_init = rfc4106_init,
998 .cra_exit = rfc4106_exit,
1001 .setkey = rfc4106_set_key,
1002 .setauthsize = rfc4106_set_authsize,
1003 .encrypt = rfc4106_encrypt,
1004 .decrypt = rfc4106_decrypt,
1012 .cra_name = "rfc3686(ctr(aes))",
1013 .cra_driver_name = "rfc3686-ctr-aes-aesni",
1014 .cra_priority = 400,
1015 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1017 .cra_ctxsize = sizeof(struct async_helper_ctx),
1019 .cra_type = &crypto_ablkcipher_type,
1020 .cra_module = THIS_MODULE,
1021 .cra_init = ablk_rfc3686_ctr_init,
1022 .cra_exit = ablk_exit,
1025 .min_keysize = AES_MIN_KEY_SIZE +
1026 CTR_RFC3686_NONCE_SIZE,
1027 .max_keysize = AES_MAX_KEY_SIZE +
1028 CTR_RFC3686_NONCE_SIZE,
1029 .ivsize = CTR_RFC3686_IV_SIZE,
1030 .setkey = ablk_set_key,
1031 .encrypt = ablk_encrypt,
1032 .decrypt = ablk_decrypt,
1040 .cra_name = "lrw(aes)",
1041 .cra_driver_name = "lrw-aes-aesni",
1042 .cra_priority = 400,
1043 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1044 .cra_blocksize = AES_BLOCK_SIZE,
1045 .cra_ctxsize = sizeof(struct async_helper_ctx),
1047 .cra_type = &crypto_ablkcipher_type,
1048 .cra_module = THIS_MODULE,
1049 .cra_init = ablk_lrw_init,
1050 .cra_exit = ablk_exit,
1053 .min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
1054 .max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
1055 .ivsize = AES_BLOCK_SIZE,
1056 .setkey = ablk_set_key,
1057 .encrypt = ablk_encrypt,
1058 .decrypt = ablk_decrypt,
1064 .cra_name = "pcbc(aes)",
1065 .cra_driver_name = "pcbc-aes-aesni",
1066 .cra_priority = 400,
1067 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1068 .cra_blocksize = AES_BLOCK_SIZE,
1069 .cra_ctxsize = sizeof(struct async_helper_ctx),
1071 .cra_type = &crypto_ablkcipher_type,
1072 .cra_module = THIS_MODULE,
1073 .cra_init = ablk_pcbc_init,
1074 .cra_exit = ablk_exit,
1077 .min_keysize = AES_MIN_KEY_SIZE,
1078 .max_keysize = AES_MAX_KEY_SIZE,
1079 .ivsize = AES_BLOCK_SIZE,
1080 .setkey = ablk_set_key,
1081 .encrypt = ablk_encrypt,
1082 .decrypt = ablk_decrypt,
1088 .cra_name = "xts(aes)",
1089 .cra_driver_name = "xts-aes-aesni",
1090 .cra_priority = 400,
1091 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1092 .cra_blocksize = AES_BLOCK_SIZE,
1093 .cra_ctxsize = sizeof(struct async_helper_ctx),
1095 .cra_type = &crypto_ablkcipher_type,
1096 .cra_module = THIS_MODULE,
1097 .cra_init = ablk_xts_init,
1098 .cra_exit = ablk_exit,
1101 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1102 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1103 .ivsize = AES_BLOCK_SIZE,
1104 .setkey = ablk_set_key,
1105 .encrypt = ablk_encrypt,
1106 .decrypt = ablk_decrypt,
1113 static const struct x86_cpu_id aesni_cpu_id[] = {
1114 X86_FEATURE_MATCH(X86_FEATURE_AES),
1117 MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
1119 static int __init aesni_init(void)
1123 if (!x86_match_cpu(aesni_cpu_id))
1126 err = crypto_fpu_init();
1130 for (i = 0; i < ARRAY_SIZE(aesni_algs); i++)
1131 INIT_LIST_HEAD(&aesni_algs[i].cra_list);
1133 return crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1136 static void __exit aesni_exit(void)
1138 crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1143 module_init(aesni_init);
1144 module_exit(aesni_exit);
1146 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
1147 MODULE_LICENSE("GPL");
1148 MODULE_ALIAS("aes");