/**
 * AES GCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"
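
/*
 * The NX GCM code drives two co-processor control blocks: the main CSB/CPB
 * runs the GCM (or, for AAD-only requests, GMAC) cipher itself, while a
 * second "aead" CSB/CPB runs GCA, the accelerator's standalone GHASH over
 * the associated data. Both are keyed together in gcm_aes_nx_set_key().
 */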
static int gcm_aes_nx_set_key(struct crypto_aead *tfm,
			      const u8           *in_key,
			      unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	switch (key_len) {
	case AES_KEYSIZE_128:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	case AES_KEYSIZE_192:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_192);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
		break;
	case AES_KEYSIZE_256:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_256);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
		break;
	default:
		return -EINVAL;
	}

	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	memcpy(csbcpb->cpb.aes_gcm.key, in_key, key_len);

	csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_GCA;
	memcpy(csbcpb_aead->cpb.aes_gca.key, in_key, key_len);

	return 0;
}
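
/*
 * RFC 4106 key material carries a trailing 4-byte nonce (salt) after the
 * AES key proper; strip it off, program the AES key as usual, and stash
 * the salt for IV construction at request time.
 */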
static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm,
				  const u8           *in_key,
				  unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
	char *nonce = nx_ctx->priv.gcm.nonce;
	int rc;

	if (key_len < 4)
		return -EINVAL;

	key_len -= 4;

	rc = gcm_aes_nx_set_key(tfm, in_key, key_len);
	if (rc)
		goto out;

	memcpy(nonce, in_key + key_len, 4);
out:
	return rc;
}
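
/* RFC 4106 permits only 8-, 12- and 16-byte ICVs; reject anything else. */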
static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
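
/*
 * Compute the GHASH of the associated data into @out. AAD of one AES block
 * or less is simply copied out for the GCM operation to consume in-line;
 * anything longer is fed to the GCA co-processor in chunks sized to the
 * scatter/gather limits, carrying the partial pattern (in_pat/out_pat)
 * across INTERMEDIATE/CONTINUATION operations.
 */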
static int nx_gca(struct nx_crypto_ctx  *nx_ctx,
		  struct aead_request   *req,
		  u8                    *out)
{
	int rc;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
	struct scatter_walk walk;
	struct nx_sg *nx_sg = nx_ctx->in_sg;
	unsigned int nbytes = req->assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;

	if (nbytes <= AES_BLOCK_SIZE) {
		scatterwalk_start(&walk, req->src);
		scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG);
		scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
		return 0;
	}

	NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->src, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;

		nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
					* sizeof(struct nx_sg);

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
				req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		memcpy(csbcpb_aead->cpb.aes_gca.in_pat,
				csbcpb_aead->cpb.aes_gca.out_pat,
				AES_BLOCK_SIZE);
		NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);

	return rc;
}
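
/*
 * Handle requests that carry associated data but no payload: flip the CPB
 * into GMAC mode, hash the AAD with bit_length_data forced to zero, and
 * restore GCM mode on the way out.
 */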
static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *nx_sg;
	unsigned int nbytes = req->assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;

	/* Set GMAC mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;

	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	/* Copy IV */
	memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->src, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
					* sizeof(struct nx_sg);

		csbcpb->cpb.aes_gcm.bit_length_data = 0;
		csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
			csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
			csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	return rc;
}
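
/*
 * With no payload and no AAD, GCM degenerates to tag = E(K, Y0): a single
 * AES encryption of the initial counter block. Borrow ECB mode for that
 * one block instead of driving the full GCM engine.
 */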
static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
		     int enc)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	char out[AES_BLOCK_SIZE];
	struct nx_sg *in_sg, *out_sg;
	int len;

	/* For scenarios where the input message is zero length, AES CTR mode
	 * may be used. Set the source data to be a single block (16B) of all
	 * zeros, and set the input IV value to be the same as the GMAC IV
	 * value. - nx_wb 4.8.1.3 */

	/* Change to ECB mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
	memcpy(csbcpb->cpb.aes_ecb.key, csbcpb->cpb.aes_gcm.key,
			sizeof(csbcpb->cpb.aes_ecb.key));
	if (enc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	else
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

	len = AES_BLOCK_SIZE;

	/* Encrypt the counter/IV */
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
				 &len, nx_ctx->ap->sglen);
	if (len != AES_BLOCK_SIZE)
		return -EINVAL;

	len = sizeof(out);
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, &len,
				  nx_ctx->ap->sglen);
	if (len != sizeof(out))
		return -EINVAL;

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

	/* Copy out the auth tag */
	memcpy(csbcpb->cpb.aes_gcm.out_pat_or_mac, out,
			crypto_aead_authsize(crypto_aead_reqtfm(req)));
out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;

	/*
	 * The ECB key uses the same region as the GCM AAD and counter, so
	 * it's safe to just fill it with zeroes.
	 */
	memset(csbcpb->cpb.aes_ecb.key, 0, sizeof(csbcpb->cpb.aes_ecb.key));

	return rc;
}
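
/*
 * Common encrypt/decrypt path: dispatch the zero-payload cases to
 * gcm_empty()/gmac(), hash any AAD via nx_gca(), then loop the payload
 * through the GCM engine in sg-limited chunks, chaining the counter,
 * pattern and S0 values between calls. On encrypt the tag is appended to
 * dst; on decrypt it is compared against the tag trailing src.
 */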
static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct blkcipher_desc desc;
	unsigned int nbytes = req->cryptlen;
	unsigned int processed = 0, to_process;
	unsigned long irq_flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	desc.info = nx_ctx->priv.gcm.iv;
	/* initialize the counter */
	*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;

	if (nbytes == 0) {
		if (req->assoclen == 0)
			rc = gcm_empty(req, &desc, enc);
		else
			rc = gmac(req, &desc);
		if (rc)
			goto out;
		else
			goto mac;
	}

	/* Process associated data */
	csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8;
	if (req->assoclen) {
		rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad);
		if (rc)
			goto out;
	}

	/* Set flags for encryption */
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
	if (enc) {
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	} else {
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
		nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
	}

	do {
		to_process = nbytes - processed;

		csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
		desc.tfm = (struct crypto_blkcipher *) req->base.tfm;
		rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
				       req->src, &to_process,
				       processed + req->assoclen,
				       csbcpb->cpb.aes_gcm.iv_or_cnt);
		if (rc)
			goto out;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
			csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
			csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

mac:
	if (enc) {
		/* copy out the auth tag */
		scatterwalk_map_and_copy(
			csbcpb->cpb.aes_gcm.out_pat_or_mac,
			req->dst, req->assoclen + nbytes,
			crypto_aead_authsize(crypto_aead_reqtfm(req)),
			SCATTERWALK_TO_SG);
	} else {
		u8 *itag = nx_ctx->priv.gcm.iauth_tag;
		u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;

		scatterwalk_map_and_copy(
			itag, req->src, req->assoclen + nbytes,
			crypto_aead_authsize(crypto_aead_reqtfm(req)),
			SCATTERWALK_FROM_SG);
		rc = memcmp(itag, otag,
			    crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
		     -EBADMSG : 0;
	}
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
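
/*
 * Plain gcm(aes) supplies the full 12-byte IV with each request; the
 * wrappers below only stage it into the per-tfm buffer before calling the
 * common crypt routine.
 */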
static int gcm_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	char *iv = nx_ctx->priv.gcm.iv;

	memcpy(iv, req->iv, 12);

	return gcm_aes_nx_crypt(req, 1);
}

static int gcm_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	char *iv = nx_ctx->priv.gcm.iv;

	memcpy(iv, req->iv, 12);

	return gcm_aes_nx_crypt(req, 0);
}
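
/*
 * For rfc4106(gcm(aes)) the 12-byte GCM IV is the 4-byte salt saved at
 * setkey time followed by the 8-byte explicit IV carried in each request.
 */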
static int gcm4106_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	char *iv = nx_ctx->priv.gcm.iv;
	char *nonce = nx_ctx->priv.gcm.nonce;

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

	return gcm_aes_nx_crypt(req, 1);
}

static int gcm4106_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	char *iv = nx_ctx->priv.gcm.iv;
	char *nonce = nx_ctx->priv.gcm.nonce;

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

	return gcm_aes_nx_crypt(req, 0);
}

/* tell the block cipher walk routines that this is a stream cipher by
 * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
 * during encrypt/decrypt doesn't solve this problem, because it calls
 * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
 * but instead uses this tfm->blocksize. */
struct aead_alg nx_gcm_aes_alg = {
	.base = {
		.cra_name        = "gcm(aes)",
		.cra_driver_name = "gcm-aes-nx",
		.cra_priority    = 300,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_gcm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = 12,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = gcm_aes_nx_set_key,
	.encrypt     = gcm_aes_nx_encrypt,
	.decrypt     = gcm_aes_nx_decrypt,
};

struct aead_alg nx_gcm4106_aes_alg = {
	.base = {
		.cra_name        = "rfc4106(gcm(aes))",
		.cra_driver_name = "rfc4106-gcm-aes-nx",
		.cra_priority    = 300,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_gcm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = 8,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = gcm4106_aes_nx_set_key,
	.setauthsize = gcm4106_aes_nx_setauthsize,
	.encrypt     = gcm4106_aes_nx_encrypt,
	.decrypt     = gcm4106_aes_nx_decrypt,
};
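
/*
 * Usage sketch (illustrative only, not part of this driver): callers reach
 * these algorithms through the generic kernel AEAD API, e.g.:
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	struct aead_request *req;
 *
 *	crypto_aead_setkey(tfm, key, AES_KEYSIZE_128);
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, src_sg, dst_sg, cryptlen, iv);
 *	rc = crypto_aead_encrypt(req);
 *
 * (Error handling omitted; "gcm(aes)" resolves to gcm-aes-nx when this
 * driver holds the highest priority for that name.)
 */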