/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
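
/*
 * Typical use by a synchronous cipher implementation (illustrative sketch
 * only; the function names and the block size "bsize" below are
 * placeholders, not part of this file):
 *
 *	static int example_encrypt(struct blkcipher_desc *desc,
 *				   struct scatterlist *dst,
 *				   struct scatterlist *src,
 *				   unsigned int nbytes)
 *	{
 *		struct blkcipher_walk walk;
 *		int err;
 *
 *		blkcipher_walk_init(&walk, dst, src, nbytes);
 *		err = blkcipher_walk_virt(desc, &walk);
 *
 *		while (walk.nbytes) {
 *			// encrypt walk.nbytes - (walk.nbytes % bsize) bytes
 *			// from walk.src.virt.addr into walk.dst.virt.addr
 *			err = blkcipher_walk_done(desc, &walk,
 *						  walk.nbytes % bsize);
 *		}
 *		return err;
 *	}
 */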

#include <crypto/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"
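
/*
 * Per-walk state flags (rough meaning, as used below): PHYS - the caller
 * asked for page/offset pairs via blkcipher_walk_phys() instead of mapped
 * virtual addresses; SLOW - the current block straddles a scatterlist
 * entry and is bounced through walk->buffer; COPY - the data was copied
 * into walk->page because the source or destination was not suitably
 * aligned; DIFF - source and destination required separate mappings.
 */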

enum {
	BLKCIPHER_WALK_PHYS = 1 << 0,
	BLKCIPHER_WALK_SLOW = 1 << 1,
	BLKCIPHER_WALK_COPY = 1 << 2,
	BLKCIPHER_WALK_DIFF = 1 << 3,
};

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk);

static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
	return max(start, end_page);
}
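
/*
 * blkcipher_get_spot() example (assuming 4 KiB pages; the addresses are
 * purely illustrative): for start == 0x12ff8 and len == 16,
 * start + len - 1 == 0x13007, so end_page == 0x13000 and the spot returned
 * is 0x13000, the beginning of the next page.  When [start, start + len)
 * does not cross a page boundary, end_page <= start and start itself is
 * returned.
 */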

static inline void blkcipher_done_slow(struct blkcipher_walk *walk,
				       unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = blkcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
}

static inline void blkcipher_done_fast(struct blkcipher_walk *walk,
				       unsigned int n)
{
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		blkcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		blkcipher_unmap_dst(walk);
	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
		if (walk->flags & BLKCIPHER_WALK_DIFF)
			blkcipher_unmap_dst(walk);
		blkcipher_unmap_src(walk);
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
}
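
/*
 * blkcipher_walk_done() is called after each step of the walk.  Its err
 * argument is either a negative error code from the cipher step or the
 * number of bytes at the tail of walk->nbytes that were left unprocessed;
 * the walk continues (walk->nbytes stays non-zero) until all data has been
 * consumed or an error occurs.
 */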

int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	unsigned int n; /* bytes processed */
	bool more;

	if (unlikely(err < 0))
		goto finish;

	n = walk->nbytes - err;
	walk->total -= n;
	more = (walk->total != 0);

	if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) {
		blkcipher_done_fast(walk, n);
	} else {
		if (WARN_ON(err)) {
			/* unexpected case; didn't process all bytes */
			err = -EINVAL;
			goto finish;
		}
		blkcipher_done_slow(walk, n);
	}

	scatterwalk_done(&walk->in, 0, more);
	scatterwalk_done(&walk->out, 1, more);

	if (more) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}
	err = 0;
finish:
	walk->nbytes = 0;
	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
	return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);

static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      unsigned int bsize,
				      unsigned int alignmask)
{
	unsigned int n;
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

	if (walk->buffer)
		goto ok;

	walk->buffer = walk->page;
	if (walk->buffer)
		goto ok;

	n = aligned_bsize * 3 - (alignmask + 1) +
	    (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
	walk->buffer = kmalloc(n, GFP_ATOMIC);
	if (!walk->buffer)
		return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
					  alignmask + 1);
	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
						 aligned_bsize, bsize);

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= BLKCIPHER_WALK_SLOW;
	return 0;
}

static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
	u8 *tmp = walk->page;

	blkcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	blkcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;
	return 0;
}

static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & BLKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	blkcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= BLKCIPHER_WALK_DIFF;
		blkcipher_map_dst(walk);
	}
	return 0;
}

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	n = walk->total;
	if (unlikely(n < walk->cipher_blocksize)) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	bsize = min(walk->walk_blocksize, n);

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
	    !scatterwalk_aligned(&walk->out, walk->alignmask)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			if (!walk->page)
				n = 0;
		}
	}

	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

static inline int blkcipher_copy_iv(struct blkcipher_walk *walk)
{
	unsigned bs = walk->walk_blocksize;
	unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1);
	unsigned int size = aligned_bs * 2 +
			    walk->ivsize + max(aligned_bs, walk->ivsize) -
			    (walk->alignmask + 1);
	u8 *iv;

	size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, walk->ivsize);

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);

static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->iv = desc->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = blkcipher_copy_iv(walk);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}

int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);

int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk,
				   struct crypto_aead *tfm,
				   unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_aead_blocksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block);

static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
	    cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}

static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}

static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
		return crypto_init_blkcipher_ops_sync(tfm);
	else
		return crypto_init_blkcipher_ops_async(tfm);
}

#ifdef CONFIG_NET
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<default>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_blkcipher.ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}
#else
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : blkcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
	seq_printf(m, "geniv        : <default>\n");
}

const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
	.report = crypto_blkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);
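
/*
 * A driver provided an algorithm of this (legacy) type roughly as follows
 * (sketch; all names, sizes and callbacks are placeholders):
 *
 *	static struct crypto_alg example_alg = {
 *		.cra_name		= "cbc(example)",
 *		.cra_driver_name	= "cbc-example",
 *		.cra_priority		= 100,
 *		.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
 *		.cra_blocksize		= 16,
 *		.cra_ctxsize		= sizeof(struct example_ctx),
 *		.cra_type		= &crypto_blkcipher_type,
 *		.cra_module		= THIS_MODULE,
 *		.cra_u.blkcipher = {
 *			.min_keysize	= 16,
 *			.max_keysize	= 32,
 *			.ivsize		= 16,
 *			.setkey		= example_setkey,
 *			.encrypt	= example_encrypt,
 *			.decrypt	= example_decrypt,
 *		},
 *	};
 *
 * and registered it with crypto_register_alg(&example_alg).
 */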

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");