/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
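/*
 * Typical use of the walk interface by a chaining mode (a minimal
 * sketch modelled on implementations such as crypto/cbc.c; the
 * per-block step, shown as a placeholder comment, is whatever the
 * mode actually computes):
 *
 *	struct blkcipher_walk walk;
 *	int err;
 *
 *	blkcipher_walk_init(&walk, dst, src, nbytes);
 *	err = blkcipher_walk_virt(desc, &walk);
 *
 *	while ((nbytes = walk.nbytes) != 0) {
 *		// process the full blocks at walk.src.virt.addr into
 *		// walk.dst.virt.addr, chaining through walk.iv
 *		nbytes &= walk.blocksize - 1;	// assumes power-of-2 blocksize
 *		err = blkcipher_walk_done(desc, &walk, nbytes);
 *	}
 *	return err;
 */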
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"
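/*
 * Walk state flags: PHYS marks a walk over physical pages; SLOW and
 * COPY mark the bounce-buffer paths (a block straddling a page or
 * scatterlist entry, or data misaligned for the cipher); DIFF records
 * that src and dst were mapped separately and both need unmapping.
 */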
enum {
	BLKCIPHER_WALK_PHYS = 1 << 0,
	BLKCIPHER_WALK_SLOW = 1 << 1,
	BLKCIPHER_WALK_COPY = 1 << 2,
	BLKCIPHER_WALK_DIFF = 1 << 3,
};
static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk);
static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in, 0);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out, 1);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr, 0);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr, 1);
}
/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}
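/*
 * For example, with 4 KiB pages, start == ...0xff8 and len == 16 gives
 * end_page == ...0x1000, bumping the spot to the start of the next
 * page; when start + len - 1 stays within start's page, end_page falls
 * at or before start and start itself is returned.
 */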
static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm,
					       struct blkcipher_walk *walk,
					       unsigned int bsize)
{
	u8 *addr;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
	addr = blkcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
	return bsize;
}
static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
					       unsigned int n)
{
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		blkcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		blkcipher_unmap_dst(walk);
	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
		if (walk->flags & BLKCIPHER_WALK_DIFF)
			blkcipher_unmap_dst(walk);
		blkcipher_unmap_src(walk);
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);

	return n;
}
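/*
 * Called by the cipher with err equal to the number of bytes it left
 * unprocessed (or a negative error): flush the current chunk back to
 * the destination, advance the walk, and either set up the next chunk
 * or clean up and return.
 */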
int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int n = walk->nbytes - err;

		if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
			n = blkcipher_done_fast(walk, n);
		else if (WARN_ON(err)) {
			err = -EINVAL;
			n = 0;
		} else
			n = blkcipher_done_slow(tfm, walk, n);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}

	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm));
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

	return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);
static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      unsigned int bsize,
				      unsigned int alignmask)
{
	unsigned int n;
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

	if (walk->buffer)
		goto ok;

	walk->buffer = walk->page;
	if (walk->buffer)
		goto ok;

	n = aligned_bsize * 3 - (alignmask + 1) +
	    (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
	walk->buffer = kmalloc(n, GFP_ATOMIC);
	if (!walk->buffer)
		return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
					  alignmask + 1);
	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
						 aligned_bsize, bsize);

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= BLKCIPHER_WALK_SLOW;

	return 0;
}
static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
	u8 *tmp = walk->page;

	blkcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	blkcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	return 0;
}
static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & BLKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	blkcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= BLKCIPHER_WALK_DIFF;
		blkcipher_map_dst(walk);
	}

	return 0;
}
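/*
 * Pick the strategy for the next chunk: the slow path bounces a single
 * block that straddles a page or scatterlist entry, the copy path
 * bounces misaligned data through a whole page, and the fast path maps
 * the scatterlists in place.
 */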
static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
	unsigned int bsize;
	unsigned int n;
	int err;

	n = walk->total;
	if (unlikely(n < crypto_blkcipher_blocksize(tfm))) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			if (!walk->page)
				n = 0;
		}
	}

	bsize = min(walk->blocksize, n);
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}

	return err;
}
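/*
 * blkcipher_copy_iv() bounces a misaligned IV into scratch space.  The
 * buffer is sized for two aligned bounce blocks ahead of the IV so the
 * slow path above can reuse it, with slack for the blkcipher_get_spot()
 * page-boundary bumps.
 */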
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
				    struct crypto_blkcipher *tfm,
				    unsigned int alignmask)
{
	unsigned bs = walk->blocksize;
	unsigned int ivsize = crypto_blkcipher_ivsize(tfm);
	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
			    (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}
int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->blocksize = crypto_blkcipher_blocksize(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	walk->blocksize = crypto_blkcipher_blocksize(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	walk->iv = desc->info;
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = blkcipher_copy_iv(walk, tfm, alignmask);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}
int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->blocksize = blocksize;
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);
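/*
 * Keys that violate the algorithm's alignment mask are bounced through
 * a temporary aligned buffer, which is wiped before being freed so that
 * no key material is left behind.
 */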
static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}
static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}
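/*
 * Bridge helpers: expose a synchronous blkcipher through the
 * ablkcipher interface by building a blkcipher_desc on the stack and
 * completing the request synchronously.
 */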
static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}
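/*
 * For a tfm created through the synchronous interface, room for the IV
 * is reserved directly behind the aligned context; the matching address
 * arithmetic lives in crypto_init_blkcipher_ops_sync() below.
 */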
static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
	    cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}
static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	if (!alg->ivsize) {
		crt->givencrypt = skcipher_null_givencrypt;
		crt->givdecrypt = skcipher_null_givdecrypt;
	}
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}
static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}
static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
		return crypto_init_blkcipher_ops_sync(tfm);
	else
		return crypto_init_blkcipher_ops_async(tfm);
}
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "blkcipher");
	snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s",
		 alg->cra_blkcipher.geniv ?: "<default>");

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_blkcipher.ivsize;

	NLA_PUT(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		sizeof(struct crypto_report_blkcipher), &rblkcipher);

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : blkcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
	seq_printf(m, "geniv        : %s\n", alg->cra_blkcipher.geniv ?:
					     "<default>");
}
const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
	.report = crypto_blkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);
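/*
 * What follows backs the IV-generator ("geniv") templates such as
 * chainiv and eseqiv: skcipher_geniv_alloc() wraps an existing
 * (a)blkcipher algorithm in an instance that supplies IV generation.
 */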
static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn,
				 const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;
	int err;

	type = crypto_skcipher_type(type);
	mask = crypto_skcipher_mask(mask) | CRYPTO_ALG_GENIV;

	alg = crypto_alg_mod_lookup(name, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
	crypto_mod_put(alg);
	return err;
}
struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
					     struct rtattr **tb, u32 type,
					     u32 mask)
{
	struct {
		int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int keylen);
		int (*encrypt)(struct ablkcipher_request *req);
		int (*decrypt)(struct ablkcipher_request *req);

		unsigned int min_keysize;
		unsigned int max_keysize;
		unsigned int ivsize;

		const char *geniv;
	} balg;
	const char *name;
	struct crypto_skcipher_spawn *spawn;
	struct crypto_attr_type *algt;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;
	algt = crypto_get_attr_type(tb);
	err = PTR_ERR(algt);
	if (IS_ERR(algt))
		return ERR_PTR(err);

	if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) &
	    algt->mask)
		return ERR_PTR(-EINVAL);

	name = crypto_attr_alg_name(tb[1]);
	err = PTR_ERR(name);
	if (IS_ERR(name))
		return ERR_PTR(err);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);

	spawn = crypto_instance_ctx(inst);

	/* Ignore async algorithms if necessary. */
	mask |= crypto_requires_sync(algt->type, algt->mask);

	crypto_set_skcipher_spawn(spawn, inst);
	err = crypto_grab_nivcipher(spawn, name, type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(spawn);
	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_BLKCIPHER) {
		balg.ivsize = alg->cra_blkcipher.ivsize;
		balg.min_keysize = alg->cra_blkcipher.min_keysize;
		balg.max_keysize = alg->cra_blkcipher.max_keysize;

		balg.setkey = async_setkey;
		balg.encrypt = async_encrypt;
		balg.decrypt = async_decrypt;

		balg.geniv = alg->cra_blkcipher.geniv;
	} else {
		balg.ivsize = alg->cra_ablkcipher.ivsize;
		balg.min_keysize = alg->cra_ablkcipher.min_keysize;
		balg.max_keysize = alg->cra_ablkcipher.max_keysize;

		balg.setkey = alg->cra_ablkcipher.setkey;
		balg.encrypt = alg->cra_ablkcipher.encrypt;
		balg.decrypt = alg->cra_ablkcipher.decrypt;

		balg.geniv = alg->cra_ablkcipher.geniv;
	}

	err = -EINVAL;
	if (!balg.ivsize)
		goto err_drop_alg;
	/*
	 * This is only true if we're constructing an algorithm with its
	 * default IV generator.  For the default generator we elide the
	 * template name and double-check the IV generator.
	 */
	if (algt->mask & CRYPTO_ALG_GENIV) {
		if (!balg.geniv)
			balg.geniv = crypto_default_geniv(alg);
		err = -EAGAIN;
		if (strcmp(tmpl->name, balg.geniv))
			goto err_drop_alg;

		memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
		memcpy(inst->alg.cra_driver_name, alg->cra_driver_name,
		       CRYPTO_MAX_ALG_NAME);
	} else {
		err = -ENAMETOOLONG;
		if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, alg->cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_drop_alg;
		if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, alg->cra_driver_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_drop_alg;
	}
	inst->alg.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV;
	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;
	inst->alg.cra_type = &crypto_givcipher_type;

	inst->alg.cra_ablkcipher.ivsize = balg.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = balg.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = balg.max_keysize;
	inst->alg.cra_ablkcipher.geniv = balg.geniv;

	inst->alg.cra_ablkcipher.setkey = balg.setkey;
	inst->alg.cra_ablkcipher.encrypt = balg.encrypt;
	inst->alg.cra_ablkcipher.decrypt = balg.decrypt;
out:
	return inst;

err_drop_alg:
	crypto_drop_skcipher(spawn);
err_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_alloc);

void skcipher_geniv_free(struct crypto_instance *inst)
{
	crypto_drop_skcipher(crypto_instance_ctx(inst));
	kfree(inst);
}
EXPORT_SYMBOL_GPL(skcipher_geniv_free);
int skcipher_geniv_init(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_ablkcipher *cipher;

	cipher = crypto_spawn_skcipher(crypto_instance_ctx(inst));
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	tfm->crt_ablkcipher.base = cipher;
	tfm->crt_ablkcipher.reqsize += crypto_ablkcipher_reqsize(cipher);

	return 0;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_init);
void skcipher_geniv_exit(struct crypto_tfm *tfm)
{
	crypto_free_ablkcipher(tfm->crt_ablkcipher.base);
}
EXPORT_SYMBOL_GPL(skcipher_geniv_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");