/*
 * Asynchronous block chaining cipher operations.
 *
 * This is the asynchronous version of blkcipher.c indicating completion
 * via a callback.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
#include <crypto/internal/skcipher.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include <crypto/scatterwalk.h>

#include "internal.h"

static const char *skcipher_default_geniv __read_mostly;
struct ablkcipher_buffer {
	struct list_head	entry;
	struct scatter_walk	dst;
	unsigned int		len;
	void			*data;
};

enum {
	ABLKCIPHER_WALK_SLOW = 1 << 0,
};
static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
	scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}
void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
	struct ablkcipher_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		ablkcipher_buffer_write(p);
		list_del(&p->entry);
		kfree(p);
	}
}
EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);
static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
					  struct ablkcipher_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}
/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}
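
/*
 * Illustrative walk-through (added for clarity; addresses are hypothetical
 * and assume PAGE_SIZE == 4096): for start == 4090 and len == 16,
 * start + len - 1 == 4105 lies on the next page, so end_page == 4096 and
 * max() relocates the spot to 4096, placing it entirely within one page.
 * When [start, start + len) already fits in a single page, end_page is
 * at or below start and start is returned unchanged.
 */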
static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
						unsigned int bsize)
{
	unsigned int n = bsize;

	for (;;) {
		unsigned int len_this_page = scatterwalk_pagelen(&walk->out);

		if (len_this_page > n)
			len_this_page = n;
		scatterwalk_advance(&walk->out, n);
		if (n == len_this_page)
			break;
		n -= len_this_page;
		scatterwalk_start(&walk->out, scatterwalk_sg_next(walk->out.sg));
	}

	return bsize;
}
static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
						unsigned int n)
{
	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);

	return n;
}
static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk);
int ablkcipher_walk_done(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk, int err)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int n = walk->nbytes - err;

		if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
			n = ablkcipher_done_fast(walk, n);
		else if (WARN_ON(err)) {
			err = -EINVAL;
			goto err;
		} else
			n = ablkcipher_done_slow(walk, n);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

err:
	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(req->base.flags);
		return ablkcipher_walk_next(req, walk);
	}

	if (walk->iv != req->info)
		memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
	kfree(walk->iv_buffer);

	return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);
static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk,
				       unsigned int bsize,
				       unsigned int alignmask,
				       void **src_p, void **dst_p)
{
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
	struct ablkcipher_buffer *p;
	void *src, *dst, *base;
	unsigned int n;

	n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
	n += (aligned_bsize * 3 - (alignmask + 1) +
	      (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));

	p = kmalloc(n, GFP_ATOMIC);
	if (!p)
		return ablkcipher_walk_done(req, walk, -ENOMEM);

	base = p + 1;
	dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
	src = dst = ablkcipher_get_spot(dst, bsize);

	p->len = bsize;
	p->data = dst;
	scatterwalk_copychunks(src, &walk->in, bsize, 0);
	ablkcipher_queue_write(walk, p);

	walk->nbytes = bsize;
	walk->flags |= ABLKCIPHER_WALK_SLOW;

	*src_p = src;
	*dst_p = dst;
	return 0;
}
static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
				     struct crypto_tfm *tfm,
				     unsigned int alignmask)
{
	unsigned bs = walk->blocksize;
	unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
			    (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->iv_buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}
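
/*
 * Illustrative sizing (hypothetical values, not from the original source):
 * with bs == 16, ivsize == 16 and alignmask == 15, aligned_bs == 16 and
 * size == 2 * 16 + 16 + max(16, 16) - 16 == 48 bytes plus the
 * ctx-alignment slack; the repeated ablkcipher_get_spot() hops then
 * guarantee that the final aligned IV copy never straddles a page.
 */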
static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk)
{
	walk->src.page = scatterwalk_page(&walk->in);
	walk->src.offset = offset_in_page(walk->in.offset);
	walk->dst.page = scatterwalk_page(&walk->out);
	walk->dst.offset = offset_in_page(walk->out.offset);

	return 0;
}
static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask, bsize, n;
	void *src, *dst;
	int err;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	n = walk->total;
	if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
		req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return ablkcipher_walk_done(req, walk, -EINVAL);
	}

	walk->flags &= ~ABLKCIPHER_WALK_SLOW;
	src = dst = NULL;

	bsize = min(walk->blocksize, n);
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (n < bsize ||
	    !scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		err = ablkcipher_next_slow(req, walk, bsize, alignmask,
					   &src, &dst);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return ablkcipher_next_fast(req, walk);

set_phys_lowmem:
	if (err >= 0) {
		walk->src.page = virt_to_page(src);
		walk->dst.page = virt_to_page(dst);
		walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
		walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
	}

	return err;
}
static int ablkcipher_walk_first(struct ablkcipher_request *req,
				 struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->iv_buffer = NULL;
	walk->iv = req->info;
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = ablkcipher_copy_iv(walk, tfm, alignmask);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);

	return ablkcipher_walk_next(req, walk);
}
int ablkcipher_walk_phys(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk)
{
	walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
	return ablkcipher_walk_first(req, walk);
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);
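
/*
 * Sketch of a typical driver loop around this walker (illustrative only;
 * ablkcipher_walk_init() lives in <crypto/algapi.h> and the processing
 * step is a placeholder):
 *
 *	struct ablkcipher_walk walk;
 *	int err;
 *
 *	ablkcipher_walk_init(&walk, req->dst, req->src, req->nbytes);
 *	err = ablkcipher_walk_phys(req, &walk);
 *	while (walk.nbytes) {
 *		unsigned int done = walk.nbytes;
 *
 *		(process done bytes from walk.src to walk.dst here)
 *		err = ablkcipher_walk_done(req, &walk, walk.nbytes - done);
 *	}
 *
 * The third argument to ablkcipher_walk_done() is the number of bytes
 * left unprocessed in this step, or a negative error code.
 */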
static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}
static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		  unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}
static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					      u32 mask)
{
	return alg->cra_ctxsize;
}

int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req)
{
	return crypto_ablkcipher_encrypt(&req->creq);
}

int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req)
{
	return crypto_ablkcipher_decrypt(&req->creq);
}
static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
				      u32 mask)
{
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	if (!alg->ivsize) {
		crt->givencrypt = skcipher_null_givencrypt;
		crt->givdecrypt = skcipher_null_givdecrypt;
	}
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "ablkcipher");
	snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s",
		 alg->cra_ablkcipher.geniv ?: "<default>");

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

	NLA_PUT(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		sizeof(struct crypto_report_blkcipher), &rblkcipher);

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type         : ablkcipher\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<default>");
}
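
/*
 * The resulting /proc/crypto entry looks like this (field values are
 * hypothetical, derived only from the format strings above):
 *
 *	type         : ablkcipher
 *	async        : yes
 *	blocksize    : 16
 *	min keysize  : 16
 *	max keysize  : 32
 *	ivsize       : 16
 *	geniv        : <default>
 */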
const struct crypto_type crypto_ablkcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_ablkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_ablkcipher_show,
#endif
	.report = crypto_ablkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);
static int no_givdecrypt(struct skcipher_givcrypt_request *req)
{
	return -ENOSYS;
}
static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
				     u32 mask)
{
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
		      alg->setkey : setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	crt->givencrypt = alg->givencrypt;
	crt->givdecrypt = alg->givdecrypt ?: no_givdecrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "givcipher");
	snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s",
		 alg->cra_ablkcipher.geniv ?: "<built-in>");

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

	NLA_PUT(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		sizeof(struct crypto_report_blkcipher), &rblkcipher);

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type         : givcipher\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<built-in>");
}
const struct crypto_type crypto_givcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_givcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_givcipher_show,
#endif
	.report = crypto_givcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_givcipher_type);
const char *crypto_default_geniv(const struct crypto_alg *alg)
{
	if (((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	     CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
					 alg->cra_ablkcipher.ivsize) !=
	    alg->cra_blocksize)
		return "chainiv";

	return alg->cra_flags & CRYPTO_ALG_ASYNC ?
	       "eseqiv" : skcipher_default_geniv;
}
static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
{
	struct rtattr *tb[3];
	struct {
		struct rtattr attr;
		struct crypto_attr_type data;
	} ptype;
	struct {
		struct rtattr attr;
		struct crypto_attr_alg data;
	} palg;
	struct crypto_template *tmpl;
	struct crypto_instance *inst;
	struct crypto_alg *larval;
	const char *geniv;
	int err;

	larval = crypto_larval_lookup(alg->cra_driver_name,
				      (type & ~CRYPTO_ALG_TYPE_MASK) |
				      CRYPTO_ALG_TYPE_GIVCIPHER,
				      mask | CRYPTO_ALG_TYPE_MASK);
	err = PTR_ERR(larval);
	if (IS_ERR(larval))
		goto out;

	err = -EAGAIN;
	if (!crypto_is_larval(larval))
		goto drop_larval;

	ptype.attr.rta_len = sizeof(ptype);
	ptype.attr.rta_type = CRYPTOA_TYPE;
	ptype.data.type = type | CRYPTO_ALG_GENIV;
	/* GENIV tells the template that we're making a default geniv. */
	ptype.data.mask = mask | CRYPTO_ALG_GENIV;
	tb[0] = &ptype.attr;

	palg.attr.rta_len = sizeof(palg);
	palg.attr.rta_type = CRYPTOA_ALG;
	/* Must use the exact name to locate ourselves. */
	memcpy(palg.data.name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME);
	tb[1] = &palg.attr;

	tb[2] = NULL;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_BLKCIPHER)
		geniv = alg->cra_blkcipher.geniv;
	else
		geniv = alg->cra_ablkcipher.geniv;
	if (!geniv)
		geniv = crypto_default_geniv(alg);

	tmpl = crypto_lookup_template(geniv);
	err = -ENOENT;
	if (!tmpl)
		goto kill_larval;

	inst = tmpl->alloc(tb);
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto put_tmpl;

	if ((err = crypto_register_instance(tmpl, inst))) {
		tmpl->free(inst);
		goto put_tmpl;
	}

	/* Redo the lookup to use the instance we just registered. */
	err = -EAGAIN;

put_tmpl:
	crypto_tmpl_put(tmpl);
kill_larval:
	crypto_larval_kill(larval);
drop_larval:
	crypto_mod_put(larval);
out:
	crypto_mod_put(alg);
	return err;
}
static struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type,
						 u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_alg_mod_lookup(name, type, mask);
	if (IS_ERR(alg))
		return alg;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_GIVCIPHER)
		return alg;

	if (!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	      CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
					  alg->cra_ablkcipher.ivsize))
		return alg;

	crypto_mod_put(alg);
	alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED,
				    mask & ~CRYPTO_ALG_TESTED);
	if (IS_ERR(alg))
		return alg;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_GIVCIPHER) {
		if ((alg->cra_flags ^ type ^ ~mask) & CRYPTO_ALG_TESTED) {
			crypto_mod_put(alg);
			alg = ERR_PTR(-ENOENT);
		}
		return alg;
	}

	BUG_ON(!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
		 CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
					     alg->cra_ablkcipher.ivsize));

	return ERR_PTR(crypto_givcipher_default(alg, type, mask));
}
int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
			 u32 type, u32 mask)
{
	struct crypto_alg *alg;
	int err;

	type = crypto_skcipher_type(type);
	mask = crypto_skcipher_mask(mask);

	alg = crypto_lookup_skcipher(name, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
	crypto_mod_put(alg);
	return err;
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);
struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	struct crypto_tfm *tfm;
	int err;

	type = crypto_skcipher_type(type);
	mask = crypto_skcipher_mask(mask);

	for (;;) {
		struct crypto_alg *alg;

		alg = crypto_lookup_skcipher(alg_name, type, mask);
		if (IS_ERR(alg)) {
			err = PTR_ERR(alg);
			goto err;
		}

		tfm = __crypto_alloc_tfm(alg, type, mask);
		if (!IS_ERR(tfm))
			return __crypto_ablkcipher_cast(tfm);

		crypto_mod_put(alg);
		err = PTR_ERR(tfm);

err:
		if (err != -EAGAIN)
			break;
		if (signal_pending(current)) {
			err = -EINTR;
			break;
		}
	}

	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);
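
/*
 * Illustrative usage sketch (not part of the original file; key, IV,
 * scatterlists and the completion callback are placeholders):
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *req;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_ablkcipher_setkey(tfm, key, keylen);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_callback(req, 0, my_done_cb, my_priv);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	err = crypto_ablkcipher_encrypt(req);
 *	(a truly asynchronous transform may return -EINPROGRESS here)
 *	ablkcipher_request_free(req);
 *	crypto_free_ablkcipher(tfm);
 */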
static int __init skcipher_module_init(void)
{
	skcipher_default_geniv = num_possible_cpus() > 1 ?
				 "eseqiv" : "chainiv";
	return 0;
}

static void skcipher_module_exit(void)
{
}

module_init(skcipher_module_init);
module_exit(skcipher_module_exit);