/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
#include <net/netlink.h>

#include "internal.h"
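/*
 * Walk-state flags, as used throughout this file: SKCIPHER_WALK_PHYS
 * selects the physical-address (async) walk, SLOW/COPY/DIFF record which
 * buffering strategy the current step uses, and SLEEP records whether
 * allocations during the walk may block.
 */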
enum {
        SKCIPHER_WALK_PHYS = 1 << 0,
        SKCIPHER_WALK_SLOW = 1 << 1,
        SKCIPHER_WALK_COPY = 1 << 2,
        SKCIPHER_WALK_DIFF = 1 << 3,
        SKCIPHER_WALK_SLEEP = 1 << 4,
};
struct skcipher_walk_buffer {
        struct list_head entry;
        struct scatter_walk dst;
        unsigned int len;
        u8 *data;
        u8 buffer[];
};
static int skcipher_walk_next(struct skcipher_walk *walk);
static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
{
        if (PageHighMem(scatterwalk_page(walk)))
                kunmap_atomic(vaddr);
}

static inline void *skcipher_map(struct scatter_walk *walk)
{
        struct page *page = scatterwalk_page(walk);

        return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
               offset_in_page(walk->offset);
}
static inline void skcipher_map_src(struct skcipher_walk *walk)
{
        walk->src.virt.addr = skcipher_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
        walk->dst.virt.addr = skcipher_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
        skcipher_unmap(&walk->in, walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
        skcipher_unmap(&walk->out, walk->dst.virt.addr);
}
static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
        return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}
/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
        u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

        return max(start, end_page);
}
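/*
 * Worked example for the helper above (illustrative numbers, assuming
 * PAGE_SIZE == 4096): with start ending in 0xff0 and len == 32, the chunk
 * would end at offset 0x100f, so end_page is the next page boundary and
 * max() pushes the spot onto that boundary.  If the chunk already fits
 * within the page, end_page <= start and start is returned unchanged.
 */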
static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
        u8 *addr;

        addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
        addr = skcipher_get_spot(addr, bsize);
        scatterwalk_copychunks(addr, &walk->out, bsize,
                               (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
}
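/*
 * skcipher_walk_done() finishes one step of a walk.  Callers report how
 * many of the walk.nbytes bytes they left unprocessed (0 on full success)
 * or a negative error.  A sketch of the usual call, as seen from a driver:
 *
 *	err = skcipher_walk_done(&walk, walk.nbytes - nbytes_processed);
 */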
int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
        unsigned int n; /* bytes processed */
        bool more;

        if (unlikely(err < 0))
                goto finish;

        n = walk->nbytes - err;
        walk->total -= n;
        more = (walk->total != 0);

        if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
                                    SKCIPHER_WALK_SLOW |
                                    SKCIPHER_WALK_COPY |
                                    SKCIPHER_WALK_DIFF)))) {
unmap_src:
                skcipher_unmap_src(walk);
        } else if (walk->flags & SKCIPHER_WALK_DIFF) {
                skcipher_unmap_dst(walk);
                goto unmap_src;
        } else if (walk->flags & SKCIPHER_WALK_COPY) {
                skcipher_map_dst(walk);
                memcpy(walk->dst.virt.addr, walk->page, n);
                skcipher_unmap_dst(walk);
        } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
                if (err) {
                        /*
                         * Didn't process all bytes.  Either the algorithm is
                         * broken, or this was the last step and it turned out
                         * the message wasn't evenly divisible into blocks but
                         * the algorithm requires it.
                         */
                        err = -EINVAL;
                        goto finish;
                }
                skcipher_done_slow(walk, n);
                goto already_advanced;
        }

        scatterwalk_advance(&walk->in, n);
        scatterwalk_advance(&walk->out, n);
already_advanced:
        scatterwalk_done(&walk->in, 0, more);
        scatterwalk_done(&walk->out, 1, more);

        if (more) {
                crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
                             CRYPTO_TFM_REQ_MAY_SLEEP : 0);
                return skcipher_walk_next(walk);
        }
        err = 0;
finish:
        walk->nbytes = 0;

        /* Short-circuit for the common/fast path. */
        if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
                goto out;

        if (walk->flags & SKCIPHER_WALK_PHYS)
                goto out;

        if (walk->iv != walk->oiv)
                memcpy(walk->oiv, walk->iv, walk->ivsize);
        if (walk->buffer != walk->page)
                kfree(walk->buffer);
        if (walk->page)
                free_page((unsigned long)walk->page);

out:
        return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);
void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
        struct skcipher_walk_buffer *p, *tmp;

        list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
                u8 *data;

                if (err)
                        goto done;

                data = p->data;
                if (!data) {
                        data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
                        data = skcipher_get_spot(data, walk->stride);
                }

                scatterwalk_copychunks(data, &p->dst, p->len, 1);

                if (offset_in_page(p->data) + p->len + walk->stride >
                    PAGE_SIZE)
                        free_page((unsigned long)p->data);

done:
                list_del(&p->entry);
                kfree(p);
        }

        if (!err && walk->iv != walk->oiv)
                memcpy(walk->oiv, walk->iv, walk->ivsize);
        if (walk->buffer != walk->page)
                kfree(walk->buffer);
        if (walk->page)
                free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);
static void skcipher_queue_write(struct skcipher_walk *walk,
                                 struct skcipher_walk_buffer *p)
{
        p->dst = walk->out;
        list_add_tail(&p->entry, &walk->buffers);
}
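/*
 * Slow path: when fewer than bsize contiguous bytes are available in the
 * source or destination scatterlist entry, the data is bounced through an
 * aligned temporary buffer so the algorithm still sees a whole block.
 */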
static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
        bool phys = walk->flags & SKCIPHER_WALK_PHYS;
        unsigned alignmask = walk->alignmask;
        struct skcipher_walk_buffer *p;
        unsigned a, n;
        u8 *buffer;
        void *v;

        if (!phys) {
                if (!walk->buffer)
                        walk->buffer = walk->page;
                buffer = walk->buffer;
                if (buffer)
                        goto ok;
        }

        /* Start with the minimum alignment of kmalloc. */
        a = crypto_tfm_ctx_alignment() - 1;
        n = bsize;

        if (phys) {
                /* Calculate the minimum alignment of p->buffer. */
                a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
                n += sizeof(*p);
        }

        /* Minimum size to align p->buffer by alignmask. */
        n += alignmask & ~a;

        /* Minimum size to ensure p->buffer does not straddle a page. */
        n += (bsize - 1) & ~(alignmask | a);

        v = kzalloc(n, skcipher_walk_gfp(walk));
        if (!v)
                return skcipher_walk_done(walk, -ENOMEM);

        if (phys) {
                p = v;
                p->len = bsize;
                skcipher_queue_write(walk, p);
                buffer = p->buffer;
        } else {
                walk->buffer = v;
                buffer = v;
        }

ok:
        walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
        walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
        walk->src.virt.addr = walk->dst.virt.addr;

        scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

        walk->nbytes = bsize;
        walk->flags |= SKCIPHER_WALK_SLOW;

        return 0;
}
static int skcipher_next_copy(struct skcipher_walk *walk)
{
        struct skcipher_walk_buffer *p;
        u8 *tmp = walk->page;

        skcipher_map_src(walk);
        memcpy(tmp, walk->src.virt.addr, walk->nbytes);
        skcipher_unmap_src(walk);

        walk->src.virt.addr = tmp;
        walk->dst.virt.addr = tmp;

        if (!(walk->flags & SKCIPHER_WALK_PHYS))
                return 0;

        p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
        if (!p)
                return -ENOMEM;

        p->data = walk->page;
        p->len = walk->nbytes;
        skcipher_queue_write(walk, p);

        if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
            PAGE_SIZE)
                walk->page = NULL;
        else
                walk->page += walk->nbytes;

        return 0;
}
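/*
 * Fast path: both scatterlist entries can be processed in place.  If src
 * and dst map to different pages or offsets, SKCIPHER_WALK_DIFF is set
 * and both are mapped; otherwise a single mapping is shared for in-place
 * processing.
 */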
static int skcipher_next_fast(struct skcipher_walk *walk)
{
        unsigned long diff;

        walk->src.phys.page = scatterwalk_page(&walk->in);
        walk->src.phys.offset = offset_in_page(walk->in.offset);
        walk->dst.phys.page = scatterwalk_page(&walk->out);
        walk->dst.phys.offset = offset_in_page(walk->out.offset);

        if (walk->flags & SKCIPHER_WALK_PHYS)
                return 0;

        diff = walk->src.phys.offset - walk->dst.phys.offset;
        diff |= walk->src.virt.page - walk->dst.virt.page;

        skcipher_map_src(walk);
        walk->dst.virt.addr = walk->src.virt.addr;

        if (diff) {
                walk->flags |= SKCIPHER_WALK_DIFF;
                skcipher_map_dst(walk);
        }

        return 0;
}
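/*
 * Per-step dispatcher: skcipher_next_slow() when less than a full block
 * is contiguous, skcipher_next_copy() when the buffers fail the
 * algorithm's alignmask, and skcipher_next_fast() otherwise.
 */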
static int skcipher_walk_next(struct skcipher_walk *walk)
{
        unsigned int bsize;
        unsigned int n;
        int err;

        walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
                         SKCIPHER_WALK_DIFF);

        n = walk->total;
        bsize = min(walk->stride, max(n, walk->blocksize));
        n = scatterwalk_clamp(&walk->in, n);
        n = scatterwalk_clamp(&walk->out, n);

        if (unlikely(n < bsize)) {
                if (unlikely(walk->total < walk->blocksize))
                        return skcipher_walk_done(walk, -EINVAL);

slow_path:
                err = skcipher_next_slow(walk, bsize);
                goto set_phys_lowmem;
        }

        if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
                if (!walk->page) {
                        gfp_t gfp = skcipher_walk_gfp(walk);

                        walk->page = (void *)__get_free_page(gfp);
                        if (!walk->page)
                                goto slow_path;
                }

                walk->nbytes = min_t(unsigned, n,
                                     PAGE_SIZE - offset_in_page(walk->page));
                walk->flags |= SKCIPHER_WALK_COPY;
                err = skcipher_next_copy(walk);
                goto set_phys_lowmem;
        }

        walk->nbytes = n;

        return skcipher_next_fast(walk);

set_phys_lowmem:
        if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
                walk->src.phys.page = virt_to_page(walk->src.virt.addr);
                walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
                walk->src.phys.offset &= PAGE_SIZE - 1;
                walk->dst.phys.offset &= PAGE_SIZE - 1;
        }
        return err;
}
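/*
 * If the caller's IV violates the algorithm's alignmask, it is copied
 * into a suitably aligned buffer here; walk->oiv keeps the original
 * location so the final IV can be copied back on completion.
 */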
static int skcipher_copy_iv(struct skcipher_walk *walk)
{
        unsigned a = crypto_tfm_ctx_alignment() - 1;
        unsigned alignmask = walk->alignmask;
        unsigned ivsize = walk->ivsize;
        unsigned bs = walk->stride;
        unsigned aligned_bs;
        unsigned size;
        u8 *iv;

        aligned_bs = ALIGN(bs, alignmask + 1);

        /* Minimum size to align buffer by alignmask. */
        size = alignmask & ~a;

        if (walk->flags & SKCIPHER_WALK_PHYS)
                size += ivsize;
        else {
                size += aligned_bs + ivsize;

                /* Minimum size to ensure buffer does not straddle a page. */
                size += (bs - 1) & ~(alignmask | a);
        }

        walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
        if (!walk->buffer)
                return -ENOMEM;

        iv = PTR_ALIGN(walk->buffer, alignmask + 1);
        iv = skcipher_get_spot(iv, bs) + aligned_bs;

        walk->iv = memcpy(iv, walk->iv, walk->ivsize);
        return 0;
}
static int skcipher_walk_first(struct skcipher_walk *walk)
{
        if (WARN_ON_ONCE(in_irq()))
                return -EDEADLK;

        walk->buffer = NULL;
        if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
                int err = skcipher_copy_iv(walk);

                if (err)
                        return err;
        }

        walk->page = NULL;

        return skcipher_walk_next(walk);
}
static int skcipher_walk_skcipher(struct skcipher_walk *walk,
                                  struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

        walk->total = req->cryptlen;
        walk->nbytes = 0;
        walk->iv = req->iv;
        walk->oiv = req->iv;

        if (unlikely(!walk->total))
                return 0;

        scatterwalk_start(&walk->in, req->src);
        scatterwalk_start(&walk->out, req->dst);

        walk->flags &= ~SKCIPHER_WALK_SLEEP;
        walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                       SKCIPHER_WALK_SLEEP : 0;

        walk->blocksize = crypto_skcipher_blocksize(tfm);
        walk->stride = crypto_skcipher_walksize(tfm);
        walk->ivsize = crypto_skcipher_ivsize(tfm);
        walk->alignmask = crypto_skcipher_alignmask(tfm);

        return skcipher_walk_first(walk);
}
int skcipher_walk_virt(struct skcipher_walk *walk,
                       struct skcipher_request *req, bool atomic)
{
        int err;

        might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);

        walk->flags &= ~SKCIPHER_WALK_PHYS;

        err = skcipher_walk_skcipher(walk, req);

        walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

        return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
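/*
 * A minimal sketch of how a synchronous driver is expected to drive the
 * virtual-address walk (the names and the 16-byte block size below are
 * illustrative, not part of this file):
 *
 *	static int example_encrypt(struct skcipher_request *req)
 *	{
 *		struct skcipher_walk walk;
 *		unsigned int nbytes;
 *		int err;
 *
 *		err = skcipher_walk_virt(&walk, req, false);
 *		while ((nbytes = walk.nbytes) != 0) {
 *			// process nbytes - (nbytes % 16) bytes from
 *			// walk.src.virt.addr into walk.dst.virt.addr
 *			err = skcipher_walk_done(&walk, nbytes % 16);
 *		}
 *		return err;
 *	}
 */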
void skcipher_walk_atomise(struct skcipher_walk *walk)
{
        walk->flags &= ~SKCIPHER_WALK_SLEEP;
}
EXPORT_SYMBOL_GPL(skcipher_walk_atomise);
int skcipher_walk_async(struct skcipher_walk *walk,
                        struct skcipher_request *req)
{
        walk->flags |= SKCIPHER_WALK_PHYS;

        INIT_LIST_HEAD(&walk->buffers);

        return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);
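/*
 * The AEAD variants below reuse the same walk machinery.  The difference
 * is that req->assoclen bytes of associated data are skipped in both
 * scatterlists before the walk starts, and that the decrypt variant
 * excludes the authentication tag from walk->total.
 */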
static int skcipher_walk_aead_common(struct skcipher_walk *walk,
                                     struct aead_request *req, bool atomic)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        int err;

        walk->nbytes = 0;
        walk->iv = req->iv;
        walk->oiv = req->iv;

        if (unlikely(!walk->total))
                return 0;

        walk->flags &= ~SKCIPHER_WALK_PHYS;

        scatterwalk_start(&walk->in, req->src);
        scatterwalk_start(&walk->out, req->dst);

        scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
        scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

        scatterwalk_done(&walk->in, 0, walk->total);
        scatterwalk_done(&walk->out, 0, walk->total);

        if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
                walk->flags |= SKCIPHER_WALK_SLEEP;
        else
                walk->flags &= ~SKCIPHER_WALK_SLEEP;

        walk->blocksize = crypto_aead_blocksize(tfm);
        walk->stride = crypto_aead_chunksize(tfm);
        walk->ivsize = crypto_aead_ivsize(tfm);
        walk->alignmask = crypto_aead_alignmask(tfm);

        err = skcipher_walk_first(walk);

        if (atomic)
                walk->flags &= ~SKCIPHER_WALK_SLEEP;

        return err;
}
int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
                       bool atomic)
{
        walk->total = req->cryptlen;

        return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead);

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
                               struct aead_request *req, bool atomic)
{
        walk->total = req->cryptlen;

        return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
                               struct aead_request *req, bool atomic)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);

        walk->total = req->cryptlen - crypto_aead_authsize(tfm);

        return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);
static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
        if (alg->cra_type == &crypto_blkcipher_type)
                return sizeof(struct crypto_blkcipher *);

        if (alg->cra_type == &crypto_ablkcipher_type)
                return sizeof(struct crypto_ablkcipher *);

        return crypto_alg_extsize(alg);
}
static void skcipher_set_needkey(struct crypto_skcipher *tfm)
{
        if (tfm->keysize)
                crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}
static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
                                     const u8 *key, unsigned int keylen)
{
        struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
        struct crypto_blkcipher *blkcipher = *ctx;
        int err;

        crypto_blkcipher_clear_flags(blkcipher, ~0);
        crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) &
                                              CRYPTO_TFM_REQ_MASK);
        err = crypto_blkcipher_setkey(blkcipher, key, keylen);
        crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
                                       CRYPTO_TFM_RES_MASK);
        if (unlikely(err)) {
                skcipher_set_needkey(tfm);
                return err;
        }

        crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
        return 0;
}
static int skcipher_crypt_blkcipher(struct skcipher_request *req,
                                    int (*crypt)(struct blkcipher_desc *,
                                                 struct scatterlist *,
                                                 struct scatterlist *,
                                                 unsigned int))
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
        struct blkcipher_desc desc = {
                .tfm = *ctx,
                .info = req->iv,
                .flags = req->base.flags,
        };

        return crypt(&desc, req->dst, req->src, req->cryptlen);
}
static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
{
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

        return skcipher_crypt_blkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_blkcipher(struct skcipher_request *req)
{
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

        return skcipher_crypt_blkcipher(req, alg->decrypt);
}
static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
        struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);

        crypto_free_blkcipher(*ctx);
}
static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
        struct crypto_alg *calg = tfm->__crt_alg;
        struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
        struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
        struct crypto_blkcipher *blkcipher;
        struct crypto_tfm *btfm;

        if (!crypto_mod_get(calg))
                return -EAGAIN;

        btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER,
                                  CRYPTO_ALG_TYPE_MASK);
        if (IS_ERR(btfm)) {
                crypto_mod_put(calg);
                return PTR_ERR(btfm);
        }

        blkcipher = __crypto_blkcipher_cast(btfm);
        *ctx = blkcipher;
        tfm->exit = crypto_exit_skcipher_ops_blkcipher;

        skcipher->setkey = skcipher_setkey_blkcipher;
        skcipher->encrypt = skcipher_encrypt_blkcipher;
        skcipher->decrypt = skcipher_decrypt_blkcipher;

        skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
        skcipher->keysize = calg->cra_blkcipher.max_keysize;

        skcipher_set_needkey(skcipher);

        return 0;
}
static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
                                      const u8 *key, unsigned int keylen)
{
        struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
        struct crypto_ablkcipher *ablkcipher = *ctx;
        int err;

        crypto_ablkcipher_clear_flags(ablkcipher, ~0);
        crypto_ablkcipher_set_flags(ablkcipher,
                                    crypto_skcipher_get_flags(tfm) &
                                    CRYPTO_TFM_REQ_MASK);
        err = crypto_ablkcipher_setkey(ablkcipher, key, keylen);
        crypto_skcipher_set_flags(tfm,
                                  crypto_ablkcipher_get_flags(ablkcipher) &
                                  CRYPTO_TFM_RES_MASK);
        if (unlikely(err)) {
                skcipher_set_needkey(tfm);
                return err;
        }

        crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
        return 0;
}
static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
                                     int (*crypt)(struct ablkcipher_request *))
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
        struct ablkcipher_request *subreq = skcipher_request_ctx(req);

        ablkcipher_request_set_tfm(subreq, *ctx);
        ablkcipher_request_set_callback(subreq, skcipher_request_flags(req),
                                        req->base.complete, req->base.data);
        ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
                                     req->iv);

        return crypt(subreq);
}
static int skcipher_encrypt_ablkcipher(struct skcipher_request *req)
{
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
        struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

        return skcipher_crypt_ablkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_ablkcipher(struct skcipher_request *req)
{
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
        struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

        return skcipher_crypt_ablkcipher(req, alg->decrypt);
}
static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
        struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);

        crypto_free_ablkcipher(*ctx);
}
static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
        struct crypto_alg *calg = tfm->__crt_alg;
        struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
        struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
        struct crypto_ablkcipher *ablkcipher;
        struct crypto_tfm *abtfm;

        if (!crypto_mod_get(calg))
                return -EAGAIN;

        abtfm = __crypto_alloc_tfm(calg, 0, 0);
        if (IS_ERR(abtfm)) {
                crypto_mod_put(calg);
                return PTR_ERR(abtfm);
        }

        ablkcipher = __crypto_ablkcipher_cast(abtfm);
        *ctx = ablkcipher;
        tfm->exit = crypto_exit_skcipher_ops_ablkcipher;

        skcipher->setkey = skcipher_setkey_ablkcipher;
        skcipher->encrypt = skcipher_encrypt_ablkcipher;
        skcipher->decrypt = skcipher_decrypt_ablkcipher;

        skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
        skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
                            sizeof(struct ablkcipher_request);
        skcipher->keysize = calg->cra_ablkcipher.max_keysize;

        skcipher_set_needkey(skcipher);

        return 0;
}
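/*
 * Native skcipher setkey: a key that violates the algorithm's alignmask
 * is first copied into an aligned heap buffer (below); the buffer is
 * zeroed before it is freed so no key material is left behind.
 */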
static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
                                     const u8 *key, unsigned int keylen)
{
        unsigned long alignmask = crypto_skcipher_alignmask(tfm);
        struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
        u8 *buffer, *alignbuffer;
        unsigned long absize;
        int ret;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_ATOMIC);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = cipher->setkey(tfm, alignbuffer, keylen);
        kzfree(buffer);
        return ret;
}
static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
                           unsigned int keylen)
{
        struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
        unsigned long alignmask = crypto_skcipher_alignmask(tfm);
        int err;

        if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
                crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        if ((unsigned long)key & alignmask)
                err = skcipher_setkey_unaligned(tfm, key, keylen);
        else
                err = cipher->setkey(tfm, key, keylen);

        if (unlikely(err)) {
                skcipher_set_needkey(tfm);
                return err;
        }

        crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
        return 0;
}
static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
        struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
        struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

        alg->exit(skcipher);
}
static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
        struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

        if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
                return crypto_init_skcipher_ops_blkcipher(tfm);

        if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type)
                return crypto_init_skcipher_ops_ablkcipher(tfm);

        skcipher->setkey = skcipher_setkey;
        skcipher->encrypt = alg->encrypt;
        skcipher->decrypt = alg->decrypt;
        skcipher->ivsize = alg->ivsize;
        skcipher->keysize = alg->max_keysize;

        skcipher_set_needkey(skcipher);

        if (alg->exit)
                skcipher->base.exit = crypto_skcipher_exit_tfm;

        if (alg->init)
                return alg->init(skcipher);

        return 0;
}
static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
        struct skcipher_instance *skcipher =
                container_of(inst, struct skcipher_instance, s.base);

        skcipher->free(skcipher);
}
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
        struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
                                                     base);

        seq_printf(m, "type         : skcipher\n");
        seq_printf(m, "async        : %s\n",
                   alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
        seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
        seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
        seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
        seq_printf(m, "walksize     : %u\n", skcipher->walksize);
}
#ifdef CONFIG_NET
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_blkcipher rblkcipher;
        struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
                                                     base);

        memset(&rblkcipher, 0, sizeof(rblkcipher));

        strscpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
        strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

        rblkcipher.blocksize = alg->cra_blocksize;
        rblkcipher.min_keysize = skcipher->min_keysize;
        rblkcipher.max_keysize = skcipher->max_keysize;
        rblkcipher.ivsize = skcipher->ivsize;

        return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
                       sizeof(rblkcipher), &rblkcipher);
}
#else
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif
static const struct crypto_type crypto_skcipher_type2 = {
        .extsize = crypto_skcipher_extsize,
        .init_tfm = crypto_skcipher_init_tfm,
        .free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
        .show = crypto_skcipher_show,
#endif
        .report = crypto_skcipher_report,
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
        .type = CRYPTO_ALG_TYPE_SKCIPHER,
        .tfmsize = offsetof(struct crypto_skcipher, base),
};
int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
                         const char *name, u32 type, u32 mask)
{
        spawn->base.frontend = &crypto_skcipher_type2;
        return crypto_grab_spawn(&spawn->base, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);
struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
                                              u32 type, u32 mask)
{
        return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);
struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
                const char *alg_name, u32 type, u32 mask)
{
        struct crypto_skcipher *tfm;

        /* Only sync algorithms allowed. */
        mask |= CRYPTO_ALG_ASYNC;

        tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);

        /*
         * Make sure we do not allocate something that might get used with
         * an on-stack request: check the request size.
         */
        if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
                                    MAX_SYNC_SKCIPHER_REQSIZE)) {
                crypto_free_skcipher(tfm);
                return ERR_PTR(-EINVAL);
        }

        return (struct crypto_sync_skcipher *)tfm;
}
EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);
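/*
 * A sync tfm is meant to be paired with an on-stack request.  A usage
 * sketch (assuming an available algorithm such as "cbc(aes)"):
 *
 *	struct crypto_sync_skcipher *tfm =
 *		crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
 *
 *	if (!IS_ERR(tfm)) {
 *		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
 *
 *		skcipher_request_set_sync_tfm(req, tfm);
 *		// set callback, src/dst and iv, then call
 *		// crypto_skcipher_encrypt(req) as usual
 *	}
 */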
int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask)
{
        return crypto_type_has_alg(alg_name, &crypto_skcipher_type2,
                                   type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher2);
static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
        struct crypto_alg *base = &alg->base;

        if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
            alg->walksize > PAGE_SIZE / 8)
                return -EINVAL;

        if (!alg->chunksize)
                alg->chunksize = base->cra_blocksize;
        if (!alg->walksize)
                alg->walksize = alg->chunksize;

        base->cra_type = &crypto_skcipher_type2;
        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
        base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

        return 0;
}
int crypto_register_skcipher(struct skcipher_alg *alg)
{
        struct crypto_alg *base = &alg->base;
        int err;

        err = skcipher_prepare_alg(alg);
        if (err)
                return err;

        return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);
void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
        crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);
int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
        int i, ret;

        for (i = 0; i < count; i++) {
                ret = crypto_register_skcipher(&algs[i]);
                if (ret)
                        goto err;
        }

        return 0;

err:
        for (--i; i >= 0; --i)
                crypto_unregister_skcipher(&algs[i]);

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);
void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
        int i;

        for (i = count - 1; i >= 0; --i)
                crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);
int skcipher_register_instance(struct crypto_template *tmpl,
                               struct skcipher_instance *inst)
{
        int err;

        err = skcipher_prepare_alg(&inst->alg);
        if (err)
                return err;

        return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);
static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key,
                                  unsigned int keylen)
{
        struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
        int err;

        crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
        crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) &
                                CRYPTO_TFM_REQ_MASK);
        err = crypto_cipher_setkey(cipher, key, keylen);
        crypto_skcipher_set_flags(tfm, crypto_cipher_get_flags(cipher) &
                                  CRYPTO_TFM_RES_MASK);
        return err;
}
static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm)
{
        struct skcipher_instance *inst = skcipher_alg_instance(tfm);
        struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
        struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_cipher *cipher;

        cipher = crypto_spawn_cipher(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->cipher = cipher;
        return 0;
}
static void skcipher_exit_tfm_simple(struct crypto_skcipher *tfm)
{
        struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);

        crypto_free_cipher(ctx->cipher);
}
static void skcipher_free_instance_simple(struct skcipher_instance *inst)
{
        crypto_drop_spawn(skcipher_instance_ctx(inst));
        kfree(inst);
}
/**
 * skcipher_alloc_instance_simple - allocate instance of simple block cipher mode
 *
 * Allocate an skcipher_instance for a simple block cipher mode of operation,
 * e.g. cbc or ecb.  The instance context will have just a single crypto_spawn,
 * that for the underlying cipher.  The {min,max}_keysize, ivsize, blocksize,
 * alignmask, and priority are set from the underlying cipher but can be
 * overridden if needed.  The tfm context defaults to skcipher_ctx_simple, and
 * default ->setkey(), ->init(), and ->exit() methods are installed.
 *
 * @tmpl: the template being instantiated
 * @tb: the template parameters
 * @cipher_alg_ret: on success, a pointer to the underlying cipher algorithm is
 *                  returned here.  It must be dropped with crypto_mod_put().
 *
 * Return: a pointer to the new instance, or an ERR_PTR().  The caller still
 *         needs to register the instance.
 */
struct skcipher_instance *
skcipher_alloc_instance_simple(struct crypto_template *tmpl, struct rtattr **tb,
                               struct crypto_alg **cipher_alg_ret)
{
        struct crypto_attr_type *algt;
        struct crypto_alg *cipher_alg;
        struct skcipher_instance *inst;
        struct crypto_spawn *spawn;
        u32 mask;
        int err;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return ERR_CAST(algt);

        if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
                return ERR_PTR(-EINVAL);

        mask = CRYPTO_ALG_TYPE_MASK |
                crypto_requires_off(algt->type, algt->mask,
                                    CRYPTO_ALG_NEED_FALLBACK);

        cipher_alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask);
        if (IS_ERR(cipher_alg))
                return ERR_CAST(cipher_alg);

        inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
        if (!inst) {
                err = -ENOMEM;
                goto err_put_cipher_alg;
        }
        spawn = skcipher_instance_ctx(inst);

        err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name,
                                  cipher_alg);
        if (err)
                goto err_free_inst;

        err = crypto_init_spawn(spawn, cipher_alg,
                                skcipher_crypto_instance(inst),
                                CRYPTO_ALG_TYPE_MASK);
        if (err)
                goto err_free_inst;
        inst->free = skcipher_free_instance_simple;

        /* Default algorithm properties, can be overridden */
        inst->alg.base.cra_blocksize = cipher_alg->cra_blocksize;
        inst->alg.base.cra_alignmask = cipher_alg->cra_alignmask;
        inst->alg.base.cra_priority = cipher_alg->cra_priority;
        inst->alg.min_keysize = cipher_alg->cra_cipher.cia_min_keysize;
        inst->alg.max_keysize = cipher_alg->cra_cipher.cia_max_keysize;
        inst->alg.ivsize = cipher_alg->cra_blocksize;

        /* Use skcipher_ctx_simple by default, can be overridden */
        inst->alg.base.cra_ctxsize = sizeof(struct skcipher_ctx_simple);
        inst->alg.setkey = skcipher_setkey_simple;
        inst->alg.init = skcipher_init_tfm_simple;
        inst->alg.exit = skcipher_exit_tfm_simple;

        *cipher_alg_ret = cipher_alg;
        return inst;

err_free_inst:
        kfree(inst);
err_put_cipher_alg:
        crypto_mod_put(cipher_alg);
        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);
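/*
 * Sketch of a template ->create() built on the helper above, modelled on
 * what simple modes like ecb do (names here are illustrative):
 *
 *	static int crypto_example_create(struct crypto_template *tmpl,
 *					 struct rtattr **tb)
 *	{
 *		struct skcipher_instance *inst;
 *		struct crypto_alg *alg;
 *		int err;
 *
 *		inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
 *		if (IS_ERR(inst))
 *			return PTR_ERR(inst);
 *		inst->alg.encrypt = crypto_example_crypt;
 *		inst->alg.decrypt = crypto_example_crypt;
 *		err = skcipher_register_instance(tmpl, inst);
 *		if (err)
 *			inst->free(inst);
 *		crypto_mod_put(alg);
 *		return err;
 *	}
 */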
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");