/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
#include <net/netlink.h>

#include "internal.h"

enum {
	SKCIPHER_WALK_PHYS = 1 << 0,
	SKCIPHER_WALK_SLOW = 1 << 1,
	SKCIPHER_WALK_COPY = 1 << 2,
	SKCIPHER_WALK_DIFF = 1 << 3,
	SKCIPHER_WALK_SLEEP = 1 << 4,
};

struct skcipher_walk_buffer {
	struct list_head entry;
	struct scatter_walk dst;
	unsigned int len;
	u8 *data;
	u8 buffer[];
};

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
{
	if (PageHighMem(scatterwalk_page(walk)))
		kunmap_atomic(vaddr);
}

static inline void *skcipher_map(struct scatter_walk *walk)
{
	struct page *page = scatterwalk_page(walk);

	return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
	       offset_in_page(walk->offset);
}

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
	walk->src.virt.addr = skcipher_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
	walk->dst.virt.addr = skcipher_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->in, walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->out, walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}
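
/*
 * Example (illustrative, not from the original source): with 4096-byte
 * pages, skcipher_get_spot(start = 0xff8, len = 16) computes end_page =
 * (0xff8 + 15) & PAGE_MASK = 0x1000, so the returned spot is bumped up to
 * 0x1000 and the 16-byte block sits entirely on the second page instead
 * of straddling the boundary.  When start + len - 1 stays on start's own
 * page, end_page <= start and start is returned unchanged.
 */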

static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = skcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize,
			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
	return 0;
}

/*
 * Complete one step of the walk; err is either a negative error code or
 * the number of bytes at the tail that the caller left unprocessed.
 */
int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
	unsigned int n = walk->nbytes - err;
	unsigned int nbytes;

	nbytes = walk->total - n;

	if (unlikely(err < 0)) {
		nbytes = 0;
		n = 0;
	} else if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
					   SKCIPHER_WALK_SLOW |
					   SKCIPHER_WALK_COPY |
					   SKCIPHER_WALK_DIFF)))) {
unmap_src:
		skcipher_unmap_src(walk);
	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
		skcipher_unmap_dst(walk);
		goto unmap_src;
	} else if (walk->flags & SKCIPHER_WALK_COPY) {
		skcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		skcipher_unmap_dst(walk);
	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
		if (WARN_ON(err)) {
			err = -EINVAL;
			nbytes = 0;
		} else
			n = skcipher_done_slow(walk, n);
	}

	if (err > 0)
		err = 0;

	walk->total = nbytes;
	walk->nbytes = nbytes;

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	if (nbytes) {
		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
		return skcipher_walk_next(walk);
	}

	/* Short-circuit for the common/fast path. */
	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
		goto out;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		goto out;

	if (walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

out:
	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);

void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
	struct skcipher_walk_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		u8 *data;

		if (err)
			goto done;

		data = p->data;
		if (!data) {
			data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
			data = skcipher_get_spot(data, walk->stride);
		}

		scatterwalk_copychunks(data, &p->dst, p->len, 1);

		if (offset_in_page(p->data) + p->len + walk->stride >
		    PAGE_SIZE)
			free_page((unsigned long)p->data);

done:
		list_del(&p->entry);
		kfree(p);
	}

	if (!err && walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);

static void skcipher_queue_write(struct skcipher_walk *walk,
				 struct skcipher_walk_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	bool phys = walk->flags & SKCIPHER_WALK_PHYS;
	unsigned alignmask = walk->alignmask;
	struct skcipher_walk_buffer *p;
	unsigned a;
	unsigned n;
	u8 *buffer;
	void *v;

	if (!phys) {
		if (!walk->buffer)
			walk->buffer = walk->page;
		buffer = walk->buffer;
		if (buffer)
			goto ok;
	}

	/* Start with the minimum alignment of kmalloc. */
	a = crypto_tfm_ctx_alignment() - 1;
	n = bsize;

	if (phys) {
		/* Calculate the minimum alignment of p->buffer. */
		a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
		n += sizeof(*p);
	}

	/* Minimum size to align p->buffer by alignmask. */
	n += alignmask & ~a;

	/* Minimum size to ensure p->buffer does not straddle a page. */
	n += (bsize - 1) & ~(alignmask | a);

	v = kzalloc(n, skcipher_walk_gfp(walk));
	if (!v)
		return skcipher_walk_done(walk, -ENOMEM);

	if (phys) {
		p = v;
		p->len = bsize;
		skcipher_queue_write(walk, p);
		buffer = p->buffer;
	} else {
		walk->buffer = v;
		buffer = v;
	}

ok:
	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
	walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = walk->dst.virt.addr;

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= SKCIPHER_WALK_SLOW;

	return 0;
}
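
/*
 * Sizing example for the allocation above (a sketch assuming kmalloc
 * guarantees 8-byte alignment, so a = 7): on the non-physical path with
 * alignmask = 15 and bsize = 16, n = 16 + (15 & ~7) + (15 & ~(15 | 7)) =
 * 16 + 8 + 0 = 24.  The 8 spare bytes let PTR_ALIGN() find a
 * 16-byte-aligned spot inside an 8-byte-aligned allocation, and no extra
 * slack is needed against page straddling because a 16-aligned 16-byte
 * block can never cross a page boundary.
 */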

static int skcipher_next_copy(struct skcipher_walk *walk)
{
	struct skcipher_walk_buffer *p;
	u8 *tmp = walk->page;

	skcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	skcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	if (!(walk->flags & SKCIPHER_WALK_PHYS))
		return 0;

	p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
	if (!p)
		return -ENOMEM;

	p->data = walk->page;
	p->len = walk->nbytes;
	skcipher_queue_write(walk, p);

	if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
	    PAGE_SIZE)
		walk->page = NULL;
	else
		walk->page += walk->nbytes;

	return 0;
}

static int skcipher_next_fast(struct skcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & SKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	skcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= SKCIPHER_WALK_DIFF;
		skcipher_map_dst(walk);
	}

	return 0;
}

static int skcipher_walk_next(struct skcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
			 SKCIPHER_WALK_DIFF);

	n = walk->total;
	bsize = min(walk->stride, max(n, walk->blocksize));
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		if (unlikely(walk->total < walk->blocksize))
			return skcipher_walk_done(walk, -EINVAL);

slow_path:
		err = skcipher_next_slow(walk, bsize);
		goto set_phys_lowmem;
	}

	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
		if (!walk->page) {
			gfp_t gfp = skcipher_walk_gfp(walk);

			walk->page = (void *)__get_free_page(gfp);
			if (!walk->page)
				goto slow_path;
		}

		walk->nbytes = min_t(unsigned, n,
				     PAGE_SIZE - offset_in_page(walk->page));
		walk->flags |= SKCIPHER_WALK_COPY;
		err = skcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return skcipher_next_fast(walk);

set_phys_lowmem:
	if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}

	return err;
}

static int skcipher_copy_iv(struct skcipher_walk *walk)
{
	unsigned a = crypto_tfm_ctx_alignment() - 1;
	unsigned alignmask = walk->alignmask;
	unsigned ivsize = walk->ivsize;
	unsigned bs = walk->stride;
	unsigned aligned_bs;
	unsigned size;
	u8 *iv;

	aligned_bs = ALIGN(bs, alignmask + 1);

	/* Minimum size to align buffer by alignmask. */
	size = alignmask & ~a;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		size += ivsize;
	else {
		size += aligned_bs + ivsize;

		/* Minimum size to ensure buffer does not straddle a page. */
		size += (bs - 1) & ~(alignmask | a);
	}

	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
	if (!walk->buffer)
		return -ENOMEM;

	iv = PTR_ALIGN(walk->buffer, alignmask + 1);
	iv = skcipher_get_spot(iv, bs) + aligned_bs;

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

static int skcipher_walk_first(struct skcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = skcipher_copy_iv(walk);

		if (err)
			return err;
	}

	walk->page = NULL;
	walk->nbytes = walk->total;

	return skcipher_walk_next(walk);
}

static int skcipher_walk_skcipher(struct skcipher_walk *walk,
				  struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

	walk->total = req->cryptlen;
	walk->nbytes = 0;

	if (unlikely(!walk->total))
		return 0;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	walk->iv = req->iv;
	walk->oiv = req->iv;

	walk->flags &= ~SKCIPHER_WALK_SLEEP;
	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		       SKCIPHER_WALK_SLEEP : 0;

	walk->blocksize = crypto_skcipher_blocksize(tfm);
	walk->stride = crypto_skcipher_walksize(tfm);
	walk->ivsize = crypto_skcipher_ivsize(tfm);
	walk->alignmask = crypto_skcipher_alignmask(tfm);

	return skcipher_walk_first(walk);
}

int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req, bool atomic)
{
	int err;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	err = skcipher_walk_skcipher(walk, req);

	walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
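
/*
 * Typical usage (a sketch, not part of this file; bsize stands for the
 * cipher's block size and crypt() for the actual block function): a
 * synchronous implementation drives the walker one mapped chunk at a
 * time, processing bytes from walk.src.virt.addr into walk.dst.virt.addr
 * and reporting any unprocessed tail back through skcipher_walk_done():
 *
 *	err = skcipher_walk_virt(&walk, req, false);
 *	while (walk.nbytes) {
 *		unsigned int n = walk.nbytes - (walk.nbytes % bsize);
 *
 *		crypt(walk.dst.virt.addr, walk.src.virt.addr, n);
 *		err = skcipher_walk_done(&walk, walk.nbytes - n);
 *	}
 *	return err;
 */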

void skcipher_walk_atomise(struct skcipher_walk *walk)
{
	walk->flags &= ~SKCIPHER_WALK_SLEEP;
}
EXPORT_SYMBOL_GPL(skcipher_walk_atomise);

int skcipher_walk_async(struct skcipher_walk *walk,
			struct skcipher_request *req)
{
	walk->flags |= SKCIPHER_WALK_PHYS;

	INIT_LIST_HEAD(&walk->buffers);

	return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);

static int skcipher_walk_aead_common(struct skcipher_walk *walk,
				     struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int err;

	walk->nbytes = 0;

	if (unlikely(!walk->total))
		return 0;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		walk->flags |= SKCIPHER_WALK_SLEEP;
	else
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	walk->blocksize = crypto_aead_blocksize(tfm);
	walk->stride = crypto_aead_chunksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);

	err = skcipher_walk_first(walk);

	if (atomic)
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	return err;
}

int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
		       bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead);

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	walk->total = req->cryptlen - crypto_aead_authsize(tfm);

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);

static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_blkcipher_type)
		return sizeof(struct crypto_blkcipher *);

	if (alg->cra_type == &crypto_ablkcipher_type ||
	    alg->cra_type == &crypto_givcipher_type)
		return sizeof(struct crypto_ablkcipher *);

	return crypto_alg_extsize(alg);
}

static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct crypto_blkcipher *blkcipher = *ctx;
	int err;

	crypto_blkcipher_clear_flags(blkcipher, ~0);
	crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) &
					      CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(blkcipher, key, keylen);
	crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
				       CRYPTO_TFM_RES_MASK);

	return err;
}

static int skcipher_crypt_blkcipher(struct skcipher_request *req,
				    int (*crypt)(struct blkcipher_desc *,
						 struct scatterlist *,
						 struct scatterlist *,
						 unsigned int))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct blkcipher_desc desc = {
		.tfm = *ctx,
		.info = req->iv,
		.flags = req->base.flags,
	};

	return crypt(&desc, req->dst, req->src, req->cryptlen);
}

static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	return skcipher_crypt_blkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_blkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	return skcipher_crypt_blkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(*ctx);
}

static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *blkcipher;
	struct crypto_tfm *btfm;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER,
					CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(btfm)) {
		crypto_mod_put(calg);
		return PTR_ERR(btfm);
	}

	blkcipher = __crypto_blkcipher_cast(btfm);
	*ctx = blkcipher;
	tfm->exit = crypto_exit_skcipher_ops_blkcipher;

	skcipher->setkey = skcipher_setkey_blkcipher;
	skcipher->encrypt = skcipher_encrypt_blkcipher;
	skcipher->decrypt = skcipher_decrypt_blkcipher;

	skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
	skcipher->keysize = calg->cra_blkcipher.max_keysize;

	return 0;
}

static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
				      const u8 *key, unsigned int keylen)
{
	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct crypto_ablkcipher *ablkcipher = *ctx;
	int err;

	crypto_ablkcipher_clear_flags(ablkcipher, ~0);
	crypto_ablkcipher_set_flags(ablkcipher,
				    crypto_skcipher_get_flags(tfm) &
				    CRYPTO_TFM_REQ_MASK);
	err = crypto_ablkcipher_setkey(ablkcipher, key, keylen);
	crypto_skcipher_set_flags(tfm,
				  crypto_ablkcipher_get_flags(ablkcipher) &
				  CRYPTO_TFM_RES_MASK);

	return err;
}

static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
				     int (*crypt)(struct ablkcipher_request *))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct ablkcipher_request *subreq = skcipher_request_ctx(req);

	ablkcipher_request_set_tfm(subreq, *ctx);
	ablkcipher_request_set_callback(subreq, skcipher_request_flags(req),
					req->base.complete, req->base.data);
	ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				     req->iv);

	return crypt(subreq);
}

static int skcipher_encrypt_ablkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

	return skcipher_crypt_ablkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_ablkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

	return skcipher_crypt_ablkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_ablkcipher(*ctx);
}

static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_ablkcipher *ablkcipher;
	struct crypto_tfm *abtfm;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	abtfm = __crypto_alloc_tfm(calg, 0, 0);
	if (IS_ERR(abtfm)) {
		crypto_mod_put(calg);
		return PTR_ERR(abtfm);
	}

	ablkcipher = __crypto_ablkcipher_cast(abtfm);
	*ctx = ablkcipher;
	tfm->exit = crypto_exit_skcipher_ops_ablkcipher;

	skcipher->setkey = skcipher_setkey_ablkcipher;
	skcipher->encrypt = skcipher_encrypt_ablkcipher;
	skcipher->decrypt = skcipher_decrypt_ablkcipher;

	skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
			    sizeof(struct ablkcipher_request);
	skcipher->keysize = calg->cra_ablkcipher.max_keysize;

	return 0;
}

static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	u8 *buffer, *alignbuffer;
	unsigned long absize;
	int ret;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return skcipher_setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
		return crypto_init_skcipher_ops_blkcipher(tfm);

	if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type ||
	    tfm->__crt_alg->cra_type == &crypto_givcipher_type)
		return crypto_init_skcipher_ops_ablkcipher(tfm);

	skcipher->setkey = skcipher_setkey;
	skcipher->encrypt = alg->encrypt;
	skcipher->decrypt = alg->decrypt;
	skcipher->ivsize = alg->ivsize;
	skcipher->keysize = alg->max_keysize;

	if (alg->exit)
		skcipher->base.exit = crypto_skcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
	struct skcipher_instance *skcipher =
		container_of(inst, struct skcipher_instance, s.base);

	skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	seq_printf(m, "type         : skcipher\n");
	seq_printf(m, "async        : %s\n",
		   alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
}

#ifdef CONFIG_NET
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	strncpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->min_keysize;
	rblkcipher.max_keysize = skcipher->max_keysize;
	rblkcipher.ivsize = skcipher->ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static const struct crypto_type crypto_skcipher_type2 = {
	.extsize = crypto_skcipher_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_skcipher_show,
#endif
	.report = crypto_skcipher_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			 const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_skcipher_type2;
	return crypto_grab_spawn(&spawn->base, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);
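
/*
 * Typical usage (a sketch, not part of this file): users look the
 * algorithm up by name, key the returned tfm and free it when done:
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	err = crypto_skcipher_setkey(tfm, key, keylen);
 *	...
 *	crypto_free_skcipher(tfm);
 */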

int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type2,
				   type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher2);

static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;

	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
	    alg->walksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->chunksize)
		alg->chunksize = base->cra_blocksize;
	if (!alg->walksize)
		alg->walksize = alg->chunksize;

	base->cra_type = &crypto_skcipher_type2;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

	return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);
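
/*
 * Typical usage (a sketch; the my_* names are placeholders, not part of
 * this file): a driver fills in a struct skcipher_alg and registers it
 * from its module init function:
 *
 *	static struct skcipher_alg my_alg = {
 *		.base.cra_name		= "cbc(aes)",
 *		.base.cra_driver_name	= "cbc-aes-mydriver",
 *		.base.cra_priority	= 300,
 *		.base.cra_blocksize	= AES_BLOCK_SIZE,
 *		.base.cra_ctxsize	= sizeof(struct my_ctx),
 *		.base.cra_module	= THIS_MODULE,
 *		.min_keysize		= AES_MIN_KEY_SIZE,
 *		.max_keysize		= AES_MAX_KEY_SIZE,
 *		.ivsize			= AES_BLOCK_SIZE,
 *		.setkey			= my_setkey,
 *		.encrypt		= my_encrypt,
 *		.decrypt		= my_decrypt,
 *	};
 *
 *	err = crypto_register_skcipher(&my_alg);
 */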

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst)
{
	int err;

	err = skcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");