crypto/skcipher.c
/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
#include <net/netlink.h>

#include "internal.h"

enum {
        SKCIPHER_WALK_PHYS = 1 << 0,    /* walk yields physical pages (async use) */
        SKCIPHER_WALK_SLOW = 1 << 1,    /* block bounced through an aligned buffer */
        SKCIPHER_WALK_COPY = 1 << 2,    /* data copied through walk->page */
        SKCIPHER_WALK_DIFF = 1 << 3,    /* src and dst are mapped separately */
        SKCIPHER_WALK_SLEEP = 1 << 4,   /* allocations may sleep (GFP_KERNEL) */
};

struct skcipher_walk_buffer {
        struct list_head entry;
        struct scatter_walk dst;
        unsigned int len;
        u8 *data;
        u8 buffer[];
};

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
{
        if (PageHighMem(scatterwalk_page(walk)))
                kunmap_atomic(vaddr);
}

static inline void *skcipher_map(struct scatter_walk *walk)
{
        struct page *page = scatterwalk_page(walk);

        return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
               offset_in_page(walk->offset);
}

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
        walk->src.virt.addr = skcipher_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
        walk->dst.virt.addr = skcipher_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
        skcipher_unmap(&walk->in, walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
        skcipher_unmap(&walk->out, walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
        return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
        u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

        return max(start, end_page);
}
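
/*
 * Illustrative note (editorial, not in the original source): with 4 KiB
 * pages, if start == 0xffff0 and len == 0x20, then start + len - 1 ==
 * 0x1000f and end_page == 0x10000, so skcipher_get_spot() returns
 * 0x10000: the block is placed at the start of the page it would
 * otherwise have straddled.  If the block already fits in one page,
 * end_page <= start and start is returned unchanged.
 */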

static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
        u8 *addr;

        addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
        addr = skcipher_get_spot(addr, bsize);
        scatterwalk_copychunks(addr, &walk->out, bsize,
                               (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
}

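/*
 * Editorial note (not in the original source): @err follows the crypto
 * walk convention.  On success, callers pass the number of bytes they
 * did NOT process (0 if the whole walk->nbytes span was handled); a
 * negative value aborts the walk and is returned after cleanup.
 */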
int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
        unsigned int n; /* bytes processed */
        bool more;

        if (unlikely(err < 0))
                goto finish;

        n = walk->nbytes - err;
        walk->total -= n;
        more = (walk->total != 0);

        if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
                                    SKCIPHER_WALK_SLOW |
                                    SKCIPHER_WALK_COPY |
                                    SKCIPHER_WALK_DIFF)))) {
unmap_src:
                skcipher_unmap_src(walk);
        } else if (walk->flags & SKCIPHER_WALK_DIFF) {
                skcipher_unmap_dst(walk);
                goto unmap_src;
        } else if (walk->flags & SKCIPHER_WALK_COPY) {
                skcipher_map_dst(walk);
                memcpy(walk->dst.virt.addr, walk->page, n);
                skcipher_unmap_dst(walk);
        } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
                if (err) {
                        /*
                         * Didn't process all bytes.  Either the algorithm is
                         * broken, or this was the last step and it turned out
                         * the message wasn't evenly divisible into blocks but
                         * the algorithm requires it.
                         */
                        err = -EINVAL;
                        goto finish;
                }
                skcipher_done_slow(walk, n);
                goto already_advanced;
        }

        scatterwalk_advance(&walk->in, n);
        scatterwalk_advance(&walk->out, n);
already_advanced:
        scatterwalk_done(&walk->in, 0, more);
        scatterwalk_done(&walk->out, 1, more);

        if (more) {
                crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
                             CRYPTO_TFM_REQ_MAY_SLEEP : 0);
                return skcipher_walk_next(walk);
        }
        err = 0;
finish:
        walk->nbytes = 0;

        /* Short-circuit for the common/fast path. */
        if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
                goto out;

        if (walk->flags & SKCIPHER_WALK_PHYS)
                goto out;

        if (walk->iv != walk->oiv)
                memcpy(walk->oiv, walk->iv, walk->ivsize);
        if (walk->buffer != walk->page)
                kfree(walk->buffer);
        if (walk->page)
                free_page((unsigned long)walk->page);

out:
        return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);

void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
        struct skcipher_walk_buffer *p, *tmp;

        list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
                u8 *data;

                if (err)
                        goto done;

                data = p->data;
                if (!data) {
                        data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
                        data = skcipher_get_spot(data, walk->stride);
                }

                scatterwalk_copychunks(data, &p->dst, p->len, 1);

                if (offset_in_page(p->data) + p->len + walk->stride >
                    PAGE_SIZE)
                        free_page((unsigned long)p->data);

done:
                list_del(&p->entry);
                kfree(p);
        }

        if (!err && walk->iv != walk->oiv)
                memcpy(walk->oiv, walk->iv, walk->ivsize);
        if (walk->buffer != walk->page)
                kfree(walk->buffer);
        if (walk->page)
                free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);
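
/*
 * Editorial note (not in the original source): skcipher_walk_complete()
 * is the completion half of an asynchronous (SKCIPHER_WALK_PHYS) walk.
 * Once the hardware operation finishes, it flushes every bounce buffer
 * queued by skcipher_queue_write() back into the destination
 * scatterlist and releases the walk's temporary allocations.
 */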

static void skcipher_queue_write(struct skcipher_walk *walk,
                                 struct skcipher_walk_buffer *p)
{
        p->dst = walk->out;
        list_add_tail(&p->entry, &walk->buffers);
}

static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
        bool phys = walk->flags & SKCIPHER_WALK_PHYS;
        unsigned alignmask = walk->alignmask;
        struct skcipher_walk_buffer *p;
        unsigned a;
        unsigned n;
        u8 *buffer;
        void *v;

        if (!phys) {
                if (!walk->buffer)
                        walk->buffer = walk->page;
                buffer = walk->buffer;
                if (buffer)
                        goto ok;
        }

        /* Start with the minimum alignment of kmalloc. */
        a = crypto_tfm_ctx_alignment() - 1;
        n = bsize;

        if (phys) {
                /* Calculate the minimum alignment of p->buffer. */
                a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
                n += sizeof(*p);
        }

        /* Minimum size to align p->buffer by alignmask. */
        n += alignmask & ~a;

        /* Minimum size to ensure p->buffer does not straddle a page. */
        n += (bsize - 1) & ~(alignmask | a);

        v = kzalloc(n, skcipher_walk_gfp(walk));
        if (!v)
                return skcipher_walk_done(walk, -ENOMEM);

        if (phys) {
                p = v;
                p->len = bsize;
                skcipher_queue_write(walk, p);
                buffer = p->buffer;
        } else {
                walk->buffer = v;
                buffer = v;
        }

ok:
        walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
        walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
        walk->src.virt.addr = walk->dst.virt.addr;

        scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

        walk->nbytes = bsize;
        walk->flags |= SKCIPHER_WALK_SLOW;

        return 0;
}

static int skcipher_next_copy(struct skcipher_walk *walk)
{
        struct skcipher_walk_buffer *p;
        u8 *tmp = walk->page;

        skcipher_map_src(walk);
        memcpy(tmp, walk->src.virt.addr, walk->nbytes);
        skcipher_unmap_src(walk);

        walk->src.virt.addr = tmp;
        walk->dst.virt.addr = tmp;

        if (!(walk->flags & SKCIPHER_WALK_PHYS))
                return 0;

        p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
        if (!p)
                return -ENOMEM;

        p->data = walk->page;
        p->len = walk->nbytes;
        skcipher_queue_write(walk, p);

        if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
            PAGE_SIZE)
                walk->page = NULL;
        else
                walk->page += walk->nbytes;

        return 0;
}

static int skcipher_next_fast(struct skcipher_walk *walk)
{
        unsigned long diff;

        walk->src.phys.page = scatterwalk_page(&walk->in);
        walk->src.phys.offset = offset_in_page(walk->in.offset);
        walk->dst.phys.page = scatterwalk_page(&walk->out);
        walk->dst.phys.offset = offset_in_page(walk->out.offset);

        if (walk->flags & SKCIPHER_WALK_PHYS)
                return 0;

        diff = walk->src.phys.offset - walk->dst.phys.offset;
        diff |= walk->src.virt.page - walk->dst.virt.page;

        skcipher_map_src(walk);
        walk->dst.virt.addr = walk->src.virt.addr;

        if (diff) {
                walk->flags |= SKCIPHER_WALK_DIFF;
                skcipher_map_dst(walk);
        }

        return 0;
}

static int skcipher_walk_next(struct skcipher_walk *walk)
{
        unsigned int bsize;
        unsigned int n;
        int err;

        walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
                         SKCIPHER_WALK_DIFF);

        n = walk->total;
        bsize = min(walk->stride, max(n, walk->blocksize));
        n = scatterwalk_clamp(&walk->in, n);
        n = scatterwalk_clamp(&walk->out, n);

        if (unlikely(n < bsize)) {
                if (unlikely(walk->total < walk->blocksize))
                        return skcipher_walk_done(walk, -EINVAL);

slow_path:
                err = skcipher_next_slow(walk, bsize);
                goto set_phys_lowmem;
        }

        if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
                if (!walk->page) {
                        gfp_t gfp = skcipher_walk_gfp(walk);

                        walk->page = (void *)__get_free_page(gfp);
                        if (!walk->page)
                                goto slow_path;
                }

                walk->nbytes = min_t(unsigned, n,
                                     PAGE_SIZE - offset_in_page(walk->page));
                walk->flags |= SKCIPHER_WALK_COPY;
                err = skcipher_next_copy(walk);
                goto set_phys_lowmem;
        }

        walk->nbytes = n;

        return skcipher_next_fast(walk);

set_phys_lowmem:
        if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
                walk->src.phys.page = virt_to_page(walk->src.virt.addr);
                walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
                walk->src.phys.offset &= PAGE_SIZE - 1;
                walk->dst.phys.offset &= PAGE_SIZE - 1;
        }
        return err;
}

static int skcipher_copy_iv(struct skcipher_walk *walk)
{
        unsigned a = crypto_tfm_ctx_alignment() - 1;
        unsigned alignmask = walk->alignmask;
        unsigned ivsize = walk->ivsize;
        unsigned bs = walk->stride;
        unsigned aligned_bs;
        unsigned size;
        u8 *iv;

        aligned_bs = ALIGN(bs, alignmask + 1);

        /* Minimum size to align buffer by alignmask. */
        size = alignmask & ~a;

        if (walk->flags & SKCIPHER_WALK_PHYS)
                size += ivsize;
        else {
                size += aligned_bs + ivsize;

                /* Minimum size to ensure buffer does not straddle a page. */
                size += (bs - 1) & ~(alignmask | a);
        }

        walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
        if (!walk->buffer)
                return -ENOMEM;

        iv = PTR_ALIGN(walk->buffer, alignmask + 1);
        iv = skcipher_get_spot(iv, bs) + aligned_bs;

        walk->iv = memcpy(iv, walk->iv, walk->ivsize);
        return 0;
}

static int skcipher_walk_first(struct skcipher_walk *walk)
{
        if (WARN_ON_ONCE(in_irq()))
                return -EDEADLK;

        walk->buffer = NULL;
        if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
                int err = skcipher_copy_iv(walk);
                if (err)
                        return err;
        }

        walk->page = NULL;

        return skcipher_walk_next(walk);
}

static int skcipher_walk_skcipher(struct skcipher_walk *walk,
                                  struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

        walk->total = req->cryptlen;
        walk->nbytes = 0;
        walk->iv = req->iv;
        walk->oiv = req->iv;

        if (unlikely(!walk->total))
                return 0;

        scatterwalk_start(&walk->in, req->src);
        scatterwalk_start(&walk->out, req->dst);

        walk->flags &= ~SKCIPHER_WALK_SLEEP;
        walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                       SKCIPHER_WALK_SLEEP : 0;

        walk->blocksize = crypto_skcipher_blocksize(tfm);
        walk->stride = crypto_skcipher_walksize(tfm);
        walk->ivsize = crypto_skcipher_ivsize(tfm);
        walk->alignmask = crypto_skcipher_alignmask(tfm);

        return skcipher_walk_first(walk);
}

int skcipher_walk_virt(struct skcipher_walk *walk,
                       struct skcipher_request *req, bool atomic)
{
        int err;

        walk->flags &= ~SKCIPHER_WALK_PHYS;

        err = skcipher_walk_skcipher(walk, req);

        walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

        return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
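
/*
 * Editorial sketch (not in the original source): a typical software
 * implementation drives the walk API roughly like this, where
 * my_crypt_blocks() is a hypothetical routine processing full blocks
 * of size bsize:
 *
 *	struct skcipher_walk walk;
 *	int err;
 *
 *	err = skcipher_walk_virt(&walk, req, false);
 *	while (walk.nbytes) {
 *		unsigned int n = walk.nbytes;
 *
 *		if (walk.nbytes < walk.total)
 *			n = round_down(n, bsize);
 *		my_crypt_blocks(ctx, walk.dst.virt.addr,
 *				walk.src.virt.addr, n, walk.iv);
 *		err = skcipher_walk_done(&walk, walk.nbytes - n);
 *	}
 *	return err;
 */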

void skcipher_walk_atomise(struct skcipher_walk *walk)
{
        walk->flags &= ~SKCIPHER_WALK_SLEEP;
}
EXPORT_SYMBOL_GPL(skcipher_walk_atomise);

int skcipher_walk_async(struct skcipher_walk *walk,
                        struct skcipher_request *req)
{
        walk->flags |= SKCIPHER_WALK_PHYS;

        INIT_LIST_HEAD(&walk->buffers);

        return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);

static int skcipher_walk_aead_common(struct skcipher_walk *walk,
                                     struct aead_request *req, bool atomic)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        int err;

        walk->nbytes = 0;
        walk->iv = req->iv;
        walk->oiv = req->iv;

        if (unlikely(!walk->total))
                return 0;

        walk->flags &= ~SKCIPHER_WALK_PHYS;

        scatterwalk_start(&walk->in, req->src);
        scatterwalk_start(&walk->out, req->dst);

        scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
        scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

        scatterwalk_done(&walk->in, 0, walk->total);
        scatterwalk_done(&walk->out, 0, walk->total);

        if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
                walk->flags |= SKCIPHER_WALK_SLEEP;
        else
                walk->flags &= ~SKCIPHER_WALK_SLEEP;

        walk->blocksize = crypto_aead_blocksize(tfm);
        walk->stride = crypto_aead_chunksize(tfm);
        walk->ivsize = crypto_aead_ivsize(tfm);
        walk->alignmask = crypto_aead_alignmask(tfm);

        err = skcipher_walk_first(walk);

        if (atomic)
                walk->flags &= ~SKCIPHER_WALK_SLEEP;

        return err;
}

int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
                       bool atomic)
{
        walk->total = req->cryptlen;

        return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead);

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
                               struct aead_request *req, bool atomic)
{
        walk->total = req->cryptlen;

        return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
                               struct aead_request *req, bool atomic)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);

        walk->total = req->cryptlen - crypto_aead_authsize(tfm);

        return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);
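
/*
 * Editorial note (not in the original source): the AEAD variants above
 * skip req->assoclen bytes of associated data before walking, and the
 * decrypt variant shortens the walk by the authentication tag size,
 * since the trailing tag is verified rather than decrypted.
 */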

static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
        if (alg->cra_type == &crypto_blkcipher_type)
                return sizeof(struct crypto_blkcipher *);

        if (alg->cra_type == &crypto_ablkcipher_type ||
            alg->cra_type == &crypto_givcipher_type)
                return sizeof(struct crypto_ablkcipher *);

        return crypto_alg_extsize(alg);
}

static void skcipher_set_needkey(struct crypto_skcipher *tfm)
{
        if (tfm->keysize)
                crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
                                     const u8 *key, unsigned int keylen)
{
        struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
        struct crypto_blkcipher *blkcipher = *ctx;
        int err;

        crypto_blkcipher_clear_flags(blkcipher, ~0);
        crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) &
                                              CRYPTO_TFM_REQ_MASK);
        err = crypto_blkcipher_setkey(blkcipher, key, keylen);
        crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
                                       CRYPTO_TFM_RES_MASK);
        if (unlikely(err)) {
                skcipher_set_needkey(tfm);
                return err;
        }

        crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
        return 0;
}

static int skcipher_crypt_blkcipher(struct skcipher_request *req,
                                    int (*crypt)(struct blkcipher_desc *,
                                                 struct scatterlist *,
                                                 struct scatterlist *,
                                                 unsigned int))
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
        struct blkcipher_desc desc = {
                .tfm = *ctx,
                .info = req->iv,
                .flags = req->base.flags,
        };

        return crypt(&desc, req->dst, req->src, req->cryptlen);
}

static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
{
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

        return skcipher_crypt_blkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_blkcipher(struct skcipher_request *req)
{
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

        return skcipher_crypt_blkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
        struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);

        crypto_free_blkcipher(*ctx);
}

static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
        struct crypto_alg *calg = tfm->__crt_alg;
        struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
        struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
        struct crypto_blkcipher *blkcipher;
        struct crypto_tfm *btfm;

        if (!crypto_mod_get(calg))
                return -EAGAIN;

        btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER,
                                  CRYPTO_ALG_TYPE_MASK);
        if (IS_ERR(btfm)) {
                crypto_mod_put(calg);
                return PTR_ERR(btfm);
        }

        blkcipher = __crypto_blkcipher_cast(btfm);
        *ctx = blkcipher;
        tfm->exit = crypto_exit_skcipher_ops_blkcipher;

        skcipher->setkey = skcipher_setkey_blkcipher;
        skcipher->encrypt = skcipher_encrypt_blkcipher;
        skcipher->decrypt = skcipher_decrypt_blkcipher;

        skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
        skcipher->keysize = calg->cra_blkcipher.max_keysize;

        skcipher_set_needkey(skcipher);

        return 0;
}

static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
                                      const u8 *key, unsigned int keylen)
{
        struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
        struct crypto_ablkcipher *ablkcipher = *ctx;
        int err;

        crypto_ablkcipher_clear_flags(ablkcipher, ~0);
        crypto_ablkcipher_set_flags(ablkcipher,
                                    crypto_skcipher_get_flags(tfm) &
                                    CRYPTO_TFM_REQ_MASK);
        err = crypto_ablkcipher_setkey(ablkcipher, key, keylen);
        crypto_skcipher_set_flags(tfm,
                                  crypto_ablkcipher_get_flags(ablkcipher) &
                                  CRYPTO_TFM_RES_MASK);
        if (unlikely(err)) {
                skcipher_set_needkey(tfm);
                return err;
        }

        crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
        return 0;
}

static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
                                     int (*crypt)(struct ablkcipher_request *))
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
        struct ablkcipher_request *subreq = skcipher_request_ctx(req);

        ablkcipher_request_set_tfm(subreq, *ctx);
        ablkcipher_request_set_callback(subreq, skcipher_request_flags(req),
                                        req->base.complete, req->base.data);
        ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
                                     req->iv);

        return crypt(subreq);
}

static int skcipher_encrypt_ablkcipher(struct skcipher_request *req)
{
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
        struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

        return skcipher_crypt_ablkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_ablkcipher(struct skcipher_request *req)
{
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
        struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

        return skcipher_crypt_ablkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
        struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);

        crypto_free_ablkcipher(*ctx);
}

static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
        struct crypto_alg *calg = tfm->__crt_alg;
        struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
        struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
        struct crypto_ablkcipher *ablkcipher;
        struct crypto_tfm *abtfm;

        if (!crypto_mod_get(calg))
                return -EAGAIN;

        abtfm = __crypto_alloc_tfm(calg, 0, 0);
        if (IS_ERR(abtfm)) {
                crypto_mod_put(calg);
                return PTR_ERR(abtfm);
        }

        ablkcipher = __crypto_ablkcipher_cast(abtfm);
        *ctx = ablkcipher;
        tfm->exit = crypto_exit_skcipher_ops_ablkcipher;

        skcipher->setkey = skcipher_setkey_ablkcipher;
        skcipher->encrypt = skcipher_encrypt_ablkcipher;
        skcipher->decrypt = skcipher_decrypt_ablkcipher;

        skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
        skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
                            sizeof(struct ablkcipher_request);
        skcipher->keysize = calg->cra_ablkcipher.max_keysize;

        skcipher_set_needkey(skcipher);

        return 0;
}

static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
                                     const u8 *key, unsigned int keylen)
{
        unsigned long alignmask = crypto_skcipher_alignmask(tfm);
        struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
        u8 *buffer, *alignbuffer;
        unsigned long absize;
        int ret;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_ATOMIC);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = cipher->setkey(tfm, alignbuffer, keylen);
        kzfree(buffer);
        return ret;
}

static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
                           unsigned int keylen)
{
        struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
        unsigned long alignmask = crypto_skcipher_alignmask(tfm);
        int err;

        if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
                crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        if ((unsigned long)key & alignmask)
                err = skcipher_setkey_unaligned(tfm, key, keylen);
        else
                err = cipher->setkey(tfm, key, keylen);

        if (unlikely(err)) {
                skcipher_set_needkey(tfm);
                return err;
        }

        crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
        return 0;
}

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
        struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
        struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

        alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
        struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

        if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
                return crypto_init_skcipher_ops_blkcipher(tfm);

        if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type ||
            tfm->__crt_alg->cra_type == &crypto_givcipher_type)
                return crypto_init_skcipher_ops_ablkcipher(tfm);

        skcipher->setkey = skcipher_setkey;
        skcipher->encrypt = alg->encrypt;
        skcipher->decrypt = alg->decrypt;
        skcipher->ivsize = alg->ivsize;
        skcipher->keysize = alg->max_keysize;

        skcipher_set_needkey(skcipher);

        if (alg->exit)
                skcipher->base.exit = crypto_skcipher_exit_tfm;

        if (alg->init)
                return alg->init(skcipher);

        return 0;
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
        struct skcipher_instance *skcipher =
                container_of(inst, struct skcipher_instance, s.base);

        skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
        struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
                                                     base);

        seq_printf(m, "type         : skcipher\n");
        seq_printf(m, "async        : %s\n",
                   alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
        seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
        seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
        seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
        seq_printf(m, "walksize     : %u\n", skcipher->walksize);
}

#ifdef CONFIG_NET
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_blkcipher rblkcipher;
        struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
                                                     base);

        strncpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
        strncpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

        rblkcipher.blocksize = alg->cra_blocksize;
        rblkcipher.min_keysize = skcipher->min_keysize;
        rblkcipher.max_keysize = skcipher->max_keysize;
        rblkcipher.ivsize = skcipher->ivsize;

        if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
                    sizeof(struct crypto_report_blkcipher), &rblkcipher))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
#else
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

static const struct crypto_type crypto_skcipher_type2 = {
        .extsize = crypto_skcipher_extsize,
        .init_tfm = crypto_skcipher_init_tfm,
        .free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
        .show = crypto_skcipher_show,
#endif
        .report = crypto_skcipher_report,
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
        .type = CRYPTO_ALG_TYPE_SKCIPHER,
        .tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
                         const char *name, u32 type, u32 mask)
{
        spawn->base.frontend = &crypto_skcipher_type2;
        return crypto_grab_spawn(&spawn->base, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
                                              u32 type, u32 mask)
{
        return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);
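
/*
 * Editorial sketch (not in the original source): typical in-kernel use
 * of a transform allocated above.  Error handling is abbreviated, and
 * "cbc(aes)", key, src_sg/dst_sg and lengths are placeholders:
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	int err;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_skcipher_setkey(tfm, key, keylen);
 *
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *				      CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */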

int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask)
{
        return crypto_type_has_alg(alg_name, &crypto_skcipher_type2,
                                   type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher2);

static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
        struct crypto_alg *base = &alg->base;

        if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
            alg->walksize > PAGE_SIZE / 8)
                return -EINVAL;

        if (!alg->chunksize)
                alg->chunksize = base->cra_blocksize;
        if (!alg->walksize)
                alg->walksize = alg->chunksize;

        base->cra_type = &crypto_skcipher_type2;
        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
        base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

        return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
        struct crypto_alg *base = &alg->base;
        int err;

        err = skcipher_prepare_alg(alg);
        if (err)
                return err;

        return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);
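
/*
 * Editorial sketch (not in the original source): a driver registers an
 * algorithm by filling in a struct skcipher_alg and calling the
 * function above, typically from its module init.  The names, sizes
 * and callbacks below are placeholders:
 *
 *	static struct skcipher_alg my_alg = {
 *		.base.cra_name		= "cbc(aes)",
 *		.base.cra_driver_name	= "cbc-aes-mydriver",
 *		.base.cra_priority	= 300,
 *		.base.cra_blocksize	= AES_BLOCK_SIZE,
 *		.base.cra_ctxsize	= sizeof(struct my_ctx),
 *		.base.cra_module	= THIS_MODULE,
 *		.min_keysize		= AES_MIN_KEY_SIZE,
 *		.max_keysize		= AES_MAX_KEY_SIZE,
 *		.ivsize			= AES_BLOCK_SIZE,
 *		.setkey			= my_setkey,
 *		.encrypt		= my_encrypt,
 *		.decrypt		= my_decrypt,
 *	};
 *
 *	err = crypto_register_skcipher(&my_alg);
 */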

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
        crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
        int i, ret;

        for (i = 0; i < count; i++) {
                ret = crypto_register_skcipher(&algs[i]);
                if (ret)
                        goto err;
        }

        return 0;

err:
        for (--i; i >= 0; --i)
                crypto_unregister_skcipher(&algs[i]);

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
        int i;

        for (i = count - 1; i >= 0; --i)
                crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
                               struct skcipher_instance *inst)
{
        int err;

        err = skcipher_prepare_alg(&inst->alg);
        if (err)
                return err;

        return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");