/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

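/*
 * Saved pieces of an ahash_request that are temporarily overwritten while
 * the API redirects the result into an aligned bounce buffer; see
 * ahash_save_req() and ahash_restore_req() below.  ubuf provides the
 * suitably aligned storage for that result.
 */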
struct ahash_request_priv {
        crypto_completion_t complete;
        void *data;
        u8 *result;
        u32 flags;
        void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
        return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
                            halg);
}

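/*
 * The walk helpers below iterate over an ahash request's scatterlist one
 * page at a time, handing the caller chunks that respect both page
 * boundaries and the algorithm's alignment mask.
 */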
static int hash_walk_next(struct crypto_hash_walk *walk)
{
        unsigned int alignmask = walk->alignmask;
        unsigned int offset = walk->offset;
        unsigned int nbytes = min(walk->entrylen,
                                  ((unsigned int)(PAGE_SIZE)) - offset);

        /*
         * Async walks may sleep while the page is mapped, so they cannot
         * use an atomic mapping.
         */
        if (walk->flags & CRYPTO_ALG_ASYNC)
                walk->data = kmap(walk->pg);
        else
                walk->data = kmap_atomic(walk->pg);
        walk->data += offset;

        if (offset & alignmask) {
                unsigned int unaligned = alignmask + 1 - (offset & alignmask);

                if (nbytes > unaligned)
                        nbytes = unaligned;
        }

        walk->entrylen -= nbytes;
        return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
        struct scatterlist *sg;

        sg = walk->sg;
        walk->offset = sg->offset;
        walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
        walk->offset = offset_in_page(walk->offset);
        walk->entrylen = sg->length;

        if (walk->entrylen > walk->total)
                walk->entrylen = walk->total;
        walk->total -= walk->entrylen;

        return hash_walk_next(walk);
}

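/*
 * Finish the current step of a hash walk.  @err is the caller's status
 * for the chunk just processed: a nonzero @err is returned unchanged
 * after the page is unmapped, while on success the walk advances and the
 * size of the next mapped chunk is returned, or 0 once all data has been
 * consumed.
 */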
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
        unsigned int alignmask = walk->alignmask;

        walk->data -= walk->offset;

        if (walk->entrylen && (walk->offset & alignmask) && !err) {
                unsigned int nbytes;

                walk->offset = ALIGN(walk->offset, alignmask + 1);
                nbytes = min(walk->entrylen,
                             (unsigned int)(PAGE_SIZE - walk->offset));
                if (nbytes) {
                        walk->entrylen -= nbytes;
                        walk->data += walk->offset;
                        return nbytes;
                }
        }

        if (walk->flags & CRYPTO_ALG_ASYNC)
                kunmap(walk->pg);
        else {
                kunmap_atomic(walk->data);
                /*
                 * The may-sleep check only makes sense for sync users;
                 * async users don't need to sleep here anyway.
                 */
                crypto_yield(walk->flags);
        }

        if (err)
                return err;

        if (walk->entrylen) {
                walk->offset = 0;
                walk->pg++;
                return hash_walk_next(walk);
        }

        if (!walk->total)
                return 0;

        walk->sg = sg_next(walk->sg);

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

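/*
 * Begin a walk over the data of an ahash request.  A typical caller (a
 * sketch; process_chunk() stands in for the algorithm's own block
 * handling) loops like this:
 *
 *	nbytes = crypto_hash_walk_first(req, &walk);
 *	while (nbytes > 0) {
 *		process_chunk(ctx, walk.data, nbytes);
 *		nbytes = crypto_hash_walk_done(&walk, 0);
 *	}
 *
 * A return value of 0 means the request carries no data.
 */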
int crypto_hash_walk_first(struct ahash_request *req,
                           struct crypto_hash_walk *walk)
{
        walk->total = req->nbytes;

        if (!walk->total) {
                walk->entrylen = 0;
                return 0;
        }

        walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
        walk->sg = req->src;
        walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

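/*
 * As crypto_hash_walk_first(), but for callers that may sleep between
 * walk steps: CRYPTO_ALG_ASYNC in the walk flags makes the helpers use
 * kmap() rather than kmap_atomic().
 */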
int crypto_ahash_walk_first(struct ahash_request *req,
                            struct crypto_hash_walk *walk)
{
        walk->total = req->nbytes;

        if (!walk->total) {
                walk->entrylen = 0;
                return 0;
        }

        walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
        walk->sg = req->src;
        walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
        walk->flags |= CRYPTO_ALG_ASYNC;

        BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);

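/*
 * Bounce an unaligned key through a freshly allocated, properly aligned
 * buffer before calling the algorithm's ->setkey(), then zero and free
 * the copy.
 */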
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
                                unsigned int keylen)
{
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        int ret;
        u8 *buffer, *alignbuffer;
        unsigned long absize;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = tfm->setkey(tfm, alignbuffer, keylen);
        kzfree(buffer);
        return ret;
}

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
                          unsigned int keylen)
{
        return -ENOSYS;
}

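/*
 * Arm the NEED_KEY flag for keyed algorithms so the tfm cannot be used
 * until a key has been set, unless the algorithm marks its key as
 * optional.
 */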
static void ahash_set_needkey(struct crypto_ahash *tfm)
{
        const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);

        if (tfm->setkey != ahash_nosetkey &&
            !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
                crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

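/*
 * Program a key into the transform, detouring through an aligned copy
 * when the caller's buffer violates the alignment mask.  On failure the
 * NEED_KEY flag is re-armed so a half-keyed tfm cannot be used.
 */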
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
                        unsigned int keylen)
{
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        int err;

        if ((unsigned long)key & alignmask)
                err = ahash_setkey_unaligned(tfm, key, keylen);
        else
                err = tfm->setkey(tfm, key, keylen);

        if (unlikely(err)) {
                ahash_set_needkey(tfm);
                return err;
        }

        crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
        return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static inline unsigned int ahash_align_buffer_size(unsigned len,
                                                   unsigned long mask)
{
        return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        unsigned int ds = crypto_ahash_digestsize(tfm);
        struct ahash_request_priv *priv;

        priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
                       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC);
        if (!priv)
                return -ENOMEM;

        /*
         * WARNING: Voodoo programming below!
         *
         * The code below is obscure and hard to understand, thus explanation
         * is necessary. See include/crypto/hash.h and include/linux/crypto.h
         * to understand the layout of structures used here!
         *
         * The code here will replace portions of the ORIGINAL request with
         * pointers to new code and buffers so the hashing operation can store
         * the result in an aligned buffer. We will call the modified request
         * an ADJUSTED request.
         *
         * The newly mangled request will look as such:
         *
         * req {
         *   .result        = ADJUSTED[new aligned buffer]
         *   .base.complete = ADJUSTED[pointer to completion function]
         *   .base.data     = ADJUSTED[*req (pointer to self)]
         *   .priv          = ADJUSTED[new priv] {
         *           .result   = ORIGINAL(result)
         *           .complete = ORIGINAL(base.complete)
         *           .data     = ORIGINAL(base.data)
         *   }
         * }
         */

        priv->result = req->result;
        priv->complete = req->base.complete;
        priv->data = req->base.data;
        priv->flags = req->base.flags;

        /*
         * WARNING: We do not backup req->priv here! The req->priv
         *          is for internal use of the Crypto API and the
         *          user must _NOT_ _EVER_ depend on its content!
         */

        req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
        req->base.complete = cplt;
        req->base.data = req;
        req->priv = priv;

        return 0;
}

static void ahash_restore_req(struct ahash_request *req, int err)
{
        struct ahash_request_priv *priv = req->priv;

        if (!err)
                memcpy(priv->result, req->result,
                       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

        /* Restore the original crypto request. */
        req->result = priv->result;

        ahash_request_set_callback(req, priv->flags,
                                   priv->complete, priv->data);
        req->priv = NULL;

        /* Free the priv that was attached to the ADJUSTED request. */
        kzfree(priv);
}

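/*
 * Forward a backlog -EINPROGRESS notification to the ORIGINAL completion
 * callback, using the ORIGINAL callback data.
 */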
static void ahash_notify_einprogress(struct ahash_request *req)
{
        struct ahash_request_priv *priv = req->priv;
        struct crypto_async_request oreq;

        oreq.data = priv->data;

        priv->complete(&oreq, -EINPROGRESS);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;

        if (err == -EINPROGRESS) {
                ahash_notify_einprogress(areq);
                return;
        }

        /*
         * Restore the original request, see ahash_op_unaligned() for what
         * goes where.
         *
         * The "struct ahash_request *req" here is in fact the "req.base"
         * from the ADJUSTED request from ahash_op_unaligned(), thus as it
         * is a pointer to self, it is also the ADJUSTED "req".
         */

        /* First copy req->result into req->priv.result */
        ahash_restore_req(areq, err);

        /* Complete the ORIGINAL request. */
        areq->base.complete(&areq->base, err);
}

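/*
 * Run @op with the result redirected into the aligned bounce buffer set
 * up by ahash_save_req(); the original request is restored either right
 * here (synchronous completion) or in ahash_op_unaligned_done().
 */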
static int ahash_op_unaligned(struct ahash_request *req,
                              int (*op)(struct ahash_request *))
{
        int err;

        err = ahash_save_req(req, ahash_op_unaligned_done);
        if (err)
                return err;

        err = op(req);
        if (err == -EINPROGRESS || err == -EBUSY)
                return err;

        ahash_restore_req(req, err);

        return err;
}

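/*
 * Dispatch an ahash operation, taking the bounce-buffer slow path only
 * when the caller's result buffer does not satisfy the algorithm's
 * alignment mask.
 */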
static int crypto_ahash_op(struct ahash_request *req,
                           int (*op)(struct ahash_request *))
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        unsigned long alignmask = crypto_ahash_alignmask(tfm);

        if ((unsigned long)req->result & alignmask)
                return ahash_op_unaligned(req, op);

        return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
        return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
        return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

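/*
 * A minimal one-shot caller sketch (error handling elided; "sha256",
 * my_done_fn, my_ctx, sg, digest_buf and data_len are illustrative only):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, 0, my_done_fn, my_ctx);
 *	ahash_request_set_crypt(req, sg, digest_buf, data_len);
 *	err = crypto_ahash_digest(req);
 *
 * err may be -EINPROGRESS or -EBUSY, in which case my_done_fn() reports
 * the final status.
 */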
int crypto_ahash_digest(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

        if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
                return -ENOKEY;

        return crypto_ahash_op(req, tfm->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

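/*
 * Default ->finup() implementation: ->update() followed by ->final(),
 * wired through the save/restore helpers above so the combination also
 * works for asynchronous algorithms.  done1 runs after the update step,
 * done2 after the final step.
 */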
static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;

        if (err == -EINPROGRESS)
                return;

        ahash_restore_req(areq, err);

        areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
        if (err)
                goto out;

        req->base.complete = ahash_def_finup_done2;

        err = crypto_ahash_reqtfm(req)->final(req);
        if (err == -EINPROGRESS || err == -EBUSY)
                return err;

out:
        ahash_restore_req(req, err);
        return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;

        if (err == -EINPROGRESS) {
                ahash_notify_einprogress(areq);
                return;
        }

        areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        err = ahash_def_finup_finish1(areq, err);
        if (areq->priv)
                return;

        areq->base.complete(&areq->base, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        int err;

        err = ahash_save_req(req, ahash_def_finup_done1);
        if (err)
                return err;

        err = tfm->update(req);
        if (err == -EINPROGRESS || err == -EBUSY)
                return err;

        return ahash_def_finup_finish1(req, err);
}

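/*
 * Instantiate an ahash transform.  Algorithms registered through the
 * shash interface are wrapped with async-capable shash ops; native
 * ahash algorithms have their callbacks copied into the tfm, with
 * ahash_def_finup() filling in a missing ->finup().
 */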
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
        struct ahash_alg *alg = crypto_ahash_alg(hash);

        hash->setkey = ahash_nosetkey;

        if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
                return crypto_init_shash_ops_async(tfm);

        hash->init = alg->init;
        hash->update = alg->update;
        hash->final = alg->final;
        hash->finup = alg->finup ?: ahash_def_finup;
        hash->digest = alg->digest;
        hash->export = alg->export;
        hash->import = alg->import;

        if (alg->setkey) {
                hash->setkey = alg->setkey;
                ahash_set_needkey(hash);
        }

        return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
        if (alg->cra_type != &crypto_ahash_type)
                return sizeof(struct crypto_shash *);

        return crypto_alg_extsize(alg);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_hash rhash;

        strncpy(rhash.type, "ahash", sizeof(rhash.type));

        rhash.blocksize = alg->cra_blocksize;
        rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

        if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
                    sizeof(struct crypto_report_hash), &rhash))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
        __maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
        seq_printf(m, "type         : ahash\n");
        seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                                             "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "digestsize   : %u\n",
                   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
        .extsize = crypto_ahash_extsize,
        .init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
        .show = crypto_ahash_show,
#endif
        .report = crypto_ahash_report,
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
        .type = CRYPTO_ALG_TYPE_AHASH,
        .tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

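/*
 * Allocate an ahash transform by algorithm name, e.g.
 * crypto_alloc_ahash("sha256", 0, 0); returns an ERR_PTR() on failure.
 */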
struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
                                        u32 mask)
{
        return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
        return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

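/*
 * Validate and normalise an ahash algorithm prior to registration: the
 * digest and exported state sizes must be sane, and the type bits are
 * forced to CRYPTO_ALG_TYPE_AHASH.
 */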
static int ahash_prepare_alg(struct ahash_alg *alg)
{
        struct crypto_alg *base = &alg->halg.base;

        if (alg->halg.digestsize > PAGE_SIZE / 8 ||
            alg->halg.statesize > PAGE_SIZE / 8 ||
            alg->halg.statesize == 0)
                return -EINVAL;

        base->cra_type = &crypto_ahash_type;
        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
        base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

        return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
        struct crypto_alg *base = &alg->halg.base;
        int err;

        err = ahash_prepare_alg(alg);
        if (err)
                return err;

        return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

int crypto_unregister_ahash(struct ahash_alg *alg)
{
        return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int crypto_register_ahashes(struct ahash_alg *algs, int count)
{
        int i, ret;

        for (i = 0; i < count; i++) {
                ret = crypto_register_ahash(&algs[i]);
                if (ret)
                        goto err;
        }

        return 0;

err:
        /* Unwind: unregister everything that registered successfully. */
        for (--i; i >= 0; --i)
                crypto_unregister_ahash(&algs[i]);

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_ahashes);

void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
{
        int i;

        for (i = count - 1; i >= 0; --i)
                crypto_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);

int ahash_register_instance(struct crypto_template *tmpl,
                            struct ahash_instance *inst)
{
        int err;

        err = ahash_prepare_alg(&inst->alg);
        if (err)
                return err;

        return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
        crypto_drop_spawn(crypto_instance_ctx(inst));
        kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
                            struct hash_alg_common *alg,
                            struct crypto_instance *inst)
{
        return crypto_init_spawn2(&spawn->base, &alg->base, inst,
                                  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
        struct crypto_alg *alg;

        alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
        return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

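/*
 * Report whether the hash algorithm implements ->setkey(), i.e. whether
 * it is a keyed hash.
 */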
bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
        struct crypto_alg *alg = &halg->base;

        if (alg->cra_type != &crypto_ahash_type)
                return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));

        return __crypto_ahash_alg(alg)->setkey != NULL;
}
EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");