// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

static const struct crypto_type crypto_ahash_type;

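/*
 * Private state used to bounce a request whose result buffer violates
 * the algorithm's alignment mask: ahash_save_req() stashes the
 * caller's completion callback, context pointer, result buffer and
 * flags here and redirects req->result into the aligned ubuf[] area;
 * ahash_restore_req() copies the digest back and undoes the swap.
 */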
struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	u32 flags;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;

	walk->data -= walk->offset;

	if (walk->entrylen && (walk->offset & alignmask) && !err) {
		unsigned int nbytes;

		walk->offset = ALIGN(walk->offset, alignmask + 1);
		nbytes = min(walk->entrylen,
			     (unsigned int)(PAGE_SIZE - walk->offset));
		if (nbytes) {
			walk->entrylen -= nbytes;
			walk->data += walk->offset;
			return nbytes;
		}
	}

	kunmap_atomic(walk->data);
	crypto_yield(walk->flags);

	if (err)
		return err;

	if (walk->entrylen) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
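
/*
 * Illustrative sketch (not part of this file): an ahash ->update()
 * implementation typically drives the two walk helpers above like
 * this, where process_block() stands in for a hypothetical driver
 * routine:
 *
 *	struct crypto_hash_walk walk;
 *	int nbytes, err = 0;
 *
 *	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *	     nbytes = crypto_hash_walk_done(&walk, err))
 *		err = process_block(ctx, walk.data, nbytes);
 *
 *	return nbytes;
 *
 * crypto_hash_walk_done() returns the size of the next chunk, zero
 * when the walk is complete, or a negative error; passing a nonzero
 * err unmaps the current page and terminates the walk with that error.
 */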

static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kfree_sensitive(buffer);
	return ret;
}

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static void ahash_set_needkey(struct crypto_ahash *tfm)
{
	const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);

	if (tfm->setkey != ahash_nosetkey &&
	    !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
		crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int err;

	if ((unsigned long)key & alignmask)
		err = ahash_setkey_unaligned(tfm, key, keylen);
	else
		err = tfm->setkey(tfm, key, keylen);

	if (unlikely(err)) {
		ahash_set_needkey(tfm);
		return err;
	}

	crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
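
/*
 * Usage sketch (assumptions: a keyed algorithm such as "hmac(sha256)"
 * is available; error handling abbreviated):
 *
 *	struct crypto_ahash *tfm;
 *
 *	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_ahash_setkey(tfm, key, keylen);
 *
 * On failure the CRYPTO_TFM_NEED_KEY flag is (re)set, so
 * crypto_ahash_digest() on this tfm fails with -ENOKEY until a valid
 * key is supplied.
 */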

static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/*
	 * WARNING: Voodoo programming below!
	 *
	 * The code below is obscure and hard to understand, thus explanation
	 * is necessary. See include/crypto/hash.h and include/linux/crypto.h
	 * to understand the layout of structures used here!
	 *
	 * The code here will replace portions of the ORIGINAL request with
	 * pointers to new code and buffers so the hashing operation can store
	 * the result in an aligned buffer. We will call the modified request
	 * an ADJUSTED request.
	 *
	 * The newly mangled request will look as such:
	 *
	 * req {
	 *   .result        = ADJUSTED[new aligned buffer]
	 *   .base.complete = ADJUSTED[pointer to completion function]
	 *   .base.data     = ADJUSTED[*req (pointer to self)]
	 *   .priv          = ADJUSTED[new priv] {
	 *           .result   = ORIGINAL(result)
	 *           .complete = ORIGINAL(base.complete)
	 *           .data     = ORIGINAL(base.data)
	 *   }
	 * }
	 */

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	priv->flags = req->base.flags;

	/*
	 * WARNING: We do not backup req->priv here! The req->priv
	 *          is for internal use of the Crypto API and the
	 *          user must _NOT_ _EVER_ depend on its content!
	 */

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}

static void ahash_restore_req(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	/* Restore the original crypto request. */
	req->result = priv->result;

	ahash_request_set_callback(req, priv->flags,
				   priv->complete, priv->data);
	req->priv = NULL;

	/* Free the priv struct (and its aligned result buffer) of the
	 * ADJUSTED request. */
	kfree_sensitive(priv);
}

static void ahash_notify_einprogress(struct ahash_request *req)
{
	struct ahash_request_priv *priv = req->priv;
	struct crypto_async_request oreq;

	oreq.data = priv->data;

	priv->complete(&oreq, -EINPROGRESS);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	/*
	 * Restore the original request, see ahash_op_unaligned() for what
	 * goes where.
	 *
	 * The "struct ahash_request *req" here is in fact the "req.base"
	 * from the ADJUSTED request from ahash_op_unaligned(), thus as it
	 * is a pointer to self, it is also the ADJUSTED "req".
	 */

	/* First copy req->result into priv->result. */
	ahash_restore_req(areq, err);

	/* Complete the ORIGINAL request. */
	areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done);
	if (err)
		return err;

	err = op(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	ahash_restore_req(req, err);

	return err;
}

static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = crypto_ahash_op(req, tfm->digest);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
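
/*
 * Minimal caller sketch for a one-shot digest, waiting synchronously
 * via the crypto_wait_req() helper (buffer names are illustrative):
 *
 *	struct ahash_request *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	if (!req)
 *		return -ENOMEM;
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, sg, digest, nbytes);
 *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *	ahash_request_free(req);
 */
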
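/*
 * Default ->finup() fallback: drivers that do not provide ->finup()
 * get it emulated as ->update() followed by ->final().  Either step
 * may complete asynchronously, so the continuation is split across
 * ahash_def_finup_done1() (runs after update) and
 * ahash_def_finup_done2() (runs after final), with
 * ahash_def_finup_finish1() kicking off the final step.
 */
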
static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS)
		return;

	ahash_restore_req(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;

	err = crypto_ahash_reqtfm(req)->final(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

out:
	ahash_restore_req(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	err = ahash_def_finup_finish1(areq, err);
	if (areq->priv)
		return;

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = tfm->update(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	return ahash_def_finup_finish1(req, err);
}

static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	alg->exit_tfm(hash);
}

static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;
	hash->export = alg->export;
	hash->import = alg->import;

	if (alg->setkey) {
		hash->setkey = alg->setkey;
		ahash_set_needkey(hash);
	}

	if (alg->exit_tfm)
		tfm->exit = crypto_ahash_exit_tfm;

	return alg->init_tfm ? alg->init_tfm(hash) : 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_ahash_type)
		return sizeof(struct crypto_shash *);

	return crypto_alg_extsize(alg);
}

static void crypto_ahash_free_instance(struct crypto_instance *inst)
{
	struct ahash_instance *ahash = ahash_instance(inst);

	ahash->free(ahash);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	memset(&rhash, 0, sizeof(rhash));

	strscpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

static const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
	.free = crypto_ahash_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};

int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
		      struct crypto_instance *inst,
		      const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_ahash_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_ahash);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);
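
/*
 * Note: like other crypto_alloc_* helpers this returns ERR_PTR() on
 * failure, never NULL, so callers must check with IS_ERR()/PTR_ERR()
 * and release the handle with crypto_free_ahash() when done.
 */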

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > HASH_MAX_DIGESTSIZE ||
	    alg->halg.statesize > HASH_MAX_STATESIZE ||
	    alg->halg.statesize == 0)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);
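
/*
 * Registration sketch for a driver-provided algorithm (all "my_*"
 * names and the values below are illustrative, not from a real
 * driver):
 *
 *	static struct ahash_alg my_sha256_alg = {
 *		.init	= my_sha256_init,
 *		.update	= my_sha256_update,
 *		.final	= my_sha256_final,
 *		.digest	= my_sha256_digest,
 *		.halg = {
 *			.digestsize = SHA256_DIGEST_SIZE,
 *			.statesize  = sizeof(struct my_sha256_state),
 *			.base = {
 *				.cra_name	 = "sha256",
 *				.cra_driver_name = "sha256-mydev",
 *				.cra_priority	 = 300,
 *				.cra_flags	 = CRYPTO_ALG_ASYNC,
 *				.cra_blocksize	 = SHA256_BLOCK_SIZE,
 *				.cra_module	 = THIS_MODULE,
 *			},
 *		},
 *	};
 *
 *	err = crypto_register_ahash(&my_sha256_alg);
 *
 * Note that ahash_prepare_alg() above rejects a zero or oversized
 * statesize, so .halg.statesize must be set even when export/import
 * are not implemented.
 */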

void crypto_unregister_ahash(struct ahash_alg *alg)
{
	crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int crypto_register_ahashes(struct ahash_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_ahash(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_ahashes);

void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
	struct crypto_alg *alg = &halg->base;

	if (alg->cra_type != &crypto_ahash_type)
		return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));

	return __crypto_ahash_alg(alg)->setkey != NULL;
}
EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");