crypto/ahash.c
/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

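/*
 * Minimal usage sketch (illustrative only; "my_done_callback", "my_ctx",
 * "data", "len" and "digest" are caller-provided placeholders, and all
 * error handling is elided):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done_callback, my_ctx);
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *	err = crypto_ahash_digest(req);
 *
 * crypto_ahash_digest() may return -EINPROGRESS (or -EBUSY if the request
 * was backlogged), in which case my_done_callback() runs when the operation
 * finishes; ahash_request_free() and crypto_free_ahash() clean up afterwards.
 */
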
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

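/*
 * Map the current page of the walk and return the number of bytes that may
 * be hashed from it.  Async walks (CRYPTO_ALG_ASYNC set in walk->flags) use
 * kmap() so the mapping can be held across a sleep; sync walks use
 * kmap_atomic().  If the offset is not aligned to the algorithm's alignmask,
 * the count is clipped so the caller consumes the unaligned head first.
 */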
static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	if (walk->flags & CRYPTO_ALG_ASYNC)
		walk->data = kmap(walk->pg);
	else
		walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->pg = sg_page(sg);
	walk->offset = sg->offset;
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

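/*
 * Advance the walk once the caller has hashed the bytes returned by the
 * previous step.  A positive return value is the size of the next chunk to
 * hash, zero means the walk is complete, and a negative value propagates
 * the caller's error.  The first branch below resumes a page whose
 * unaligned head was consumed separately.
 */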
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int nbytes = walk->entrylen;

	walk->data -= walk->offset;

	if (nbytes && walk->offset & alignmask && !err) {
		walk->offset = ALIGN(walk->offset, alignmask + 1);
		walk->data += walk->offset;

		nbytes = min(nbytes,
			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
		walk->entrylen -= nbytes;

		return nbytes;
	}

	if (walk->flags & CRYPTO_ALG_ASYNC)
		kunmap(walk->pg);
	else {
		kunmap_atomic(walk->data);
		/*
		 * The may-sleep check only makes sense for sync users.
		 * Async users don't need to sleep here anyway.
		 */
		crypto_yield(walk->flags);
	}

	if (err)
		return err;

	if (nbytes) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = scatterwalk_sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

int crypto_ahash_walk_first(struct ahash_request *req,
			    struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
	walk->flags |= CRYPTO_ALG_ASYNC;

	BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);

int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
				  struct crypto_hash_walk *walk,
				  struct scatterlist *sg, unsigned int len)
{
	walk->total = len;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_hash_alignmask(hdesc->tfm);
	walk->sg = sg;
	walk->flags = hdesc->flags & CRYPTO_TFM_REQ_MASK;

	return hash_walk_new_entry(walk);
}

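/*
 * Bounce an unaligned key through a temporary buffer: the key is copied to
 * an alignmask-aligned address before ->setkey() is called, and the buffer
 * is zeroed on free via kzfree() so no key material is left behind.
 */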
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				  unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)key & alignmask)
		return ahash_setkey_unaligned(tfm, key, keylen);

	return tfm->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

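/*
 * Pad @len so that a buffer of the returned size is guaranteed to contain
 * an alignmask-aligned region of @len bytes; the minimum alignment the
 * allocation already guarantees (the context alignment) is subtracted
 * from the padding.
 */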
static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

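/*
 * Stash the caller's result buffer and completion callback in a private
 * structure and point the request at an aligned bounce buffer and at the
 * internal completion handler @cplt.  Undone by ahash_restore_req() after
 * the finish helpers have copied the digest back.  See the comment below
 * for the resulting layout.
 */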
static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/*
	 * WARNING: Voodoo programming below!
	 *
	 * The code below is obscure and hard to understand, thus explanation
	 * is necessary. See include/crypto/hash.h and include/linux/crypto.h
	 * to understand the layout of structures used here!
	 *
	 * The code here will replace portions of the ORIGINAL request with
	 * pointers to new code and buffers so the hashing operation can store
	 * the result in an aligned buffer. We will call the modified request
	 * an ADJUSTED request.
	 *
	 * The newly mangled request will look like this:
	 *
	 * req {
	 *   .result        = ADJUSTED[new aligned buffer]
	 *   .base.complete = ADJUSTED[pointer to completion function]
	 *   .base.data     = ADJUSTED[*req (pointer to self)]
	 *   .priv          = ADJUSTED[new priv] {
	 *           .result   = ORIGINAL(result)
	 *           .complete = ORIGINAL(base.complete)
	 *           .data     = ORIGINAL(base.data)
	 *   }
	 * }
	 */

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	/*
	 * WARNING: We do not back up req->priv here! The req->priv
	 *          field is for internal use of the Crypto API and the
	 *          user must _NOT_ _EVER_ depend on its content!
	 */

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}

static void ahash_restore_req(struct ahash_request *req)
{
	struct ahash_request_priv *priv = req->priv;

	/* Restore the original crypto request. */
	req->result = priv->result;
	req->base.complete = priv->complete;
	req->base.data = priv->data;
	req->priv = NULL;

	/* Free the req->priv.priv from the ADJUSTED request. */
	kzfree(priv);
}

static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (err == -EINPROGRESS)
		return;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	ahash_restore_req(req);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	/*
	 * Restore the original request, see ahash_op_unaligned() for what
	 * goes where.
	 *
	 * The "req" here is in fact the "base" member embedded in the
	 * ADJUSTED request from ahash_op_unaligned(); since base.data was
	 * set to point back at the request itself, "areq" is also the
	 * ADJUSTED "req".
	 */

	/* First copy req->result into req->priv.result */
	ahash_op_unaligned_finish(areq, err);

	/* Complete the ORIGINAL request. */
	areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done);
	if (err)
		return err;

	err = op(req);
	ahash_op_unaligned_finish(req, err);

	return err;
}

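/*
 * Common dispatcher for final/finup/digest: if the caller's result buffer
 * does not satisfy the algorithm's alignmask, route the request through
 * the save/adjust/restore machinery above, otherwise invoke the operation
 * directly.
 */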
static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

static void ahash_def_finup_finish2(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (err == -EINPROGRESS)
		return;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	ahash_restore_req(req);
}

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	ahash_def_finup_finish2(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;
	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_ahash_reqtfm(req)->final(req);

out:
	ahash_def_finup_finish2(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	err = ahash_def_finup_finish1(areq, err);

	areq->base.complete(&areq->base, err);
}

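/*
 * Default ->finup() for algorithms that only implement ->update() and
 * ->final(): run the two back to back, using ahash_save_req() so the
 * caller's completion fires exactly once, after ->final() has finished,
 * even when either step completes asynchronously.
 */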
static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = tfm->update(req);
	return ahash_def_finup_finish1(req, err);
}

static int ahash_no_export(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

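/*
 * Transform construction: wire up the ahash entry points from the
 * algorithm, substituting safe defaults (-ENOSYS stubs, ahash_def_finup)
 * for the optional ones.  If the underlying algorithm is actually a
 * synchronous shash, fall back to the async-over-shash wrappers instead.
 */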
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;
	hash->export = ahash_no_export;
	hash->import = ahash_no_import;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;

	if (alg->setkey)
		hash->setkey = alg->setkey;
	if (alg->export)
		hash->export = alg->export;
	if (alg->import)
		hash->import = alg->import;

	return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_ahash_type)
		return alg->cra_ctxsize;

	return sizeof(struct crypto_shash *);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	strncpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
		    sizeof(struct crypto_report_hash), &rhash))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
	    alg->halg.statesize > PAGE_SIZE / 8)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

int crypto_unregister_ahash(struct ahash_alg *alg)
{
	return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
			    struct hash_alg_common *alg,
			    struct crypto_instance *inst)
{
	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
				  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
	return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");