crypto/ahash.c
/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

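/*
 * Stash for the caller's original completion callback, callback data and
 * result pointer while a request is temporarily rewritten to use an
 * aligned result buffer (see ahash_save_req() below).
 */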
struct ahash_request_priv {
        crypto_completion_t complete;
        void *data;
        u8 *result;
        void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
        return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
                            halg);
}

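/*
 * Map the current walk page and compute how many bytes this step may
 * process: at most to the end of the page, and, when the offset is
 * unaligned, only up to the next alignment boundary.
 */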
static int hash_walk_next(struct crypto_hash_walk *walk)
{
        unsigned int alignmask = walk->alignmask;
        unsigned int offset = walk->offset;
        unsigned int nbytes = min(walk->entrylen,
                                  ((unsigned int)(PAGE_SIZE)) - offset);

        if (walk->flags & CRYPTO_ALG_ASYNC)
                walk->data = kmap(walk->pg);
        else
                walk->data = kmap_atomic(walk->pg);
        walk->data += offset;

        if (offset & alignmask) {
                unsigned int unaligned = alignmask + 1 - (offset & alignmask);

                if (nbytes > unaligned)
                        nbytes = unaligned;
        }

        walk->entrylen -= nbytes;
        return nbytes;
}

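/*
 * Load the next scatterlist entry into the walk, capping its length at
 * the number of bytes remaining in the request.
 */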
static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
        struct scatterlist *sg;

        sg = walk->sg;
        walk->pg = sg_page(sg);
        walk->offset = sg->offset;
        walk->entrylen = sg->length;

        if (walk->entrylen > walk->total)
                walk->entrylen = walk->total;
        walk->total -= walk->entrylen;

        return hash_walk_next(walk);
}

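/*
 * Complete one step of a hash walk. Returns the number of bytes to
 * process in the next step (continuing within the current page, on the
 * next page, or on the next scatterlist entry), 0 once the walk is
 * done, or a negative errno propagated from the caller.
 */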
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
        unsigned int alignmask = walk->alignmask;
        unsigned int nbytes = walk->entrylen;

        walk->data -= walk->offset;

        if (nbytes && walk->offset & alignmask && !err) {
                walk->offset = ALIGN(walk->offset, alignmask + 1);
                walk->data += walk->offset;

                nbytes = min(nbytes,
                             ((unsigned int)(PAGE_SIZE)) - walk->offset);
                walk->entrylen -= nbytes;

                return nbytes;
        }

        if (walk->flags & CRYPTO_ALG_ASYNC) {
                kunmap(walk->pg);
        } else {
                kunmap_atomic(walk->data);
                /*
                 * The may-sleep test only makes sense for sync users.
                 * Async users don't need to sleep here anyway.
                 */
                crypto_yield(walk->flags);
        }

        if (err)
                return err;

        if (nbytes) {
                walk->offset = 0;
                walk->pg++;
                return hash_walk_next(walk);
        }

        if (!walk->total)
                return 0;

        walk->sg = sg_next(walk->sg);

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

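/*
 * Begin a hash walk over the request's source scatterlist using atomic
 * kmaps. Returns the number of bytes available in the first step, or 0
 * for an empty request.
 */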
int crypto_hash_walk_first(struct ahash_request *req,
                           struct crypto_hash_walk *walk)
{
        walk->total = req->nbytes;

        if (!walk->total) {
                walk->entrylen = 0;
                return 0;
        }

        walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
        walk->sg = req->src;
        walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

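/*
 * Like crypto_hash_walk_first(), but flags the walk as async so that
 * pages are mapped with kmap() instead of kmap_atomic(), allowing the
 * caller to sleep between steps.
 */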
int crypto_ahash_walk_first(struct ahash_request *req,
                            struct crypto_hash_walk *walk)
{
        walk->total = req->nbytes;

        if (!walk->total) {
                walk->entrylen = 0;
                return 0;
        }

        walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
        walk->sg = req->src;
        walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
        walk->flags |= CRYPTO_ALG_ASYNC;

        BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);

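/*
 * Compatibility entry point for the legacy hash_desc interface; walks
 * an explicit scatterlist/length pair rather than an ahash_request.
 */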
int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
                                  struct crypto_hash_walk *walk,
                                  struct scatterlist *sg, unsigned int len)
{
        walk->total = len;

        if (!walk->total) {
                walk->entrylen = 0;
                return 0;
        }

        walk->alignmask = crypto_hash_alignmask(hdesc->tfm);
        walk->sg = sg;
        walk->flags = hdesc->flags & CRYPTO_TFM_REQ_MASK;

        return hash_walk_new_entry(walk);
}

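/*
 * Copy an unaligned key into a properly aligned scratch buffer before
 * handing it to the transform's setkey(); the buffer is zeroed on free
 * so no key material is left behind.
 */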
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
                                  unsigned int keylen)
{
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        int ret;
        u8 *buffer, *alignbuffer;
        unsigned long absize;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = tfm->setkey(tfm, alignbuffer, keylen);
        kzfree(buffer);
        return ret;
}

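/*
 * Set the key for a keyed hash, taking the slow copying path only when
 * the caller's key buffer violates the algorithm's alignment mask.
 */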
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
                        unsigned int keylen)
{
        unsigned long alignmask = crypto_ahash_alignmask(tfm);

        if ((unsigned long)key & alignmask)
                return ahash_setkey_unaligned(tfm, key, keylen);

        return tfm->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
                          unsigned int keylen)
{
        return -ENOSYS;
}

static inline unsigned int ahash_align_buffer_size(unsigned len,
                                                   unsigned long mask)
{
        return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

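/*
 * Rewrite the request to use an aligned result buffer and our own
 * completion callback, saving the caller's originals in a freshly
 * allocated ahash_request_priv; see the detailed comment below.
 */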
static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        unsigned int ds = crypto_ahash_digestsize(tfm);
        struct ahash_request_priv *priv;

        priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
                       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC);
        if (!priv)
                return -ENOMEM;

        /*
         * WARNING: Voodoo programming below!
         *
         * The code below is obscure and hard to understand, thus explanation
         * is necessary. See include/crypto/hash.h and include/linux/crypto.h
         * to understand the layout of structures used here!
         *
         * The code here will replace portions of the ORIGINAL request with
         * pointers to new code and buffers so the hashing operation can store
         * the result in an aligned buffer. We will call the modified request
         * an ADJUSTED request.
         *
         * The newly mangled request will look as such:
         *
         * req {
         *   .result        = ADJUSTED[new aligned buffer]
         *   .base.complete = ADJUSTED[pointer to completion function]
         *   .base.data     = ADJUSTED[*req (pointer to self)]
         *   .priv          = ADJUSTED[new priv] {
         *           .result   = ORIGINAL(result)
         *           .complete = ORIGINAL(base.complete)
         *           .data     = ORIGINAL(base.data)
         *   }
         * }
         */

        priv->result = req->result;
        priv->complete = req->base.complete;
        priv->data = req->base.data;
        /*
         * WARNING: We do not back up req->priv here! The req->priv
         *          is for internal use of the Crypto API and the
         *          user must _NOT_ _EVER_ depend on its contents!
         */

        req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
        req->base.complete = cplt;
        req->base.data = req;
        req->priv = priv;

        return 0;
}

static void ahash_restore_req(struct ahash_request *req)
{
        struct ahash_request_priv *priv = req->priv;

        /* Restore the original crypto request. */
        req->result = priv->result;
        req->base.complete = priv->complete;
        req->base.data = priv->data;
        req->priv = NULL;

        /* Free the priv block that carried the ORIGINAL request state. */
        kzfree(priv);
}

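/*
 * On completion of an unaligned operation, copy the digest back into
 * the caller's original result buffer and restore the request.
 */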
static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
{
        struct ahash_request_priv *priv = req->priv;

        if (err == -EINPROGRESS)
                return;

        if (!err)
                memcpy(priv->result, req->result,
                       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

        ahash_restore_req(req);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;

        /*
         * Restore the original request; see ahash_op_unaligned() for what
         * goes where.
         *
         * The "struct crypto_async_request *req" here is in fact the
         * "req.base" from the ADJUSTED request set up by
         * ahash_op_unaligned(); since base.data points back at the request
         * itself, "areq" is also the ADJUSTED "req".
         */

        /* First copy req->result into req->priv->result. */
        ahash_op_unaligned_finish(areq, err);

        /* Complete the ORIGINAL request. */
        areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
                              int (*op)(struct ahash_request *))
{
        int err;

        err = ahash_save_req(req, ahash_op_unaligned_done);
        if (err)
                return err;

        err = op(req);
        ahash_op_unaligned_finish(req, err);

        return err;
}

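/*
 * Common dispatcher for final/finup/digest: take the unaligned slow
 * path only when the result buffer violates the alignment mask.
 */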
static int crypto_ahash_op(struct ahash_request *req,
                           int (*op)(struct ahash_request *))
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        unsigned long alignmask = crypto_ahash_alignmask(tfm);

        if ((unsigned long)req->result & alignmask)
                return ahash_op_unaligned(req, op);

        return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
        return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
        return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
        return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

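/*
 * Helpers for the default finup() path below: once final() completes,
 * copy the digest into the caller's original result buffer and restore
 * the request saved by ahash_save_req().
 */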
static void ahash_def_finup_finish2(struct ahash_request *req, int err)
{
        struct ahash_request_priv *priv = req->priv;

        if (err == -EINPROGRESS)
                return;

        if (!err)
                memcpy(priv->result, req->result,
                       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

        ahash_restore_req(req);
}

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;

        ahash_def_finup_finish2(areq, err);

        areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
        if (err)
                goto out;

        req->base.complete = ahash_def_finup_done2;
        req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
        err = crypto_ahash_reqtfm(req)->final(req);

out:
        ahash_def_finup_finish2(req, err);
        return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;

        err = ahash_def_finup_finish1(areq, err);

        areq->base.complete(&areq->base, err);
}

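/*
 * Default finup() implementation for algorithms that provide only
 * update() and final(): the two steps are chained via the *_done1 and
 * *_done2 callbacks so that either may complete asynchronously.
 */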
static int ahash_def_finup(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        int err;

        err = ahash_save_req(req, ahash_def_finup_done1);
        if (err)
                return err;

        err = tfm->update(req);
        return ahash_def_finup_finish1(req, err);
}

static int ahash_no_export(struct ahash_request *req, void *out)
{
        return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
        return -ENOSYS;
}

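/*
 * Instantiate a transform: install safe defaults for the optional
 * callbacks, then either defer to the shash wrapper for legacy
 * algorithms or wire up the ahash_alg operations directly.
 */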
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
        struct ahash_alg *alg = crypto_ahash_alg(hash);

        hash->setkey = ahash_nosetkey;
        hash->export = ahash_no_export;
        hash->import = ahash_no_import;

        if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
                return crypto_init_shash_ops_async(tfm);

        hash->init = alg->init;
        hash->update = alg->update;
        hash->final = alg->final;
        hash->finup = alg->finup ?: ahash_def_finup;
        hash->digest = alg->digest;

        if (alg->setkey)
                hash->setkey = alg->setkey;
        if (alg->export)
                hash->export = alg->export;
        if (alg->import)
                hash->import = alg->import;

        return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
        if (alg->cra_type == &crypto_ahash_type)
                return alg->cra_ctxsize;

        return sizeof(struct crypto_shash *);
}

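/* Report hash parameters to userspace via the crypto_user netlink API. */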
#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_hash rhash;

        strncpy(rhash.type, "ahash", sizeof(rhash.type));

        rhash.blocksize = alg->cra_blocksize;
        rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

        if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
                    sizeof(struct crypto_report_hash), &rhash))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
        __attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
        seq_printf(m, "type         : ahash\n");
        seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                                             "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "digestsize   : %u\n",
                   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
        .extsize = crypto_ahash_extsize,
        .init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
        .show = crypto_ahash_show,
#endif
        .report = crypto_ahash_report,
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
        .type = CRYPTO_ALG_TYPE_AHASH,
        .tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

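/*
 * Allocate and initialise an ahash transform by algorithm name, e.g.
 * "sha256"; returns an ERR_PTR() on failure.
 */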
struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
                                        u32 mask)
{
        return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

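/*
 * Validate digest/state sizes and mark the algorithm as an ahash prior
 * to registration.
 */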
static int ahash_prepare_alg(struct ahash_alg *alg)
{
        struct crypto_alg *base = &alg->halg.base;

        if (alg->halg.digestsize > PAGE_SIZE / 8 ||
            alg->halg.statesize > PAGE_SIZE / 8 ||
            alg->halg.statesize == 0)
                return -EINVAL;

        base->cra_type = &crypto_ahash_type;
        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
        base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

        return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
        struct crypto_alg *base = &alg->halg.base;
        int err;

        err = ahash_prepare_alg(alg);
        if (err)
                return err;

        return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

int crypto_unregister_ahash(struct ahash_alg *alg)
{
        return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int ahash_register_instance(struct crypto_template *tmpl,
                            struct ahash_instance *inst)
{
        int err;

        err = ahash_prepare_alg(&inst->alg);
        if (err)
                return err;

        return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
        crypto_drop_spawn(crypto_instance_ctx(inst));
        kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
                            struct hash_alg_common *alg,
                            struct crypto_instance *inst)
{
        return crypto_init_spawn2(&spawn->base, &alg->base, inst,
                                  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
        struct crypto_alg *alg;

        alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
        return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
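
/*
 * Usage sketch (illustrative only; not compiled as part of this file).
 * The names my_done, my_ctx, sg, result and nbytes are assumed to be
 * caller-provided, and error checking of the allocations is elided:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done, my_ctx);
 *	ahash_request_set_crypt(req, sg, result, nbytes);
 *
 *	err = crypto_ahash_digest(req);
 *	(-EINPROGRESS or -EBUSY means my_done() runs on completion)
 *
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */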