// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

static const struct crypto_type crypto_ahash_type;

struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	u32 flags;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;

	walk->data -= walk->offset;

	if (walk->entrylen && (walk->offset & alignmask) && !err) {
		unsigned int nbytes;

		walk->offset = ALIGN(walk->offset, alignmask + 1);
		nbytes = min(walk->entrylen,
			     (unsigned int)(PAGE_SIZE - walk->offset));

		if (nbytes) {
			walk->entrylen -= nbytes;
			walk->data += walk->offset;
			return nbytes;
		}
	}

	kunmap_atomic(walk->data);
	crypto_yield(walk->flags);

	if (err)
		return err;

	if (walk->entrylen) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

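/*
 * Example (not part of this file): hash implementations typically consume
 * a request's scatterlist with the walk helpers above. A minimal sketch,
 * assuming a hypothetical per-chunk helper my_hash_chunk():
 *
 *	struct crypto_hash_walk walk;
 *	int nbytes;
 *
 *	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *	     nbytes = crypto_hash_walk_done(&walk, 0))
 *		my_hash_chunk(ctx, walk.data, nbytes);
 *
 * Each iteration yields one mapped, alignment-trimmed chunk at walk.data;
 * crypto_hash_walk_done() unmaps it and advances to the next chunk.
 */
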
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				  unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kfree_sensitive(buffer);
	return ret;
}

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static void ahash_set_needkey(struct crypto_ahash *tfm)
{
	const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);

	if (tfm->setkey != ahash_nosetkey &&
	    !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
		crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int err;

	if ((unsigned long)key & alignmask)
		err = ahash_setkey_unaligned(tfm, key, keylen);
	else
		err = tfm->setkey(tfm, key, keylen);

	if (unlikely(err)) {
		ahash_set_needkey(tfm);
		return err;
	}

	crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

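/*
 * Example (not part of this file): keyed hashes such as "hmac(sha256)"
 * must be keyed before use, otherwise digest operations fail with
 * -ENOKEY (see crypto_ahash_digest() below). A minimal sketch:
 *
 *	struct crypto_ahash *tfm;
 *	int err;
 *
 *	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_ahash_setkey(tfm, key, keylen);
 *
 * On success the CRYPTO_TFM_NEED_KEY flag is cleared; on failure it is
 * set again so that a partially keyed transform cannot be used.
 */
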
static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/*
	 * WARNING: Voodoo programming below!
	 *
	 * The code below is obscure and hard to understand, thus explanation
	 * is necessary. See include/crypto/hash.h and include/linux/crypto.h
	 * to understand the layout of structures used here!
	 *
	 * The code here will replace portions of the ORIGINAL request with
	 * pointers to new code and buffers so the hashing operation can store
	 * the result in an aligned buffer. We will call the modified request
	 * an ADJUSTED request.
	 *
	 * The newly mangled request will look as such:
	 *
	 * req {
	 *   .result        = ADJUSTED[new aligned buffer]
	 *   .base.complete = ADJUSTED[pointer to completion function]
	 *   .base.data     = ADJUSTED[*req (pointer to self)]
	 *   .priv          = ADJUSTED[new priv] {
	 *           .result   = ORIGINAL(result)
	 *           .complete = ORIGINAL(base.complete)
	 *           .data     = ORIGINAL(base.data)
	 *   }
	 * }
	 */

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	priv->flags = req->base.flags;

	/*
	 * WARNING: We do not backup req->priv here! The req->priv
	 *          is for internal use of the Crypto API and the
	 *          user must _NOT_ _EVER_ depend on its content!
	 */

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}

static void ahash_restore_req(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	/* Restore the original crypto request. */
	req->result = priv->result;

	ahash_request_set_callback(req, priv->flags,
				   priv->complete, priv->data);
	req->priv = NULL;

	/* Free the req->priv.priv from the ADJUSTED request. */
	kfree_sensitive(priv);
}

static void ahash_notify_einprogress(struct ahash_request *req)
{
	struct ahash_request_priv *priv = req->priv;
	struct crypto_async_request oreq;

	oreq.data = priv->data;

	priv->complete(&oreq, -EINPROGRESS);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	/*
	 * Restore the original request, see ahash_op_unaligned() for what
	 * goes where.
	 *
	 * The "struct ahash_request *req" here is in fact the "req.base"
	 * from the ADJUSTED request from ahash_op_unaligned(), thus as it
	 * is a pointer to self, it is also the ADJUSTED "req".
	 */

	/* First copy req->result into req->priv.result */
	ahash_restore_req(areq, err);

	/* Complete the ORIGINAL request. */
	areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done);
	if (err)
		return err;

	err = op(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	ahash_restore_req(req, err);

	return err;
}

static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = crypto_ahash_op(req, tfm->digest);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

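/*
 * Example (not part of this file): a one-shot digest from caller context.
 * A minimal sketch, assuming "tfm" came from crypto_alloc_ahash() and
 * "sg" describes "len" bytes of input:
 *
 *	struct ahash_request *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	u8 out[SHA256_DIGEST_SIZE];
 *	int err;
 *
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	if (!req)
 *		return -ENOMEM;
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, sg, out, len);
 *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *	ahash_request_free(req);
 *
 * crypto_wait_req() converts the -EINPROGRESS/-EBUSY asynchronous
 * completions handled in this file into a synchronous wait.
 */
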
static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS)
		return;

	ahash_restore_req(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;

	err = crypto_ahash_reqtfm(req)->final(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

out:
	ahash_restore_req(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	err = ahash_def_finup_finish1(areq, err);
	if (areq->priv)
		return;

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = tfm->update(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	return ahash_def_finup_finish1(req, err);
}

static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	alg->exit_tfm(hash);
}

static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;
	hash->export = alg->export;
	hash->import = alg->import;

	if (alg->setkey) {
		hash->setkey = alg->setkey;
		ahash_set_needkey(hash);
	}

	if (alg->exit_tfm)
		tfm->exit = crypto_ahash_exit_tfm;

	return alg->init_tfm ? alg->init_tfm(hash) : 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_ahash_type)
		return sizeof(struct crypto_shash *);

	return crypto_alg_extsize(alg);
}

static void crypto_ahash_free_instance(struct crypto_instance *inst)
{
	struct ahash_instance *ahash = ahash_instance(inst);

	ahash->free(ahash);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	memset(&rhash, 0, sizeof(rhash));

	strscpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

static const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
	.free = crypto_ahash_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};

int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
		      struct crypto_instance *inst,
		      const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_ahash_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_ahash);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > HASH_MAX_DIGESTSIZE ||
	    alg->halg.statesize > HASH_MAX_STATESIZE ||
	    alg->halg.statesize == 0)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

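/*
 * Example (not part of this file): a driver advertises its implementation
 * by filling in a struct ahash_alg. A minimal sketch with hypothetical
 * my_* callbacks and context structures:
 *
 *	static struct ahash_alg my_sha256_alg = {
 *		.init	= my_init,
 *		.update	= my_update,
 *		.final	= my_final,
 *		.digest	= my_digest,
 *		.halg	= {
 *			.digestsize = SHA256_DIGEST_SIZE,
 *			.statesize  = sizeof(struct my_hash_state),
 *			.base	= {
 *				.cra_name	 = "sha256",
 *				.cra_driver_name = "sha256-mydev",
 *				.cra_priority	 = 300,
 *				.cra_flags	 = CRYPTO_ALG_ASYNC,
 *				.cra_blocksize	 = SHA256_BLOCK_SIZE,
 *				.cra_ctxsize	 = sizeof(struct my_tfm_ctx),
 *				.cra_module	 = THIS_MODULE,
 *			},
 *		},
 *	};
 *
 *	err = crypto_register_ahash(&my_sha256_alg);
 *
 * Note that ahash_prepare_alg() above rejects a zero or oversized
 * statesize, so .statesize must be valid even when export/import are
 * not provided.
 */
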
void crypto_unregister_ahash(struct ahash_alg *alg)
{
	crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int crypto_register_ahashes(struct ahash_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_ahash(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_ahashes);

void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
	struct crypto_alg *alg = &halg->base;

	if (alg->cra_type != &crypto_ahash_type)
		return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));

	return __crypto_ahash_alg(alg)->setkey != NULL;
}
EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");