/*
 * Multi buffer SHA256 algorithm Glue Code
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 *  Copyright(c) 2016 Intel Corporation.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of version 2 of the GNU General Public License as
 *  published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  Contact Information:
 *	Megha Dey <megha.dey@linux.intel.com>
 *
 *  BSD LICENSE
 *
 *  Copyright(c) 2016 Intel Corporation.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *    * Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    * Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in
 *      the documentation and/or other materials provided with the
 *      distribution.
 *    * Neither the name of Intel Corporation nor the names of its
 *      contributors may be used to endorse or promote products derived
 *      from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <linux/list.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <asm/byteorder.h>
#include <linux/hardirq.h>
#include <asm/fpu/api.h>
#include "sha256_mb_ctx.h"
#define FLUSH_INTERVAL 1000 /* in usec */

static struct mcryptd_alg_state sha256_mb_alg_state;

struct sha256_mb_ctx {
	struct mcryptd_ahash *mcryptd_tfm;
};
static inline struct mcryptd_hash_request_ctx
		*cast_hash_to_mcryptd_ctx(struct sha256_hash_ctx *hash_ctx)
{
	struct ahash_request *areq;

	areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
	return container_of(areq, struct mcryptd_hash_request_ctx, areq);
}

static inline struct ahash_request
		*cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
{
	return container_of((void *) ctx, struct ahash_request, __ctx);
}
static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
			 struct ahash_request *areq)
{
	rctx->flag = HASH_UPDATE;
}
static asmlinkage void (*sha256_job_mgr_init)(struct sha256_mb_mgr *state);
static asmlinkage struct job_sha256* (*sha256_job_mgr_submit)
			(struct sha256_mb_mgr *state, struct job_sha256 *job);
static asmlinkage struct job_sha256* (*sha256_job_mgr_flush)
			(struct sha256_mb_mgr *state);
static asmlinkage struct job_sha256* (*sha256_job_mgr_get_comp_job)
			(struct sha256_mb_mgr *state);
inline void sha256_init_digest(uint32_t *digest)
{
	static const uint32_t initial_digest[SHA256_DIGEST_LENGTH] = {
				SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
				SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7};
	memcpy(digest, initial_digest, sizeof(initial_digest));
}
inline uint32_t sha256_pad(uint8_t padblock[SHA256_BLOCK_SIZE * 2],
			uint64_t total_len)
{
	uint32_t i = total_len & (SHA256_BLOCK_SIZE - 1);

	memset(&padblock[i], 0, SHA256_BLOCK_SIZE);
	padblock[i] = 0x80;

	i += ((SHA256_BLOCK_SIZE - 1) &
	      (0 - (total_len + SHA256_PADLENGTHFIELD_SIZE + 1)))
	     + 1 + SHA256_PADLENGTHFIELD_SIZE;

#if SHA256_PADLENGTHFIELD_SIZE == 16
	*((uint64_t *) &padblock[i - 16]) = 0;
#endif

	*((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);

	/* Number of extra blocks to hash */
	return i >> SHA256_LOG2_BLOCK_SIZE;
}
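
/*
 * Worked example of the padding arithmetic above (a sketch, assuming
 * SHA256_BLOCK_SIZE == 64 and SHA256_PADLENGTHFIELD_SIZE == 8 as defined
 * in sha256_mb_ctx.h): for total_len == 100, i starts at 100 & 63 == 36,
 * so padblock[36] = 0x80 and bytes 37..55 stay zero.  The alignment
 * expression adds ((63 & -(100 + 8 + 1)) + 1 + 8) == 19 + 9 == 28, giving
 * i == 64, and cpu_to_be64(100 << 3) lands in padblock[56..63].  The
 * function then reports 64 >> 6 == 1 extra block to hash.
 */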
static struct sha256_hash_ctx
		*sha256_ctx_mgr_resubmit(struct sha256_ctx_mgr *mgr,
					struct sha256_hash_ctx *ctx)
{
	while (ctx) {
		if (ctx->status & HASH_CTX_STS_COMPLETE) {
			/* Clear PROCESSING bit */
			ctx->status = HASH_CTX_STS_COMPLETE;
			return ctx;
		}

		/*
		 * If the extra blocks are empty, begin hashing what remains
		 * in the user's buffer.
		 */
		if (ctx->partial_block_buffer_length == 0 &&
		    ctx->incoming_buffer_length) {

			const void *buffer = ctx->incoming_buffer;
			uint32_t len = ctx->incoming_buffer_length;
			uint32_t copy_len;

			/*
			 * Only entire blocks can be hashed.
			 * Copy remainder to extra blocks buffer.
			 */
			copy_len = len & (SHA256_BLOCK_SIZE - 1);

			if (copy_len) {
				len -= copy_len;
				memcpy(ctx->partial_block_buffer,
				       ((const char *) buffer + len),
				       copy_len);
				ctx->partial_block_buffer_length = copy_len;
			}

			ctx->incoming_buffer_length = 0;

			/* len should be a multiple of the block size now */
			assert((len % SHA256_BLOCK_SIZE) == 0);

			/* Set len to the number of blocks to be hashed */
			len >>= SHA256_LOG2_BLOCK_SIZE;

			if (len) {
				ctx->job.buffer = (uint8_t *) buffer;
				ctx->job.len = len;
				ctx = (struct sha256_hash_ctx *)
				sha256_job_mgr_submit(&mgr->mgr, &ctx->job);
				continue;
			}
		}

		/*
		 * If the extra blocks are not empty, then we are
		 * either on the last block(s) or we need more
		 * user input before continuing.
		 */
		if (ctx->status & HASH_CTX_STS_LAST) {

			uint8_t *buf = ctx->partial_block_buffer;
			uint32_t n_extra_blocks =
				sha256_pad(buf, ctx->total_length);

			ctx->status = (HASH_CTX_STS_PROCESSING |
				       HASH_CTX_STS_COMPLETE);
			ctx->job.buffer = buf;
			ctx->job.len = (uint32_t) n_extra_blocks;
			ctx = (struct sha256_hash_ctx *)
				sha256_job_mgr_submit(&mgr->mgr, &ctx->job);
			continue;
		}

		ctx->status = HASH_CTX_STS_IDLE;
		return ctx;
	}

	return NULL;
}
static struct sha256_hash_ctx
		*sha256_ctx_mgr_get_comp_ctx(struct sha256_ctx_mgr *mgr)
{
	/*
	 * If get_comp_job returns NULL, there are no jobs complete.
	 * If get_comp_job returns a job, verify that it is safe to return to
	 * the user. If it is not ready, resubmit the job to finish processing.
	 * If sha256_ctx_mgr_resubmit returned a job, it is ready to be
	 * returned. Otherwise, all jobs currently being managed by the
	 * hash_ctx_mgr still need processing.
	 */
	struct sha256_hash_ctx *ctx;

	ctx = (struct sha256_hash_ctx *) sha256_job_mgr_get_comp_job(&mgr->mgr);
	return sha256_ctx_mgr_resubmit(mgr, ctx);
}
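
/*
 * A minimal sketch of how a consumer drains completions (illustrative
 * only; sha_complete_job() below is the real in-tree caller):
 *
 *	while ((ctx = sha256_ctx_mgr_get_comp_ctx(mgr)) != NULL)
 *		... hand ctx->job.result_digest back to its owner ...
 */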
static void sha256_ctx_mgr_init(struct sha256_ctx_mgr *mgr)
{
	sha256_job_mgr_init(&mgr->mgr);
}
static struct sha256_hash_ctx *sha256_ctx_mgr_submit(struct sha256_ctx_mgr *mgr,
					struct sha256_hash_ctx *ctx,
					const void *buffer,
					uint32_t len,
					int flags)
{
	if (flags & (~HASH_ENTIRE)) {
		/* User should not pass anything other than FIRST, UPDATE
		 * or LAST
		 */
		ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
		return ctx;
	}

	if (ctx->status & HASH_CTX_STS_PROCESSING) {
		/* Cannot submit to a currently processing job. */
		ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
		return ctx;
	}

	if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) {
		/* Cannot update a finished job. */
		ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
		return ctx;
	}

	if (flags & HASH_FIRST) {
		/* Init digest */
		sha256_init_digest(ctx->job.result_digest);

		/* Reset byte counter */
		ctx->total_length = 0;

		/* Clear extra blocks */
		ctx->partial_block_buffer_length = 0;
	}

	/* If we made it here, there was no error during this call to submit */
	ctx->error = HASH_CTX_ERROR_NONE;

	/* Store buffer ptr info from user */
	ctx->incoming_buffer = buffer;
	ctx->incoming_buffer_length = len;

	/* Store the user's request flags and mark this ctx as currently
	 * being processed.
	 */
	ctx->status = (flags & HASH_LAST) ?
			(HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
			HASH_CTX_STS_PROCESSING;

	/* Advance byte counter */
	ctx->total_length += len;

	/* If there is anything currently buffered in the extra blocks,
	 * append to it until it contains a whole block.
	 * Or if the user's buffer contains less than a whole block,
	 * append as much as possible to the extra block.
	 */
	if (ctx->partial_block_buffer_length || len < SHA256_BLOCK_SIZE) {
		/* Compute how many bytes to copy from user buffer into
		 * extra block
		 */
		uint32_t copy_len = SHA256_BLOCK_SIZE -
					ctx->partial_block_buffer_length;
		if (len < copy_len)
			copy_len = len;

		if (copy_len) {
			/* Copy and update relevant pointers and counters */
			memcpy(
		&ctx->partial_block_buffer[ctx->partial_block_buffer_length],
				buffer, copy_len);

			ctx->partial_block_buffer_length += copy_len;
			ctx->incoming_buffer = (const void *)
					((const char *)buffer + copy_len);
			ctx->incoming_buffer_length = len - copy_len;
		}

		/* The extra block should never contain more than 1 block */
		assert(ctx->partial_block_buffer_length <= SHA256_BLOCK_SIZE);

		/* If the extra block buffer contains exactly 1 block,
		 * it can be hashed.
		 */
		if (ctx->partial_block_buffer_length >= SHA256_BLOCK_SIZE) {
			ctx->partial_block_buffer_length = 0;

			ctx->job.buffer = ctx->partial_block_buffer;
			ctx->job.len = 1;
			ctx = (struct sha256_hash_ctx *)
				sha256_job_mgr_submit(&mgr->mgr, &ctx->job);
		}
	}

	return sha256_ctx_mgr_resubmit(mgr, ctx);
}
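
/*
 * Flag lifecycle for a multi-part hash, as a sketch (the FPU context
 * must already be live, i.e. callers wrap these in kernel_fpu_begin()/
 * kernel_fpu_end() as the functions below do; buf/len names are
 * illustrative):
 *
 *	ctx = sha256_ctx_mgr_submit(mgr, ctx, buf1, len1, HASH_FIRST);
 *	ctx = sha256_ctx_mgr_submit(mgr, ctx, buf2, len2, HASH_UPDATE);
 *	ctx = sha256_ctx_mgr_submit(mgr, ctx, buf3, len3, HASH_LAST);
 *	while (!ctx)
 *		ctx = sha256_ctx_mgr_flush(mgr);
 *
 * A NULL return means the job is parked in a SIMD lane waiting for
 * enough peers; it is not an error (errors come back in ctx->error).
 */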
static struct sha256_hash_ctx *sha256_ctx_mgr_flush(struct sha256_ctx_mgr *mgr)
{
	struct sha256_hash_ctx *ctx;

	while (1) {
		ctx = (struct sha256_hash_ctx *)
					sha256_job_mgr_flush(&mgr->mgr);

		/* If flush returned 0, there are no more jobs in flight. */
		if (!ctx)
			return NULL;

		/*
		 * If flush returned a job, resubmit the job to finish
		 * processing.
		 */
		ctx = sha256_ctx_mgr_resubmit(mgr, ctx);

		/*
		 * If sha256_ctx_mgr_resubmit returned a job, it is ready to
		 * be returned. Otherwise, all jobs currently being managed by
		 * the sha256_ctx_mgr still need processing. Loop.
		 */
		if (ctx)
			return ctx;
	}
}
static int sha256_mb_init(struct ahash_request *areq)
{
	struct sha256_hash_ctx *sctx = ahash_request_ctx(areq);

	hash_ctx_init(sctx);
	sctx->job.result_digest[0] = SHA256_H0;
	sctx->job.result_digest[1] = SHA256_H1;
	sctx->job.result_digest[2] = SHA256_H2;
	sctx->job.result_digest[3] = SHA256_H3;
	sctx->job.result_digest[4] = SHA256_H4;
	sctx->job.result_digest[5] = SHA256_H5;
	sctx->job.result_digest[6] = SHA256_H6;
	sctx->job.result_digest[7] = SHA256_H7;
	sctx->total_length = 0;
	sctx->partial_block_buffer_length = 0;
	sctx->status = HASH_CTX_STS_IDLE;

	return 0;
}
static int sha256_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
{
	int i;
	struct sha256_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
	__be32 *dst = (__be32 *) rctx->out;

	for (i = 0; i < 8; ++i)
		dst[i] = cpu_to_be32(sctx->job.result_digest[i]);

	return 0;
}
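
/*
 * The job manager keeps the digest as host-order (little-endian on x86)
 * u32 words, while the crypto API expects the canonical big-endian byte
 * stream.  For example, result_digest[0] == 0x6a09e667 (SHA256_H0) must
 * reach the caller as the bytes 6a 09 e6 67, which is what the
 * cpu_to_be32() conversion above produces.
 */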
static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
			struct mcryptd_alg_cstate *cstate, bool flush)
{
	int flag = HASH_UPDATE;
	int nbytes, err = 0;
	struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
	struct sha256_hash_ctx *sha_ctx;

	/* more work ? */
	while (!(rctx->flag & HASH_DONE)) {
		nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
		if (nbytes < 0) {
			err = nbytes;
			goto out;
		}
		/* check if the walk is done */
		if (crypto_ahash_walk_last(&rctx->walk)) {
			rctx->flag |= HASH_DONE;
			if (rctx->flag & HASH_FINAL)
				flag |= HASH_LAST;
		}
		sha_ctx = (struct sha256_hash_ctx *)
					ahash_request_ctx(&rctx->areq);
		kernel_fpu_begin();
		sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx,
						rctx->walk.data, nbytes, flag);
		if (!sha_ctx) {
			if (flush)
				sha_ctx = sha256_ctx_mgr_flush(cstate->mgr);
		}
		kernel_fpu_end();
		if (sha_ctx)
			rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		else {
			rctx = NULL;
			goto out;
		}
	}

	/* copy the results */
	if (rctx->flag & HASH_FINAL)
		sha256_mb_set_results(rctx);

out:
	*ret_rctx = rctx;
	return err;
}
static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
			    struct mcryptd_alg_cstate *cstate,
			    int err)
{
	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha256_hash_ctx *sha_ctx;
	struct mcryptd_hash_request_ctx *req_ctx;
	int ret;

	/* remove from work list */
	spin_lock(&cstate->work_lock);
	list_del(&rctx->waiter);
	spin_unlock(&cstate->work_lock);

	if (irqs_disabled())
		rctx->complete(&req->base, err);
	else {
		local_bh_disable();
		rctx->complete(&req->base, err);
		local_bh_enable();
	}

	/* check to see if there are other jobs that are done */
	sha_ctx = sha256_ctx_mgr_get_comp_ctx(cstate->mgr);
	while (sha_ctx) {
		req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		ret = sha_finish_walk(&req_ctx, cstate, false);
		if (req_ctx) {
			spin_lock(&cstate->work_lock);
			list_del(&req_ctx->waiter);
			spin_unlock(&cstate->work_lock);

			req = cast_mcryptd_ctx_to_req(req_ctx);
			if (irqs_disabled())
				req_ctx->complete(&req->base, ret);
			else {
				local_bh_disable();
				req_ctx->complete(&req->base, ret);
				local_bh_enable();
			}
		}
		sha_ctx = sha256_ctx_mgr_get_comp_ctx(cstate->mgr);
	}

	return 0;
}
static void sha256_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
			struct mcryptd_alg_cstate *cstate)
{
	unsigned long next_flush;
	unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);

	/* initialize tag */
	rctx->tag.arrival = jiffies;    /* tag the arrival time */
	rctx->tag.seq_num = cstate->next_seq_num++;
	next_flush = rctx->tag.arrival + delay;
	rctx->tag.expire = next_flush;

	spin_lock(&cstate->work_lock);
	list_add_tail(&rctx->waiter, &cstate->work_list);
	spin_unlock(&cstate->work_lock);

	mcryptd_arm_flusher(cstate, delay);
}
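
/*
 * Worked timing example (a sketch; HZ is config-dependent): with
 * FLUSH_INTERVAL == 1000 usec and HZ == 1000, usecs_to_jiffies(1000) is
 * one jiffy, so a request arriving at jiffies == J gets tag.expire ==
 * J + 1 and the flusher is armed one tick out.  Requests that find
 * lane-mates earlier complete through the normal submit path and never
 * wait for the flusher.
 */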
static int sha256_mb_update(struct ahash_request *areq)
{
	struct mcryptd_hash_request_ctx *rctx =
		container_of(areq, struct mcryptd_hash_request_ctx, areq);
	struct mcryptd_alg_cstate *cstate =
				this_cpu_ptr(sha256_mb_alg_state.alg_cstate);

	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha256_hash_ctx *sha_ctx;
	int ret = 0, nbytes;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, areq);

	nbytes = crypto_ahash_walk_first(req, &rctx->walk);
	if (nbytes < 0) {
		ret = nbytes;
		goto done;
	}

	if (crypto_ahash_walk_last(&rctx->walk))
		rctx->flag |= HASH_DONE;

	/* submit */
	sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq);
	sha256_mb_add_list(rctx, cstate);
	kernel_fpu_begin();
	sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
							nbytes, HASH_UPDATE);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}
static int sha256_mb_finup(struct ahash_request *areq)
{
	struct mcryptd_hash_request_ctx *rctx =
		container_of(areq, struct mcryptd_hash_request_ctx, areq);
	struct mcryptd_alg_cstate *cstate =
				this_cpu_ptr(sha256_mb_alg_state.alg_cstate);

	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha256_hash_ctx *sha_ctx;
	int ret = 0, flag = HASH_UPDATE, nbytes;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, areq);

	nbytes = crypto_ahash_walk_first(req, &rctx->walk);
	if (nbytes < 0) {
		ret = nbytes;
		goto done;
	}

	if (crypto_ahash_walk_last(&rctx->walk)) {
		rctx->flag |= HASH_DONE;
		flag = HASH_LAST;
	}

	/* submit */
	rctx->flag |= HASH_FINAL;
	sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq);
	sha256_mb_add_list(rctx, cstate);

	kernel_fpu_begin();
	sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
								nbytes, flag);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}
static int sha256_mb_final(struct ahash_request *areq)
{
	struct mcryptd_hash_request_ctx *rctx =
			container_of(areq, struct mcryptd_hash_request_ctx,
			areq);
	struct mcryptd_alg_cstate *cstate =
				this_cpu_ptr(sha256_mb_alg_state.alg_cstate);

	struct sha256_hash_ctx *sha_ctx;
	int ret = 0;
	u8 data;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, areq);

	rctx->flag |= HASH_DONE | HASH_FINAL;

	sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq);
	/* flag HASH_FINAL and 0 data size */
	sha256_mb_add_list(rctx, cstate);
	kernel_fpu_begin();
	sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0,
								HASH_LAST);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}
static int sha256_mb_export(struct ahash_request *areq, void *out)
{
	struct sha256_hash_ctx *sctx = ahash_request_ctx(areq);

	memcpy(out, sctx, sizeof(*sctx));

	return 0;
}

static int sha256_mb_import(struct ahash_request *areq, const void *in)
{
	struct sha256_hash_ctx *sctx = ahash_request_ctx(areq);

	memcpy(sctx, in, sizeof(*sctx));

	return 0;
}
static int sha256_mb_async_init_tfm(struct crypto_tfm *tfm)
{
	struct mcryptd_ahash *mcryptd_tfm;
	struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm);
	struct mcryptd_hash_ctx *mctx;

	mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha256-mb",
						CRYPTO_ALG_INTERNAL,
						CRYPTO_ALG_INTERNAL);
	if (IS_ERR(mcryptd_tfm))
		return PTR_ERR(mcryptd_tfm);
	mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
	mctx->alg_state = &sha256_mb_alg_state;
	ctx->mcryptd_tfm = mcryptd_tfm;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				sizeof(struct ahash_request) +
				crypto_ahash_reqsize(&mcryptd_tfm->base));

	return 0;
}
static void sha256_mb_async_exit_tfm(struct crypto_tfm *tfm)
{
	struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm);

	mcryptd_free_ahash(ctx->mcryptd_tfm);
}
static int sha256_mb_areq_init_tfm(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				sizeof(struct ahash_request) +
				sizeof(struct sha256_hash_ctx));

	return 0;
}
static void sha256_mb_areq_exit_tfm(struct crypto_tfm *tfm)
{
	struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm);

	mcryptd_free_ahash(ctx->mcryptd_tfm);
}
static struct ahash_alg sha256_mb_areq_alg = {
	.init		=	sha256_mb_init,
	.update		=	sha256_mb_update,
	.final		=	sha256_mb_final,
	.finup		=	sha256_mb_finup,
	.export		=	sha256_mb_export,
	.import		=	sha256_mb_import,
	.halg		=	{
		.digestsize	=	SHA256_DIGEST_SIZE,
		.statesize	=	sizeof(struct sha256_hash_ctx),
		.base		=	{
			.cra_name	 = "__sha256-mb",
			.cra_driver_name = "__intel_sha256-mb",
			.cra_priority	 = 100,
			/*
			 * use ASYNC flag as some buffers in multi-buffer
			 * algo may not have completed before the hashing
			 * thread terminates.
			 */
			.cra_flags	= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_INTERNAL,
			.cra_blocksize	= SHA256_BLOCK_SIZE,
			.cra_module	= THIS_MODULE,
			.cra_list	= LIST_HEAD_INIT
					(sha256_mb_areq_alg.halg.base.cra_list),
			.cra_init	= sha256_mb_areq_init_tfm,
			.cra_exit	= sha256_mb_areq_exit_tfm,
			.cra_ctxsize	= sizeof(struct sha256_hash_ctx),
		}
	}
};
static int sha256_mb_async_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_init(mcryptd_req);
}
static int sha256_mb_async_update(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_update(mcryptd_req);
}
static int sha256_mb_async_finup(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_finup(mcryptd_req);
}
static int sha256_mb_async_final(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_final(mcryptd_req);
}
static int sha256_mb_async_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_digest(mcryptd_req);
}
static int sha256_mb_async_export(struct ahash_request *req, void *out)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_export(mcryptd_req, out);
}
static int sha256_mb_async_import(struct ahash_request *req, const void *in)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
	struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
	struct mcryptd_hash_request_ctx *rctx;
	struct ahash_request *areq;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	rctx = ahash_request_ctx(mcryptd_req);
	areq = &rctx->areq;

	ahash_request_set_tfm(areq, child);
	ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
					rctx->complete, req);

	return crypto_ahash_import(mcryptd_req, in);
}
static struct ahash_alg sha256_mb_async_alg = {
	.init           = sha256_mb_async_init,
	.update         = sha256_mb_async_update,
	.final          = sha256_mb_async_final,
	.finup          = sha256_mb_async_finup,
	.export         = sha256_mb_async_export,
	.import         = sha256_mb_async_import,
	.digest         = sha256_mb_async_digest,
	.halg = {
		.digestsize     = SHA256_DIGEST_SIZE,
		.statesize      = sizeof(struct sha256_hash_ctx),
		.base = {
			.cra_name               = "sha256",
			.cra_driver_name        = "sha256_mb",
			.cra_priority           = 200,
			.cra_flags              = CRYPTO_ALG_TYPE_AHASH |
							CRYPTO_ALG_ASYNC,
			.cra_blocksize          = SHA256_BLOCK_SIZE,
			.cra_type               = &crypto_ahash_type,
			.cra_module             = THIS_MODULE,
			.cra_list               = LIST_HEAD_INIT
				(sha256_mb_async_alg.halg.base.cra_list),
			.cra_init               = sha256_mb_async_init_tfm,
			.cra_exit               = sha256_mb_async_exit_tfm,
			.cra_ctxsize            = sizeof(struct sha256_mb_ctx),
		},
	},
};
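
/*
 * Reaching this algorithm from kernel code is ordinary ahash usage; a
 * minimal sketch (error handling elided; my_done_cb, my_ctx, sgl,
 * digest_out and data_len are illustrative names, and "sha256_mb"
 * selects this driver explicitly rather than by priority):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256_mb", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done_cb, my_ctx);
 *	ahash_request_set_crypt(req, sgl, digest_out, data_len);
 *	crypto_ahash_digest(req);	// -EINPROGRESS while queued
 *
 * Throughput comes from many requests in flight at once; a single
 * serial caller gains nothing over the plain AVX2 sha256 driver.
 */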
static unsigned long sha256_mb_flusher(struct mcryptd_alg_cstate *cstate)
{
	struct mcryptd_hash_request_ctx *rctx;
	unsigned long cur_time;
	unsigned long next_flush = 0;
	struct sha256_hash_ctx *sha_ctx;

	cur_time = jiffies;

	while (!list_empty(&cstate->work_list)) {
		rctx = list_entry(cstate->work_list.next,
				struct mcryptd_hash_request_ctx, waiter);
		if (time_before(cur_time, rctx->tag.expire))
			break;
		kernel_fpu_begin();
		sha_ctx = (struct sha256_hash_ctx *)
					sha256_ctx_mgr_flush(cstate->mgr);
		kernel_fpu_end();
		if (!sha_ctx) {
			pr_err("sha256_mb error: nothing got flushed for non-empty list\n");
			break;
		}
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		sha_finish_walk(&rctx, cstate, true);
		sha_complete_job(rctx, cstate, 0);
	}

	if (!list_empty(&cstate->work_list)) {
		rctx = list_entry(cstate->work_list.next,
				struct mcryptd_hash_request_ctx, waiter);
		/* get the hash context and then flush time */
		next_flush = rctx->tag.expire;
		mcryptd_arm_flusher(cstate, get_delay(next_flush));
	}
	return next_flush;
}
static int __init sha256_mb_mod_init(void)
{

	int cpu;
	int err;
	struct mcryptd_alg_cstate *cpu_state;

	/* check for dependent cpu features */
	if (!boot_cpu_has(X86_FEATURE_AVX2) ||
	    !boot_cpu_has(X86_FEATURE_BMI2))
		return -ENODEV;

	/* initialize multibuffer structures */
	sha256_mb_alg_state.alg_cstate = alloc_percpu
						(struct mcryptd_alg_cstate);

	sha256_job_mgr_init = sha256_mb_mgr_init_avx2;
	sha256_job_mgr_submit = sha256_mb_mgr_submit_avx2;
	sha256_job_mgr_flush = sha256_mb_mgr_flush_avx2;
	sha256_job_mgr_get_comp_job = sha256_mb_mgr_get_comp_job_avx2;

	if (!sha256_mb_alg_state.alg_cstate)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu);
		cpu_state->next_flush = 0;
		cpu_state->next_seq_num = 0;
		cpu_state->flusher_engaged = false;
		INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
		cpu_state->cpu = cpu;
		cpu_state->alg_state = &sha256_mb_alg_state;
		cpu_state->mgr = kzalloc(sizeof(struct sha256_ctx_mgr),
					GFP_KERNEL);
		if (!cpu_state->mgr)
			goto err2;
		sha256_ctx_mgr_init(cpu_state->mgr);
		INIT_LIST_HEAD(&cpu_state->work_list);
		spin_lock_init(&cpu_state->work_lock);
	}
	sha256_mb_alg_state.flusher = &sha256_mb_flusher;

	err = crypto_register_ahash(&sha256_mb_areq_alg);
	if (err)
		goto err2;
	err = crypto_register_ahash(&sha256_mb_async_alg);
	if (err)
		goto err1;

	return 0;
err1:
	crypto_unregister_ahash(&sha256_mb_areq_alg);
err2:
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu);
		kfree(cpu_state->mgr);
	}
	free_percpu(sha256_mb_alg_state.alg_cstate);
	return -ENODEV;
}
static void __exit sha256_mb_mod_fini(void)
{
	int cpu;
	struct mcryptd_alg_cstate *cpu_state;

	crypto_unregister_ahash(&sha256_mb_async_alg);
	crypto_unregister_ahash(&sha256_mb_areq_alg);
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu);
		kfree(cpu_state->mgr);
	}
	free_percpu(sha256_mb_alg_state.alg_cstate);
}
module_init(sha256_mb_mod_init);
module_exit(sha256_mb_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, multi buffer accelerated");

MODULE_ALIAS_CRYPTO("sha256");