/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |         |--->| (operation) |
 *       .              |         |    | (load ctx2) |
 *       .              |         |    ---------------
 * ---------------      |         |
 * | JobDesc #3  |------|         |
 * | *(packet 3) |                |
 * ---------------                |
 *       .                        |
 *       .                        |
 * ---------------                |
 * | JobDesc #4  |----------------|
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

/* length of descriptors text */
#define DESC_AHASH_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
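
/*
 * The shared descriptors below import/export this context through the
 * class 2 context register (LDST_SRCDST_BYTE_CONTEXT), so a running
 * hash can be suspended after one job and resumed by the next.
 */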

/* for print_hex_dumps with line references */
#ifdef DEBUG
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	struct device *jrdev;
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
	dma_addr_t sh_desc_update_dma;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	dma_addr_t sh_desc_finup_dma;
	u32 alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	dma_addr_t key_dma;
	int ctx_len;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
};

/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN];
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};
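
/*
 * buf_0/buf_1 form a ping-pong pair selected by current_buf: one holds
 * the sub-block tail left over from the previous update (still to be
 * hashed), while the other accumulates the tail of the current update.
 * Only whole blocks are ever submitted to the accelerator.
 */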

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline void map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				       struct caam_hash_state *state,
				       int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
}

/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}

/* Map current buffer in state and put it in link table */
static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
					    struct sec4_sg_entry *sec4_sg,
					    u8 *buf, int buflen)
{
	dma_addr_t buf_dma;

	buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);

	return buf_dma;
}

/* Map req->src and put it in link table */
static inline void src_map_to_sec4_sg(struct device *jrdev,
				      struct scatterlist *src, int src_nents,
				      struct sec4_sg_entry *sec4_sg,
				      bool chained)
{
	dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);
	sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
}

/*
 * Only put the buffer in the link table if it contains data; either way,
 * a buffer that was previously mapped must first be unmapped.
 */
static inline dma_addr_t
try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
		       u8 *buf, dma_addr_t buf_dma, int buflen,
		       int last_buflen)
{
	if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
		dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
	if (buflen)
		buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
	else
		buf_dma = 0;

	return buf_dma;
}

/* Map state->caam_ctx, and add it to link table */
static inline void ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len,
				      struct sec4_sg_entry *sec4_sg,
				      u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
}

/* Common shared descriptor commands */
static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
			  ctx->split_key_len, CLASS_2 |
			  KEY_DEST_MDHA_SPLIT | KEY_ENC);
}

/* Append key if it has been set */
static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	if (ctx->split_key_len) {
		/* Skip if already shared */
		key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					   JUMP_COND_SHRD);

		append_key_ahash(desc, ctx);

		set_jump_tgt_here(desc, key_jump_cmd);
	}

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}

/*
 * For ahash, read data from seqin following state->caam_ctx,
 * and write resulting class2 context to seqout, which may be state->caam_ctx
 * or req->result
 */
static inline void ahash_append_load_str(u32 *desc, int digestsize)
{
	/* Calculate remaining bytes to read */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);

	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}

/*
 * For ahash update, final and finup, import context, read and write to seqout
 */
static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
					 int digestsize,
					 struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}

/* For ahash firsts and digest, read and write to seqout */
static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
				     int digestsize, struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}
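
/*
 * ahash_set_sh_desc() below builds one shared descriptor per request
 * type, differing mainly in the MDHA algorithm state selected:
 *   update       - OP_ALG_AS_UPDATE, context in and out
 *   update_first - OP_ALG_AS_INIT, context out only
 *   final/finup  - OP_ALG_AS_FINALIZE, context in, digest out
 *   digest       - OP_ALG_AS_INITFINAL, one-shot, digest out
 */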
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 have_key = 0;
	u32 *desc;

	if (ctx->split_key_len)
		have_key = OP_ALG_AAI_HMAC_PRECOMP;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
			 OP_ALG_ENCRYPT);

	/* Load data and write to result or context */
	ahash_append_load_str(desc, ctx->ctx_len);

	ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
			  ctx->ctx_len, ctx);

	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
						       desc_bytes(desc),
						       DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_finup shared descriptor */
	desc = ctx->sh_desc_finup;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
			  digestsize, ctx);

	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			      u32 keylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, keylen,
			     ctx->alg_op);
}

/* Digest the key if it is longer than the algorithm's block size */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret = 0;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion_interruptible(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
#endif
	}

	/* Unmap the source with its original length before updating *keylen */
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
	*keylen = digestsize;

	kfree(desc);

	return ret;
}
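
/*
 * An MDHA split key is the pair of inner/outer HMAC pad digests
 * precomputed from the raw key, which is why split_key_len below is
 * twice the digest size of the selected algorithm; gen_split_key()
 * runs that precomputation on the hardware.
 */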
static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ret = 0;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
				     GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto badkey;
		key = hashed_key;
	}

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

#ifdef DEBUG
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_hash_key(ctx, key, keylen);
	if (ret)
		goto badkey;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		ret = -ENOMEM;
		goto map_err;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len, 1);
#endif

	ret = ahash_set_sh_desc(ahash);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
				 DMA_TO_DEVICE);
	}

map_err:
	kfree(hashed_key);
	return ret;
badkey:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @chained: if source is chained
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	bool chained;
	int src_nents;
	int sec4_sg_bytes;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};

static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	if (edesc->src_nents)
		dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
				     DMA_TO_DEVICE, edesc->chained);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}

static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len,
				   u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma)
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
	ahash_unmap(dev, edesc, req, dst_len);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
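
/*
 * The four completion callbacks above differ only in what they unmap:
 * ahash_done and ahash_done_ctx_src return a digest (result unmapped
 * from the device), while ahash_done_bi and ahash_done_ctx_dst leave
 * the running context mapped out for a follow-on job.
 */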

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1, last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *sh_desc = ctx->sh_desc_update, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_dma;
	int src_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
				       &chained);
		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);

		ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				   edesc->sec4_sg, DMA_BIDIRECTIONAL);

		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
							edesc->sec4_sg + 1,
							buf, state->buf_dma,
							*buflen, last_buflen);

		if (src_nents) {
			src_map_to_sec4_sg(jrdev, req->src, src_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   chained);
			if (*next_buflen) {
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
				state->current_buf = !state->current_buf;
			}
		} else {
			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
							SEC4_SG_LEN_FIN;
		}

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				       to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (!ret) {
			ret = -EINPROGRESS;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_BIDIRECTIONAL);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}
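
/*
 * Example of the buffering arithmetic above for SHA-256 (64-byte
 * blocks): with 12 bytes already buffered, an update of 100 bytes
 * gives in_len = 112, so next_buflen = 112 & 63 = 48 and to_hash = 64;
 * one block is hashed now and 48 bytes are carried in next_buf. When
 * in_len is less than one block, to_hash is 0 and the data is only
 * buffered; no job is submitted.
 */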
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_fin, *desc;
	dma_addr_t ptr = ctx->sh_desc_fin_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	edesc->src_nents = 0;

	ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
			   DMA_TO_DEVICE);

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);
	/* Mark the final link table entry, indexing by entries, not bytes */
	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}
static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_finup, *desc;
	dma_addr_t ptr = ctx->sh_desc_finup_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	src_nents = __sg_count(req->src, req->nbytes, &chained);
	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->chained = chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);

	ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
			   DMA_TO_DEVICE);

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
			   sec4_sg_src_index, chained);

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
			       buflen + req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}
static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, sec4_sg_bytes;
	dma_addr_t src_dma;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	u32 options;
	int sh_len;

	src_nents = sg_count(req->src, req->nbytes, &chained);
	dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,
			   chained);
	sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
			DESC_JOB_IO_LEN, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			  DESC_JOB_IO_LEN;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	edesc->src_nents = src_nents;
	edesc->chained = chained;

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (src_nents) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		src_dma = edesc->sec4_sg_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, options);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
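
/*
 * Note that ahash_digest() is a pure one-shot: no context is imported
 * or exported, and a single-segment source is passed by address
 * (options = 0) while multi-segment sources go through the sec4 link
 * table with LDST_SGF set.
 */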
/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	edesc->sec4_sg_bytes = 0;
	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	edesc->src_nents = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1;
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents;
	struct ahash_edesc *edesc;
	u32 *desc, *sh_desc = ctx->sh_desc_update_first;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	bool chained = false;
	int ret = 0;
	int sh_len;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
				       &chained);
		sec4_sg_bytes = (1 + src_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);

		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
						    buf, *buflen);
		src_map_to_sec4_sg(jrdev, req->src, src_nents,
				   edesc->sec4_sg + 1, chained);
		if (*next_buflen) {
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);
			state->current_buf = !state->current_buf;
		}

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	bool chained = false;
	int sh_len;
	int ret = 0;

	src_nents = __sg_count(req->src, req->nbytes, &chained);
	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->chained = chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
						state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
			   chained);

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
			  req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *next_buflen = state->current_buf ?
		&state->buflen_1 : &state->buflen_0;
	int to_hash;
	u32 *sh_desc = ctx->sh_desc_update_first, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	int sec4_sg_bytes, src_nents;
	dma_addr_t src_dma;
	u32 options;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_count(req->src, req->nbytes - (*next_buflen),
				     &chained);
		dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
				   DMA_TO_DEVICE, chained);
		sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);

		if (src_nents) {
			sg_to_sec4_sg_last(req->src, src_nents,
					   edesc->sec4_sg, 0);
			src_dma = edesc->sec4_sg_dma;
			options = LDST_SGF;
		} else {
			src_dma = sg_dma_address(req->src);
			options = 0;
		}

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, src_dma, to_hash, options);

		map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
				      req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}
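
/*
 * ahash_init() installs the *_first handlers; once a first job has run
 * and a hardware context exists, ahash_update_first() and
 * ahash_update_no_ctx() repoint state->update/finup/final at the *_ctx
 * variants. Requests that never accumulate a full block stay on the
 * *_no_ctx path, which hashes straight from the software buffer.
 */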
static int ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(struct caam_hash_ctx));
	memcpy(out + sizeof(struct caam_hash_ctx), state,
	       sizeof(struct caam_hash_state));

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(struct caam_hash_ctx));
	memcpy(state, in + sizeof(struct caam_hash_ctx),
	       sizeof(struct caam_hash_state));

	return 0;
}

struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
	u32 alg_op;
};

/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
};
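
/*
 * Each template above is registered twice by caam_algapi_hash_init():
 * once keyed as hmac(<name>) and once unkeyed as <name>, selected by
 * the "keyed" argument to caam_hash_alloc().
 */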
struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	int alg_op;
	struct ahash_alg ahash_alg;
};

static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA224_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA384_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	int ret = 0;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}
	/* copy descriptor header template value */
	ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;

	ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));

	ret = ahash_set_sh_desc(ahash);

	return ret;
}

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_update_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
				 desc_bytes(ctx->sh_desc_update),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_update_first_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
				 desc_bytes(ctx->sh_desc_update_first),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_fin_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
				 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
	if (ctx->sh_desc_digest_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
				 desc_bytes(ctx->sh_desc_digest),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_finup_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
				 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}

static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
	alg->cra_type = &crypto_ahash_type;

	t_alg->alg_type = template->alg_type;
	t_alg->alg_op = template->alg_op;

	return t_alg;
}

static int __init caam_algapi_hash_init(void)
{
	int i = 0, err = 0;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_hash_alg *t_alg;

		/* register hmac version */
		t_alg = caam_hash_alloc(&driver_hash[i], true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				driver_hash[i].driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->ahash_alg.halg.base.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		/* register unkeyed version */
		t_alg = caam_hash_alloc(&driver_hash[i], false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				driver_hash[i].driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->ahash_alg.halg.base.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}

module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");
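
/*
 * Illustrative sketch only (not part of this driver), assuming a
 * caller elsewhere in the kernel: once registered, these algorithms
 * are reached through the generic ahash API, and the crypto core
 * prefers this driver when CAAM_CRA_PRIORITY wins the selection:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *	u8 digest[SHA256_DIGEST_SIZE];
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   done_cb, priv);	// done_cb/priv: caller's
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *	err = crypto_ahash_digest(req);	// -EINPROGRESS means wait for done_cb
 */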