// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2018-2019, 2023 NXP
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |         |--->| (operation) |
 *       .              |         |    | (load ctx2) |
 *       .              |         |    ---------------
 * ---------------      |         |
 * | JobDesc #3  |------|         |
 * | *(packet 3) |                |
 * ---------------                |
 *       .                        |
 *       .                        |
 * ---------------                |
 * | JobDesc #4  |----------------|
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
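/*
 * Request flow in this driver, in crypto API terms (summary of the code
 * below): ahash_init() only seeds the s/w state; the first ahash_update()
 * submits an INIT job built on sh_desc_update_first, subsequent updates
 * reuse sh_desc_update, and final/finup/digest use sh_desc_fin or
 * sh_desc_digest.
 */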
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamhash_desc.h"
#include <crypto/engine.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
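/*
 * Note: the extra HASH_MSG_LEN bytes hold the running message length the
 * MDHA keeps alongside the running digest, so the h/w context that is
 * saved and restored between UPDATE jobs is digest + length counter.
 */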
static struct list_head hash_list;
/* ahash per-session context */
struct caam_hash_ctx {
	struct crypto_engine_ctx enginectx;
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	enum dma_data_direction dir;
	enum dma_data_direction key_dir;
	struct device *jrdev;
	int ctx_len;
	struct alginfo adata;
};
/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	int ctx_dma_len;
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen;
	int next_buflen;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req) ____cacheline_aligned;
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	struct ahash_edesc *edesc;
	void (*ahash_op_done)(struct device *jrdev, u32 *desc, u32 err,
			      void *context);
};
struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
static inline bool is_cmac_aes(u32 algtype)
{
	return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
	       (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC);
}
/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}
/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
				     struct sec4_sg_entry *sec4_sg,
				     struct caam_hash_state *state)
{
	int buflen = state->buflen;

	if (!buflen)
		return 0;

	state->buf_dma = dma_map_single(jrdev, state->buf, buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map buf\n");
		state->buf_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);

	return 0;
}
/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}
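/*
 * The three helpers above only map; unmapping is centralized in
 * ahash_unmap()/ahash_unmap_ctx() further below, which are called from
 * the done callbacks or from the error paths of the job-preparation
 * functions.
 */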
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 *desc;

	ctx->adata.key_virt = ctx->key;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__)
			     ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	return 0;
}
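/*
 * The four descriptors above map onto the CAAM algorithm-state machine:
 * AS_INIT (first update, context out only), AS_UPDATE (context in and
 * out), AS_FINALIZE (context in, digest out) and AS_INITFINAL (one-shot
 * digest with no context restore).
 */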
static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* key is immediate data for INIT and INITFINAL states */
	ctx->adata.key_virt = ctx->key;

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)
			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return 0;
}
static int acmac_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)
			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	return 0;
}
/* Digest the key if it is longer than the block size */
static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
			   u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t key_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	init_job_desc(desc, 0);

	key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(jrdev, key_dma)) {
		dev_err(jrdev, "unable to map key memory\n");
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, key_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, key_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (ret == -EINPROGRESS) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;

		print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, key,
				     digestsize, 1);
	}
	dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}
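/*
 * Example: per the HMAC construction, a key longer than the block size
 * is first hashed. hash_digest_key() does that on-device, so e.g. a
 * 100-byte hmac(sha256) key is replaced by its 32-byte digest before
 * split-key generation.
 */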
static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	int ret;
	u8 *hashed_key = NULL;

	dev_dbg(jrdev, "keylen %d\n", keylen);

	if (keylen > blocksize) {
		unsigned int aligned_len =
			ALIGN(keylen, dma_get_cache_alignment());

		if (aligned_len < keylen)
			return -EOVERFLOW;

		hashed_key = kmemdup(key, keylen, GFP_KERNEL);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.key_inline = true;
		ctx->adata.keylen = keylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
			goto bad_free_key;

		memcpy(ctx->key, key, keylen);

		/*
		 * In case |user key| > |derived key|, using DKP<imm,imm>
		 * would result in invalid opcodes (last bytes of user key) in
		 * the resulting descriptor. Use DKP<ptr,imm> instead => both
		 * virtual and dma key addresses are needed.
		 */
		if (keylen > ctx->adata.keylen_pad)
			dma_sync_single_for_device(ctx->jrdev,
						   ctx->adata.key_dma,
						   ctx->adata.keylen_pad,
						   DMA_TO_DEVICE);
	} else {
		ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
				    keylen, CAAM_MAX_HASH_KEY_SIZE);
		if (ret)
			goto bad_free_key;
	}

	kfree(hashed_key);
	return ahash_set_sh_desc(ahash);
 bad_free_key:
	kfree(hashed_key);
	return -EINVAL;
}
static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct device *jrdev = ctx->jrdev;

	if (keylen != AES_KEYSIZE_128)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen,
				   DMA_TO_DEVICE);
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);

	return axcbc_set_sh_desc(ahash);
}
static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int err;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	/* key is immediate data for all cmac shared descriptors */
	ctx->adata.key_virt = key;
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	return acmac_set_sh_desc(ahash);
}
/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @bklog: stored to determine if the request needs backlog
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	bool bklog;
	u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[];
};
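/*
 * The link table is carved out of the same allocation as the job
 * descriptor (see the struct_size() call in ahash_edesc_alloc() below),
 * so a single kzalloc()/kfree() covers both.
 */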
static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, state->buflen,
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}
static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len,
				   u32 flag)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
		state->ctx_dma = 0;
	}

	ahash_unmap(dev, edesc, req, dst_len);
}
static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
				  void *context, enum dma_data_direction dir)
{
	struct ahash_request *req = context;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int ecode = 0;
	bool has_bklog;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = state->edesc;
	has_bklog = edesc->bklog;

	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir);
	memcpy(req->result, state->caam_ctx, digestsize);
	kfree(edesc);

	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		ahash_request_complete(req, ecode);
	else
		crypto_finalize_hash_request(jrp->engine, req, ecode);
}
static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	ahash_done_cpy(jrdev, desc, err, context, DMA_FROM_DEVICE);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	ahash_done_cpy(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
}
static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
				     void *context, enum dma_data_direction dir)
{
	struct ahash_request *req = context;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ecode = 0;
	bool has_bklog;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = state->edesc;
	has_bklog = edesc->bklog;
	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir);
	kfree(edesc);

	scatterwalk_map_and_copy(state->buf, req->src,
				 req->nbytes - state->next_buflen,
				 state->next_buflen, 0);
	state->buflen = state->next_buflen;

	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
			     state->buflen, 1);

	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump_debug("result@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
				     digestsize, 1);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		ahash_request_complete(req, ecode);
	else
		crypto_finalize_hash_request(jrp->engine, req, ecode);
}
static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	ahash_done_switch(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	ahash_done_switch(jrdev, desc, err, context, DMA_FROM_DEVICE);
}
/*
 * Allocate an enhanced descriptor, which contains the hardware descriptor
 * and space for hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	struct ahash_edesc *edesc;

	edesc = kzalloc(struct_size(edesc, sec4_sg, sg_num), flags);
	if (!edesc)
		return NULL;

	state->edesc = edesc;

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}
static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) *
				      pad_sg_nents(first_sg + nents);

		sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}
static int ahash_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = ahash_request_cast(areq);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(crypto_ahash_reqtfm(req));
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u32 *desc = state->edesc->hw_desc;
	int ret;

	state->edesc->bklog = true;

	ret = caam_jr_enqueue(jrdev, desc, state->ahash_op_done, req);

	if (ret == -ENOSPC && engine->retry_support)
		return ret;

	if (ret != -EINPROGRESS) {
		ahash_unmap(jrdev, state->edesc, req, 0);
		kfree(state->edesc);
	} else {
		ret = 0;
	}

	return ret;
}
static int ahash_enqueue_req(struct device *jrdev,
			     void (*cbk)(struct device *jrdev, u32 *desc,
					 u32 err, void *context),
			     struct ahash_request *req,
			     int dst_len, enum dma_data_direction dir)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct ahash_edesc *edesc = state->edesc;
	u32 *desc = edesc->hw_desc;
	int ret;

	state->ahash_op_done = cbk;

	/*
	 * Only backlog requests are sent to crypto-engine since the others
	 * can be handled by CAAM, if free, especially since the JR has up to
	 * 1024 entries (more than the 10 entries of crypto-engine).
	 */
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		ret = crypto_transfer_hash_request_to_engine(jrpriv->engine,
							     req);
	else
		ret = caam_jr_enqueue(jrdev, desc, cbk, req);

	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
		ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir);
		kfree(edesc);
	}

	return ret;
}
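/*
 * Note the two submission paths above: requests that may not backlog go
 * straight to the job ring, while CRYPTO_TFM_REQ_MAY_BACKLOG requests
 * are funneled through crypto-engine, which re-submits them via
 * ahash_do_one_req() once the ring has space.
 */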
/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int blocksize = crypto_ahash_blocksize(ahash);
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	    (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		int pad_nents;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents);
		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update,
					  ctx->sh_desc_update_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents)
			sg_to_sec4_sg_last(req->src, src_len,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
		else
			sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
					    1);

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				  to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_bi, req,
					ctx->ctx_len, DMA_BIDIRECTIONAL);
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_bytes;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin,
				  ctx->sh_desc_fin_dma);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);
	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
				 digestsize, DMA_BIDIRECTIONAL);
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}
static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
				 digestsize, DMA_BIDIRECTIONAL);
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}
static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	state->buf_dma = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req, digestsize,
				 DMA_FROM_DEVICE);
}
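/*
 * ahash_digest() also backs ahash_finup_first() below, covering the
 * init+update+final one-shot case; a zero-length request takes the
 * src_nents == 0 path and appends a zero-length SEQ IN pointer.
 */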
/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int buflen = state->buflen;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	if (buflen) {
		state->buf_dma = dma_map_single(jrdev, buf, buflen,
						DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, state->buf_dma)) {
			dev_err(jrdev, "unable to map src\n");
			goto unmap;
		}

		append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
	}

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret)
		goto unmap;

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req,
				 digestsize, DMA_FROM_DEVICE);
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}
/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int blocksize = crypto_ahash_blocksize(ahash);
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	    (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		int pad_nents;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		pad_nents = pad_sg_nents(1 + mapped_nents);
		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, pad_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
		if (ret)
			goto unmap_ctx;

		sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
					ctx->ctx_len, DMA_TO_DEVICE);
		if ((ret != -EINPROGRESS) && (ret != -EBUSY))
			return ret;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
	if (ret)
		goto unmap;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret)
		goto unmap;

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req,
				 digestsize, DMA_FROM_DEVICE);
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}
/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int to_hash;
	int blocksize = crypto_ahash_blocksize(ahash);
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (blocksize - 1);
	to_hash = req->nbytes - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	    (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
					ctx->ctx_len, DMA_TO_DEVICE);
		if ((ret != -EINPROGRESS) && (ret != -EBUSY))
			return ret;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(buf, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->ctx_dma_len = 0;
	state->buf_dma = 0;
	state->buflen = 0;
	state->next_buflen = 0;

	return 0;
}
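/*
 * State-machine dispatch: ahash_init() points the ops at the *_first
 * variants; once data has actually been hashed, ahash_update_first()/
 * ahash_update_no_ctx() retarget them at the *_ctx variants so that
 * later requests restore the running context from state->caam_ctx.
 */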
static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->final(req);
}
static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_export_state *export = out;
	u8 *buf = state->buf;
	int len = state->buflen;

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}
static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}
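/*
 * Only the s/w view (buffer, h/w context snapshot and op pointers) is
 * serialized by export(); DMA addresses are deliberately dropped, and
 * import() re-zeroes them via memset() so the next job remaps everything.
 */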
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};
/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	}, {
		.hmac_name = "xcbc(aes)",
		.hmac_driver_name = "xcbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = axcbc_setkey,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
	}, {
		.hmac_name = "cmac(aes)",
		.hmac_driver_name = "cmac-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = acmac_setkey,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC,
	},
};
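/*
 * Minimal usage sketch (illustrative only, not part of this driver):
 * once registered, these algorithms are reached through the generic
 * ahash API, e.g.:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_ahash_setkey(tfm, key, keylen);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, cb, priv);
 *	ahash_request_set_crypt(req, sgl, digest, nbytes);
 *	crypto_ahash_digest(req);	// may return -EINPROGRESS
 *
 * "cb", "priv", "sgl" and friends are caller-supplied placeholders.
 */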
struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	struct ahash_alg ahash_alg;
};
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA224_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA384_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	const size_t sh_desc_update_offset = offsetof(struct caam_hash_ctx,
						      sh_desc_update);
	dma_addr_t dma_addr;
	struct caam_drv_private *priv;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	priv = dev_get_drvdata(ctx->jrdev->parent);

	if (is_xcbc_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		ctx->key_dir = DMA_BIDIRECTIONAL;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
		ctx->ctx_len = 48;
	} else if (is_cmac_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		ctx->key_dir = DMA_NONE;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
		ctx->ctx_len = 32;
	} else {
		if (priv->era >= 6) {
			ctx->dir = DMA_BIDIRECTIONAL;
			ctx->key_dir = alg->setkey ? DMA_TO_DEVICE : DMA_NONE;
		} else {
			ctx->dir = DMA_TO_DEVICE;
			ctx->key_dir = DMA_NONE;
		}
		ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
		ctx->ctx_len = runninglen[(ctx->adata.algtype &
					   OP_ALG_ALGSEL_SUBMASK) >>
					  OP_ALG_ALGSEL_SHIFT];
	}

	if (ctx->key_dir != DMA_NONE) {
		ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
							  ARRAY_SIZE(ctx->key),
							  ctx->key_dir,
							  DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) {
			dev_err(ctx->jrdev, "unable to map key\n");
			caam_jr_free(ctx->jrdev);
			return -ENOMEM;
		}
	}

	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
					offsetof(struct caam_hash_ctx, key) -
					sh_desc_update_offset,
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map shared descriptors\n");

		if (ctx->key_dir != DMA_NONE)
			dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
					       ARRAY_SIZE(ctx->key),
					       ctx->key_dir,
					       DMA_ATTR_SKIP_CPU_SYNC);

		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	ctx->sh_desc_update_dma = dma_addr;
	ctx->sh_desc_update_first_dma = dma_addr +
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_first) -
					sh_desc_update_offset;
	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
						   sh_desc_fin) -
					sh_desc_update_offset;
	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
						      sh_desc_digest) -
					sh_desc_update_offset;

	ctx->enginectx.op.do_one_request = ahash_do_one_req;

	crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state));

	/*
	 * For keyed hash algorithms shared descriptors
	 * will be created later in setkey() callback
	 */
	return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
}
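/*
 * All four shared descriptors live in one contiguous region of the ctx
 * (sh_desc_update up to, but not including, key), so a single
 * dma_map_single_attrs() above covers them; the individual DMA handles
 * are derived purely by offsetof() arithmetic.
 */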
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);

	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
			       offsetof(struct caam_hash_ctx, key) -
			       offsetof(struct caam_hash_ctx, sh_desc_update),
			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (ctx->key_dir != DMA_NONE)
		dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
				       ARRAY_SIZE(ctx->key), ctx->key_dir,
				       DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}
void caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx) + crypto_dma_padding();
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;

	t_alg->alg_type = template->alg_type;

	return t_alg;
}
int caam_algapi_hash_init(struct device *ctrldev)
{
	int i = 0, err = 0;
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 md_inst, md_vid;

	/*
	 * Register crypto algorithms the device supports. First, identify
	 * presence and attributes of MD block.
	 */
	if (priv->era < 10) {
		struct caam_perfmon __iomem *perfmon = &priv->jr[0]->perfmon;

		md_vid = (rd_reg32(&perfmon->cha_id_ls) &
			  CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
		md_inst = (rd_reg32(&perfmon->cha_num_ls) &
			   CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
	} else {
		u32 mdha = rd_reg32(&priv->jr[0]->vreg.mdha);

		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
		md_inst = mdha & CHA_VER_NUM_MASK;
	}

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!md_inst)
		return 0;

	/* Limit digest size based on LP256 */
	if (md_vid == CHA_VER_VID_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (is_mdha(alg->alg_type) &&
		    alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				alg->hmac_driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)
			continue;

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}
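/*
 * caam_algapi_hash_init()/caam_algapi_hash_exit() are not module
 * init/exit hooks here; they are expected to be invoked by the CAAM
 * core driver once a job ring is available (an assumption based on the
 * extern declarations in this driver's intern.h).
 */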