// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2018-2019, 2023 NXP
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |     |------->| (operation) |
 *       .              |     |        | (load ctx2) |
 *       .              |     |        ---------------
 * ---------------      |     |
 * | JobDesc #3  |------|     |
 * | *(packet 3) |            |
 * ---------------            |
 *       .                    |
 *       .                    |
 * ---------------            |
 * | JobDesc #4  |------------
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamhash_desc.h"
#include <crypto/engine.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
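/*
 * Worked example (illustrative): the MDHA running context for SHA-256 holds
 * the 32-byte running digest plus the 8-byte message length, so its ctx_len
 * is HASH_MSG_LEN + SHA256_DIGEST_SIZE = 40 bytes, bounded by MAX_CTX_LEN
 * (8 + 64 = 72 bytes, sized for SHA-512).
 */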
static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	struct crypto_engine_ctx enginectx;
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	enum dma_data_direction dir;
	enum dma_data_direction key_dir;
	struct device *jrdev;
	int ctx_len;
	struct alginfo adata;
};
/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	int ctx_dma_len;
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen;
	int next_buflen;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req) ____cacheline_aligned;
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	struct ahash_edesc *edesc;
	void (*ahash_op_done)(struct device *jrdev, u32 *desc, u32 err,
			      void *context);
};
struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
static inline bool is_cmac_aes(u32 algtype)
{
	return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
	       (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC);
}
/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}
/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
				     struct sec4_sg_entry *sec4_sg,
				     struct caam_hash_state *state)
{
	int buflen = state->buflen;

	if (!buflen)
		return 0;

	state->buf_dma = dma_map_single(jrdev, state->buf, buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map buf\n");
		state->buf_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);

	return 0;
}
/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 *desc;

	ctx->adata.key_virt = ctx->key;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__)
			     ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	return 0;
}
static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* key is immediate data for INIT and INITFINAL states */
	ctx->adata.key_virt = ctx->key;

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)
			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return 0;
}
static int acmac_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)
			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	return 0;
}
/* Digest hash size if it is too large */
static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
			   int digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t key_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(jrdev, key_dma)) {
		dev_err(jrdev, "unable to map key memory\n");
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, key_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, key_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (ret == -EINPROGRESS) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;

		print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, key,
				     digestsize, 1);
	}
	dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}
static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	int ret;
	u8 *hashed_key = NULL;

	dev_dbg(jrdev, "keylen %d\n", keylen);

	if (keylen > blocksize) {
		unsigned int aligned_len =
			ALIGN(keylen, dma_get_cache_alignment());

		if (aligned_len < keylen)
			return -EOVERFLOW;

		hashed_key = kmemdup(key, keylen, GFP_KERNEL);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.key_inline = true;
		ctx->adata.keylen = keylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
			goto bad_free_key;

		memcpy(ctx->key, key, keylen);

		/*
		 * In case |user key| > |derived key|, using DKP<imm,imm>
		 * would result in invalid opcodes (last bytes of user key) in
		 * the resulting descriptor. Use DKP<ptr,imm> instead => both
		 * virtual and dma key addresses are needed.
		 */
		if (keylen > ctx->adata.keylen_pad)
			dma_sync_single_for_device(ctx->jrdev,
						   ctx->adata.key_dma,
						   ctx->adata.keylen_pad,
						   DMA_TO_DEVICE);
	} else {
		ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
				    keylen, CAAM_MAX_HASH_KEY_SIZE);
		if (ret)
			goto bad_free_key;
	}

	kfree(hashed_key);
	return ahash_set_sh_desc(ahash);
 bad_free_key:
	kfree(hashed_key);
	return -EINVAL;
}
static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct device *jrdev = ctx->jrdev;

	if (keylen != AES_KEYSIZE_128)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen,
				   DMA_TO_DEVICE);
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);

	return axcbc_set_sh_desc(ahash);
}
static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int err;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	/* key is immediate data for all cmac shared descriptors */
	ctx->adata.key_virt = key;
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	return acmac_set_sh_desc(ahash);
}
/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @bklog: stored to determine if the request needs backlog
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	bool bklog;
	u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[];
};
static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, state->buflen,
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}
static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len,
				   u32 flag)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
		state->ctx_dma = 0;
	}

	ahash_unmap(dev, edesc, req, dst_len);
}
static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
				  void *context, enum dma_data_direction dir)
{
	struct ahash_request *req = context;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int ecode = 0;
	bool has_bklog;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = state->edesc;
	has_bklog = edesc->bklog;

	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir);
	memcpy(req->result, state->caam_ctx, digestsize);
	kfree(edesc);

	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		ahash_request_complete(req, ecode);
	else
		crypto_finalize_hash_request(jrp->engine, req, ecode);
}
static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	ahash_done_cpy(jrdev, desc, err, context, DMA_FROM_DEVICE);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	ahash_done_cpy(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
}
static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
				     void *context, enum dma_data_direction dir)
{
	struct ahash_request *req = context;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ecode = 0;
	bool has_bklog;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = state->edesc;
	has_bklog = edesc->bklog;
	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir);
	kfree(edesc);

	scatterwalk_map_and_copy(state->buf, req->src,
				 req->nbytes - state->next_buflen,
				 state->next_buflen, 0);
	state->buflen = state->next_buflen;

	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
			     state->buflen, 1);

	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump_debug("result@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
				     digestsize, 1);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		ahash_request_complete(req, ecode);
	else
		crypto_finalize_hash_request(jrp->engine, req, ecode);
}
static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	ahash_done_switch(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	ahash_done_switch(jrdev, desc, err, context, DMA_FROM_DEVICE);
}
/*
 * Allocate an enhanced descriptor, which contains the hardware descriptor
 * and space for hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	struct ahash_edesc *edesc;

	edesc = kzalloc(struct_size(edesc, sec4_sg, sg_num), flags);
	if (!edesc) {
		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
		return NULL;
	}

	state->edesc = edesc;

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}
static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) *
				      pad_sg_nents(first_sg + nents);

		sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}
static int ahash_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = ahash_request_cast(areq);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(crypto_ahash_reqtfm(req));
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u32 *desc = state->edesc->hw_desc;
	int ret;

	state->edesc->bklog = true;

	ret = caam_jr_enqueue(jrdev, desc, state->ahash_op_done, req);

	if (ret == -ENOSPC && engine->retry_support)
		return ret;

	if (ret != -EINPROGRESS) {
		ahash_unmap(jrdev, state->edesc, req, 0);
		kfree(state->edesc);
	} else {
		ret = 0;
	}

	return ret;
}
static int ahash_enqueue_req(struct device *jrdev,
			     void (*cbk)(struct device *jrdev, u32 *desc,
					 u32 err, void *context),
			     struct ahash_request *req,
			     int dst_len, enum dma_data_direction dir)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct ahash_edesc *edesc = state->edesc;
	u32 *desc = edesc->hw_desc;
	int ret;

	state->ahash_op_done = cbk;

	/*
	 * Only backlog requests are sent to crypto-engine since the others
	 * can be handled by CAAM, if free, especially since JR has up to 1024
	 * entries (more than the 10 entries from crypto-engine).
	 */
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		ret = crypto_transfer_hash_request_to_engine(jrpriv->engine,
							     req);
	else
		ret = caam_jr_enqueue(jrdev, desc, cbk, req);

	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
		ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir);
		kfree(edesc);
	}

	return ret;
}
/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int blocksize = crypto_ahash_blocksize(ahash);
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	     (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		int pad_nents;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents);
		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update,
					  ctx->sh_desc_update_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents)
			sg_to_sec4_sg_last(req->src, src_len,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
		else
			sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
					    1);

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				       to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_bi, req,
					ctx->ctx_len, DMA_BIDIRECTIONAL);
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_bytes;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin,
				  ctx->sh_desc_fin_dma);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);
	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
				 digestsize, DMA_BIDIRECTIONAL);
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}
static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
				 digestsize, DMA_BIDIRECTIONAL);
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}
static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	state->buf_dma = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req, digestsize,
				 DMA_FROM_DEVICE);
}
/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int buflen = state->buflen;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	if (buflen) {
		state->buf_dma = dma_map_single(jrdev, buf, buflen,
						DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, state->buf_dma)) {
			dev_err(jrdev, "unable to map src\n");
			goto unmap;
		}

		append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
	}

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret)
		goto unmap;

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req,
				 digestsize, DMA_FROM_DEVICE);
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}
/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int blocksize = crypto_ahash_blocksize(ahash);
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	     (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		int pad_nents;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		pad_nents = pad_sg_nents(1 + mapped_nents);
		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, pad_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
		if (ret)
			goto unmap_ctx;

		sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
					ctx->ctx_len, DMA_TO_DEVICE);
		if ((ret != -EINPROGRESS) && (ret != -EBUSY))
			return ret;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
	if (ret)
		goto unmap;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret)
		goto unmap;

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req,
				 digestsize, DMA_FROM_DEVICE);
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}
/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int to_hash;
	int blocksize = crypto_ahash_blocksize(ahash);
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (blocksize - 1);
	to_hash = req->nbytes - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	     (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
					ctx->ctx_len, DMA_TO_DEVICE);
		if ((ret != -EINPROGRESS) && (ret != -EBUSY))
			return ret;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(buf, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->ctx_dma_len = 0;
	state->buf_dma = 0;
	state->buflen = 0;
	state->next_buflen = 0;

	return 0;
}
static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->final(req);
}
static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_export_state *export = out;
	u8 *buf = state->buf;
	int len = state->buflen;

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}
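/*
 * Illustrative sketch (not part of the driver proper): callers reach these
 * hooks through the generic ahash API to checkpoint and resume a partial
 * hash, roughly:
 *
 *	u8 st[sizeof(struct caam_export_state)];
 *
 *	crypto_ahash_export(req, st);	-> ahash_export(req, st)
 *	crypto_ahash_import(req2, st);	-> ahash_import(req2, st)
 *
 * which is why .statesize below is sizeof(struct caam_export_state).
 */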
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};
/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	}, {
		.hmac_name = "xcbc(aes)",
		.hmac_driver_name = "xcbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = axcbc_setkey,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
	}, {
		.hmac_name = "cmac(aes)",
		.hmac_driver_name = "cmac-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = acmac_setkey,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC,
	},
};
struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	struct ahash_alg ahash_alg;
};
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	const size_t sh_desc_update_offset = offsetof(struct caam_hash_ctx,
						      sh_desc_update);
	dma_addr_t dma_addr;
	struct caam_drv_private *priv;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	priv = dev_get_drvdata(ctx->jrdev->parent);

	if (is_xcbc_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		ctx->key_dir = DMA_BIDIRECTIONAL;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
		ctx->ctx_len = 48;
	} else if (is_cmac_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		ctx->key_dir = DMA_NONE;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
		ctx->ctx_len = 32;
	} else {
		if (priv->era >= 6) {
			ctx->dir = DMA_BIDIRECTIONAL;
			ctx->key_dir = alg->setkey ? DMA_TO_DEVICE : DMA_NONE;
		} else {
			ctx->dir = DMA_TO_DEVICE;
			ctx->key_dir = DMA_NONE;
		}
		ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
		ctx->ctx_len = runninglen[(ctx->adata.algtype &
					   OP_ALG_ALGSEL_SUBMASK) >>
					  OP_ALG_ALGSEL_SHIFT];
	}

	if (ctx->key_dir != DMA_NONE) {
		ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
							  ARRAY_SIZE(ctx->key),
							  ctx->key_dir,
							  DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) {
			dev_err(ctx->jrdev, "unable to map key\n");
			caam_jr_free(ctx->jrdev);
			return -ENOMEM;
		}
	}

	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
					offsetof(struct caam_hash_ctx, key) -
					sh_desc_update_offset,
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map shared descriptors\n");

		if (ctx->key_dir != DMA_NONE)
			dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
					       ARRAY_SIZE(ctx->key),
					       ctx->key_dir,
					       DMA_ATTR_SKIP_CPU_SYNC);

		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	ctx->sh_desc_update_dma = dma_addr;
	ctx->sh_desc_update_first_dma = dma_addr +
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_first) -
					sh_desc_update_offset;
	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
						   sh_desc_fin) -
					sh_desc_update_offset;
	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
						      sh_desc_digest) -
					sh_desc_update_offset;

	ctx->enginectx.op.do_one_request = ahash_do_one_req;

	crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state));

	/*
	 * For keyed hash algorithms shared descriptors
	 * will be created later in setkey() callback
	 */
	return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
}
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);

	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
			       offsetof(struct caam_hash_ctx, key) -
			       offsetof(struct caam_hash_ctx, sh_desc_update),
			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (ctx->key_dir != DMA_NONE)
		dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
				       ARRAY_SIZE(ctx->key), ctx->key_dir,
				       DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}
void caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx) + crypto_dma_padding();
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;

	t_alg->alg_type = template->alg_type;

	return t_alg;
}
int caam_algapi_hash_init(struct device *ctrldev)
{
	int i = 0, err = 0;
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 md_inst, md_vid;

	/*
	 * Register crypto algorithms the device supports.  First, identify
	 * presence and attributes of MD block.
	 */
	if (priv->era < 10) {
		struct caam_perfmon __iomem *perfmon = &priv->jr[0]->perfmon;

		md_vid = (rd_reg32(&perfmon->cha_id_ls) &
			  CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
		md_inst = (rd_reg32(&perfmon->cha_num_ls) &
			   CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
	} else {
		u32 mdha = rd_reg32(&priv->jr[0]->vreg.mdha);

		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
		md_inst = mdha & CHA_VER_NUM_MASK;
	}

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!md_inst)
		return 0;

	/* Limit digest size based on LP256 */
	if (md_vid == CHA_VER_VID_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (is_mdha(alg->alg_type) &&
		    alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				alg->hmac_driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)
			continue;

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}
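/*
 * Illustrative usage note (not part of the driver proper): once registered,
 * these transforms are reachable through the standard crypto API, e.g.
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *
 * which binds to "hmac-sha256-caam" when this driver wins priority-based
 * selection (CAAM_CRA_PRIORITY is 3000).
 */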