drivers/crypto/caam/caamhash.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * caam - Freescale FSL CAAM support for ahash functions of crypto API
4  *
5  * Copyright 2011 Freescale Semiconductor, Inc.
6  * Copyright 2018-2019, 2023 NXP
7  *
8  * Based on caamalg.c crypto API driver.
9  *
10  * relationship of digest job descriptor or first job descriptor after init to
11  * shared descriptors:
12  *
13  * ---------------                     ---------------
14  * | JobDesc #1  |-------------------->|  ShareDesc  |
15  * | *(packet 1) |                     |  (hashKey)  |
16  * ---------------                     | (operation) |
17  *                                     ---------------
18  *
19  * relationship of subsequent job descriptors to shared descriptors:
20  *
21  * ---------------                     ---------------
22  * | JobDesc #2  |-------------------->|  ShareDesc  |
23  * | *(packet 2) |      |------------->|  (hashKey)  |
24  * ---------------      |    |-------->| (operation) |
25  *       .              |    |         | (load ctx2) |
26  *       .              |    |         ---------------
27  * ---------------      |    |
28  * | JobDesc #3  |------|    |
29  * | *(packet 3) |           |
30  * ---------------           |
31  *       .                   |
32  *       .                   |
33  * ---------------           |
34  * | JobDesc #4  |------------
35  * | *(packet 4) |
36  * ---------------
37  *
38  * The SharedDesc never changes for a connection unless rekeyed, but
39  * each packet will likely be in a different place. So all we need
40  * to know to process the packet is where the input is, where the
41  * output goes, and what context we want to process with. Context is
42  * in the SharedDesc, packet references in the JobDesc.
43  *
44  * So, a job desc looks like:
45  *
46  * ---------------------
47  * | Header            |
48  * | ShareDesc Pointer |
49  * | SEQ_OUT_PTR       |
50  * | (output buffer)   |
51  * | (output length)   |
52  * | SEQ_IN_PTR        |
53  * | (input buffer)    |
54  * | (input length)    |
55  * ---------------------
56  */
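/*
 * Illustrative sketch only (not driver code): a job descriptor with the
 * layout above is built with the desc_constr.h helpers used throughout this
 * file, assuming out_dma/in_dma are already DMA-mapped buffers:
 *
 *      init_job_desc_shared(desc, sh_desc_dma, desc_len(sh_desc),
 *                           HDR_SHARE_DEFER | HDR_REVERSE);
 *      append_seq_out_ptr(desc, out_dma, out_len, 0);
 *      append_seq_in_ptr(desc, in_dma, in_len, 0);
 *
 * ahash_edesc_alloc() and the ahash_* request functions below follow this
 * pattern.
 */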
57
58 #include "compat.h"
59
60 #include "regs.h"
61 #include "intern.h"
62 #include "desc_constr.h"
63 #include "jr.h"
64 #include "error.h"
65 #include "sg_sw_sec4.h"
66 #include "key_gen.h"
67 #include "caamhash_desc.h"
68 #include <crypto/engine.h>
69 #include <linux/dma-mapping.h>
70 #include <linux/kernel.h>
71
72 #define CAAM_CRA_PRIORITY               3000
73
74 /* max hash key is max split key size */
75 #define CAAM_MAX_HASH_KEY_SIZE          (SHA512_DIGEST_SIZE * 2)
76
77 #define CAAM_MAX_HASH_BLOCK_SIZE        SHA512_BLOCK_SIZE
78 #define CAAM_MAX_HASH_DIGEST_SIZE       SHA512_DIGEST_SIZE
79
80 #define DESC_HASH_MAX_USED_BYTES        (DESC_AHASH_FINAL_LEN + \
81                                          CAAM_MAX_HASH_KEY_SIZE)
82 #define DESC_HASH_MAX_USED_LEN          (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
83
84 /* caam context sizes for hashes: running digest + 8 */
85 #define HASH_MSG_LEN                    8
86 #define MAX_CTX_LEN                     (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
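/*
 * Illustrative example: for sha256 the running context is HASH_MSG_LEN +
 * SHA256_DIGEST_SIZE = 8 + 32 = 40 bytes; MAX_CTX_LEN covers the largest
 * case, sha512, at 8 + 64 = 72 bytes.
 */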
87
88 static struct list_head hash_list;
89
90 /* ahash per-session context */
91 struct caam_hash_ctx {
92         struct crypto_engine_ctx enginectx;
93         u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
94         u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
95         u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
96         u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
97         u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
98         dma_addr_t sh_desc_update_dma ____cacheline_aligned;
99         dma_addr_t sh_desc_update_first_dma;
100         dma_addr_t sh_desc_fin_dma;
101         dma_addr_t sh_desc_digest_dma;
102         enum dma_data_direction dir;
103         enum dma_data_direction key_dir;
104         struct device *jrdev;
105         int ctx_len;
106         struct alginfo adata;
107 };
108
109 /* ahash state */
110 struct caam_hash_state {
111         dma_addr_t buf_dma;
112         dma_addr_t ctx_dma;
113         int ctx_dma_len;
114         u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
115         int buflen;
116         int next_buflen;
117         u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
118         int (*update)(struct ahash_request *req) ____cacheline_aligned;
119         int (*final)(struct ahash_request *req);
120         int (*finup)(struct ahash_request *req);
121         struct ahash_edesc *edesc;
122         void (*ahash_op_done)(struct device *jrdev, u32 *desc, u32 err,
123                               void *context);
124 };
125
126 struct caam_export_state {
127         u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
128         u8 caam_ctx[MAX_CTX_LEN];
129         int buflen;
130         int (*update)(struct ahash_request *req);
131         int (*final)(struct ahash_request *req);
132         int (*finup)(struct ahash_request *req);
133 };
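/*
 * caam_export_state is the snapshot used by the driver's ahash export/import
 * callbacks: the pending data buffer, the running CAAM context and the
 * function pointers selecting the next processing step.
 */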
134
135 static inline bool is_cmac_aes(u32 algtype)
136 {
137         return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
138                (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC);
139 }
140 /* Common job descriptor seq in/out ptr routines */
141
142 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
143 static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
144                                       struct caam_hash_state *state,
145                                       int ctx_len)
146 {
147         state->ctx_dma_len = ctx_len;
148         state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
149                                         ctx_len, DMA_FROM_DEVICE);
150         if (dma_mapping_error(jrdev, state->ctx_dma)) {
151                 dev_err(jrdev, "unable to map ctx\n");
152                 state->ctx_dma = 0;
153                 return -ENOMEM;
154         }
155
156         append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
157
158         return 0;
159 }
160
161 /* Map current buffer in state (if length > 0) and put it in link table */
162 static inline int buf_map_to_sec4_sg(struct device *jrdev,
163                                      struct sec4_sg_entry *sec4_sg,
164                                      struct caam_hash_state *state)
165 {
166         int buflen = state->buflen;
167
168         if (!buflen)
169                 return 0;
170
171         state->buf_dma = dma_map_single(jrdev, state->buf, buflen,
172                                         DMA_TO_DEVICE);
173         if (dma_mapping_error(jrdev, state->buf_dma)) {
174                 dev_err(jrdev, "unable to map buf\n");
175                 state->buf_dma = 0;
176                 return -ENOMEM;
177         }
178
179         dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);
180
181         return 0;
182 }
183
184 /* Map state->caam_ctx, and add it to link table */
185 static inline int ctx_map_to_sec4_sg(struct device *jrdev,
186                                      struct caam_hash_state *state, int ctx_len,
187                                      struct sec4_sg_entry *sec4_sg, u32 flag)
188 {
189         state->ctx_dma_len = ctx_len;
190         state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
191         if (dma_mapping_error(jrdev, state->ctx_dma)) {
192                 dev_err(jrdev, "unable to map ctx\n");
193                 state->ctx_dma = 0;
194                 return -ENOMEM;
195         }
196
197         dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
198
199         return 0;
200 }
201
202 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
203 {
204         struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
205         int digestsize = crypto_ahash_digestsize(ahash);
206         struct device *jrdev = ctx->jrdev;
207         struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
208         u32 *desc;
209
210         ctx->adata.key_virt = ctx->key;
211
212         /* ahash_update shared descriptor */
213         desc = ctx->sh_desc_update;
214         cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
215                           ctx->ctx_len, true, ctrlpriv->era);
216         dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
217                                    desc_bytes(desc), ctx->dir);
218
219         print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ",
220                              DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
221                              1);
222
223         /* ahash_update_first shared descriptor */
224         desc = ctx->sh_desc_update_first;
225         cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
226                           ctx->ctx_len, false, ctrlpriv->era);
227         dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
228                                    desc_bytes(desc), ctx->dir);
229         print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__)
230                              ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
231                              desc_bytes(desc), 1);
232
233         /* ahash_final shared descriptor */
234         desc = ctx->sh_desc_fin;
235         cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
236                           ctx->ctx_len, true, ctrlpriv->era);
237         dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
238                                    desc_bytes(desc), ctx->dir);
239
240         print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ",
241                              DUMP_PREFIX_ADDRESS, 16, 4, desc,
242                              desc_bytes(desc), 1);
243
244         /* ahash_digest shared descriptor */
245         desc = ctx->sh_desc_digest;
246         cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
247                           ctx->ctx_len, false, ctrlpriv->era);
248         dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
249                                    desc_bytes(desc), ctx->dir);
250
251         print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ",
252                              DUMP_PREFIX_ADDRESS, 16, 4, desc,
253                              desc_bytes(desc), 1);
254
255         return 0;
256 }
257
258 static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
259 {
260         struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
261         int digestsize = crypto_ahash_digestsize(ahash);
262         struct device *jrdev = ctx->jrdev;
263         u32 *desc;
264
265         /* shared descriptor for ahash_update */
266         desc = ctx->sh_desc_update;
267         cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
268                             ctx->ctx_len, ctx->ctx_len);
269         dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
270                                    desc_bytes(desc), ctx->dir);
271         print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
272                              DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
273                              1);
274
275         /* shared descriptor for ahash_{final,finup} */
276         desc = ctx->sh_desc_fin;
277         cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
278                             digestsize, ctx->ctx_len);
279         dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
280                                    desc_bytes(desc), ctx->dir);
281         print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
282                              DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
283                              1);
284
285         /* key is immediate data for INIT and INITFINAL states */
286         ctx->adata.key_virt = ctx->key;
287
288         /* shared descriptor for first invocation of ahash_update */
289         desc = ctx->sh_desc_update_first;
290         cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
291                             ctx->ctx_len);
292         dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
293                                    desc_bytes(desc), ctx->dir);
294         print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)
295                              " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
296                              desc_bytes(desc), 1);
297
298         /* shared descriptor for ahash_digest */
299         desc = ctx->sh_desc_digest;
300         cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
301                             digestsize, ctx->ctx_len);
302         dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
303                                    desc_bytes(desc), ctx->dir);
304         print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
305                              DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
306                              1);
307         return 0;
308 }
309
310 static int acmac_set_sh_desc(struct crypto_ahash *ahash)
311 {
312         struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
313         int digestsize = crypto_ahash_digestsize(ahash);
314         struct device *jrdev = ctx->jrdev;
315         u32 *desc;
316
317         /* shared descriptor for ahash_update */
318         desc = ctx->sh_desc_update;
319         cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
320                             ctx->ctx_len, ctx->ctx_len);
321         dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
322                                    desc_bytes(desc), ctx->dir);
323         print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
324                              DUMP_PREFIX_ADDRESS, 16, 4, desc,
325                              desc_bytes(desc), 1);
326
327         /* shared descriptor for ahash_{final,finup} */
328         desc = ctx->sh_desc_fin;
329         cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
330                             digestsize, ctx->ctx_len);
331         dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
332                                    desc_bytes(desc), ctx->dir);
333         print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
334                              DUMP_PREFIX_ADDRESS, 16, 4, desc,
335                              desc_bytes(desc), 1);
336
337         /* shared descriptor for first invocation of ahash_update */
338         desc = ctx->sh_desc_update_first;
339         cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
340                             ctx->ctx_len);
341         dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
342                                    desc_bytes(desc), ctx->dir);
343         print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)
344                              " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
345                              desc_bytes(desc), 1);
346
347         /* shared descriptor for ahash_digest */
348         desc = ctx->sh_desc_digest;
349         cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
350                             digestsize, ctx->ctx_len);
351         dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
352                                    desc_bytes(desc), ctx->dir);
353         print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
354                              DUMP_PREFIX_ADDRESS, 16, 4, desc,
355                              desc_bytes(desc), 1);
356
357         return 0;
358 }
359
360 /* Digest the key if it is too long, reducing it to digestsize bytes */
361 static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
362                            u32 digestsize)
363 {
364         struct device *jrdev = ctx->jrdev;
365         u32 *desc;
366         struct split_key_result result;
367         dma_addr_t key_dma;
368         int ret;
369
370         desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL);
371         if (!desc) {
372                 dev_err(jrdev, "unable to allocate key input memory\n");
373                 return -ENOMEM;
374         }
375
376         init_job_desc(desc, 0);
377
378         key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL);
379         if (dma_mapping_error(jrdev, key_dma)) {
380                 dev_err(jrdev, "unable to map key memory\n");
381                 kfree(desc);
382                 return -ENOMEM;
383         }
384
385         /* Job descriptor to perform unkeyed hash on key_in */
386         append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
387                          OP_ALG_AS_INITFINAL);
388         append_seq_in_ptr(desc, key_dma, *keylen, 0);
389         append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
390                              FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
391         append_seq_out_ptr(desc, key_dma, digestsize, 0);
392         append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
393                          LDST_SRCDST_BYTE_CONTEXT);
394
395         print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
396                              DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
397         print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
398                              DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
399                              1);
400
401         result.err = 0;
402         init_completion(&result.completion);
403
404         ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
405         if (ret == -EINPROGRESS) {
406                 /* in progress */
407                 wait_for_completion(&result.completion);
408                 ret = result.err;
409
410                 print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
411                                      DUMP_PREFIX_ADDRESS, 16, 4, key,
412                                      digestsize, 1);
413         }
414         dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);
415
416         *keylen = digestsize;
417
418         kfree(desc);
419
420         return ret;
421 }
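/*
 * hash_digest_key() is used by ahash_setkey() below when the HMAC key is
 * longer than the block size, mirroring the standard HMAC rule of hashing
 * over-long keys first; e.g. a 100-byte hmac(sha256) key is reduced to a
 * 32-byte digest before split key generation.
 */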
422
423 static int ahash_setkey(struct crypto_ahash *ahash,
424                         const u8 *key, unsigned int keylen)
425 {
426         struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
427         struct device *jrdev = ctx->jrdev;
428         int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
429         int digestsize = crypto_ahash_digestsize(ahash);
430         struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
431         int ret;
432         u8 *hashed_key = NULL;
433
434         dev_dbg(jrdev, "keylen %d\n", keylen);
435
436         if (keylen > blocksize) {
437                 unsigned int aligned_len =
438                         ALIGN(keylen, dma_get_cache_alignment());
439
440                 if (aligned_len < keylen)
441                         return -EOVERFLOW;
442
443                 hashed_key = kmemdup(key, keylen, GFP_KERNEL);
444                 if (!hashed_key)
445                         return -ENOMEM;
446                 ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
447                 if (ret)
448                         goto bad_free_key;
449                 key = hashed_key;
450         }
451
452         /*
453          * If DKP is supported, use it in the shared descriptor to generate
454          * the split key.
455          */
456         if (ctrlpriv->era >= 6) {
457                 ctx->adata.key_inline = true;
458                 ctx->adata.keylen = keylen;
459                 ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
460                                                       OP_ALG_ALGSEL_MASK);
461
462                 if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
463                         goto bad_free_key;
464
465                 memcpy(ctx->key, key, keylen);
466
467                 /*
468                  * In case |user key| > |derived key|, using DKP<imm,imm>
469                  * would result in invalid opcodes (last bytes of user key) in
470                  * the resulting descriptor. Use DKP<ptr,imm> instead => both
471                  * virtual and dma key addresses are needed.
472                  */
473                 if (keylen > ctx->adata.keylen_pad)
474                         dma_sync_single_for_device(ctx->jrdev,
475                                                    ctx->adata.key_dma,
476                                                    ctx->adata.keylen_pad,
477                                                    DMA_TO_DEVICE);
478         } else {
479                 ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
480                                     keylen, CAAM_MAX_HASH_KEY_SIZE);
481                 if (ret)
482                         goto bad_free_key;
483         }
484
485         kfree(hashed_key);
486         return ahash_set_sh_desc(ahash);
487  bad_free_key:
488         kfree(hashed_key);
489         return -EINVAL;
490 }
491
492 static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
493                         unsigned int keylen)
494 {
495         struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
496         struct device *jrdev = ctx->jrdev;
497
498         if (keylen != AES_KEYSIZE_128)
499                 return -EINVAL;
500
501         memcpy(ctx->key, key, keylen);
502         dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen,
503                                    DMA_TO_DEVICE);
504         ctx->adata.keylen = keylen;
505
506         print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
507                              DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);
508
509         return axcbc_set_sh_desc(ahash);
510 }
511
512 static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
513                         unsigned int keylen)
514 {
515         struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
516         int err;
517
518         err = aes_check_keylen(keylen);
519         if (err)
520                 return err;
521
522         /* key is immediate data for all cmac shared descriptors */
523         ctx->adata.key_virt = key;
524         ctx->adata.keylen = keylen;
525
526         print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ",
527                              DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
528
529         return acmac_set_sh_desc(ahash);
530 }
531
532 /*
533  * ahash_edesc - s/w-extended ahash descriptor
534  * @sec4_sg_dma: physical mapped address of h/w link table
535  * @src_nents: number of segments in input scatterlist
536  * @sec4_sg_bytes: length of dma mapped sec4_sg space
537  * @bklog: stored to determine if the request needs backlog
538  * @hw_desc: the h/w job descriptor followed by any referenced link tables
539  * @sec4_sg: h/w link table
540  */
541 struct ahash_edesc {
542         dma_addr_t sec4_sg_dma;
543         int src_nents;
544         int sec4_sg_bytes;
545         bool bklog;
546         u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned;
547         struct sec4_sg_entry sec4_sg[];
548 };
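/*
 * The job descriptor and its link table are allocated in one go; e.g. with
 * struct_size(edesc, sec4_sg, sg_num), a request needing 4 S/G entries gets
 * sizeof(struct ahash_edesc) + 4 * sizeof(struct sec4_sg_entry) bytes
 * (see ahash_edesc_alloc() below).
 */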
549
550 static inline void ahash_unmap(struct device *dev,
551                         struct ahash_edesc *edesc,
552                         struct ahash_request *req, int dst_len)
553 {
554         struct caam_hash_state *state = ahash_request_ctx_dma(req);
555
556         if (edesc->src_nents)
557                 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
558
559         if (edesc->sec4_sg_bytes)
560                 dma_unmap_single(dev, edesc->sec4_sg_dma,
561                                  edesc->sec4_sg_bytes, DMA_TO_DEVICE);
562
563         if (state->buf_dma) {
564                 dma_unmap_single(dev, state->buf_dma, state->buflen,
565                                  DMA_TO_DEVICE);
566                 state->buf_dma = 0;
567         }
568 }
569
570 static inline void ahash_unmap_ctx(struct device *dev,
571                         struct ahash_edesc *edesc,
572                         struct ahash_request *req, int dst_len, u32 flag)
573 {
574         struct caam_hash_state *state = ahash_request_ctx_dma(req);
575
576         if (state->ctx_dma) {
577                 dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
578                 state->ctx_dma = 0;
579         }
580         ahash_unmap(dev, edesc, req, dst_len);
581 }
582
583 static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
584                                   void *context, enum dma_data_direction dir)
585 {
586         struct ahash_request *req = context;
587         struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
588         struct ahash_edesc *edesc;
589         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
590         int digestsize = crypto_ahash_digestsize(ahash);
591         struct caam_hash_state *state = ahash_request_ctx_dma(req);
592         struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
593         int ecode = 0;
594         bool has_bklog;
595
596         dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
597
598         edesc = state->edesc;
599         has_bklog = edesc->bklog;
600
601         if (err)
602                 ecode = caam_jr_strstatus(jrdev, err);
603
604         ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir);
605         memcpy(req->result, state->caam_ctx, digestsize);
606         kfree(edesc);
607
608         print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
609                              DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
610                              ctx->ctx_len, 1);
611
612         /*
613          * If no backlog flag, the completion of the request is done
614          * by CAAM, not crypto engine.
615          */
616         if (!has_bklog)
617                 ahash_request_complete(req, ecode);
618         else
619                 crypto_finalize_hash_request(jrp->engine, req, ecode);
620 }
621
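/*
 * The two wrappers below only differ in how state->caam_ctx was mapped:
 * DMA_FROM_DEVICE when the context buffer merely receives the digest
 * (digest/final/finup without an incoming context), DMA_BIDIRECTIONAL when
 * the running context is both read and written back.
 */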
622 static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
623                        void *context)
624 {
625         ahash_done_cpy(jrdev, desc, err, context, DMA_FROM_DEVICE);
626 }
627
628 static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
629                                void *context)
630 {
631         ahash_done_cpy(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
632 }
633
634 static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
635                                      void *context, enum dma_data_direction dir)
636 {
637         struct ahash_request *req = context;
638         struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
639         struct ahash_edesc *edesc;
640         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
641         struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
642         struct caam_hash_state *state = ahash_request_ctx_dma(req);
643         int digestsize = crypto_ahash_digestsize(ahash);
644         int ecode = 0;
645         bool has_bklog;
646
647         dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
648
649         edesc = state->edesc;
650         has_bklog = edesc->bklog;
651         if (err)
652                 ecode = caam_jr_strstatus(jrdev, err);
653
654         ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir);
655         kfree(edesc);
656
657         scatterwalk_map_and_copy(state->buf, req->src,
658                                  req->nbytes - state->next_buflen,
659                                  state->next_buflen, 0);
660         state->buflen = state->next_buflen;
661
662         print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
663                              DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
664                              state->buflen, 1);
665
666         print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
667                              DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
668                              ctx->ctx_len, 1);
669         if (req->result)
670                 print_hex_dump_debug("result@"__stringify(__LINE__)": ",
671                                      DUMP_PREFIX_ADDRESS, 16, 4, req->result,
672                                      digestsize, 1);
673
674         /*
675          * If no backlog flag, the completion of the request is done
676          * by CAAM, not crypto engine.
677          */
678         if (!has_bklog)
679                 ahash_request_complete(req, ecode);
680         else
681                 crypto_finalize_hash_request(jrp->engine, req, ecode);
682
683 }
684
685 static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
686                           void *context)
687 {
688         ahash_done_switch(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
689 }
690
691 static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
692                                void *context)
693 {
694         ahash_done_switch(jrdev, desc, err, context, DMA_FROM_DEVICE);
695 }
696
697 /*
698  * Allocate an extended descriptor, which contains the hardware descriptor
699  * and space for hardware scatter table containing sg_num entries.
700  */
701 static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
702                                              int sg_num, u32 *sh_desc,
703                                              dma_addr_t sh_desc_dma)
704 {
705         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
706         struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
707         struct caam_hash_state *state = ahash_request_ctx_dma(req);
708         gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
709                        GFP_KERNEL : GFP_ATOMIC;
710         struct ahash_edesc *edesc;
711
712         edesc = kzalloc(struct_size(edesc, sec4_sg, sg_num), flags);
713         if (!edesc) {
714                 dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
715                 return NULL;
716         }
717
718         state->edesc = edesc;
719
720         init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
721                              HDR_SHARE_DEFER | HDR_REVERSE);
722
723         return edesc;
724 }
725
726 static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
727                                struct ahash_edesc *edesc,
728                                struct ahash_request *req, int nents,
729                                unsigned int first_sg,
730                                unsigned int first_bytes, size_t to_hash)
731 {
732         dma_addr_t src_dma;
733         u32 options;
734
735         if (nents > 1 || first_sg) {
736                 struct sec4_sg_entry *sg = edesc->sec4_sg;
737                 unsigned int sgsize = sizeof(*sg) *
738                                       pad_sg_nents(first_sg + nents);
739
740                 sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);
741
742                 src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
743                 if (dma_mapping_error(ctx->jrdev, src_dma)) {
744                         dev_err(ctx->jrdev, "unable to map S/G table\n");
745                         return -ENOMEM;
746                 }
747
748                 edesc->sec4_sg_bytes = sgsize;
749                 edesc->sec4_sg_dma = src_dma;
750                 options = LDST_SGF;
751         } else {
752                 src_dma = sg_dma_address(req->src);
753                 options = 0;
754         }
755
756         append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
757                           options);
758
759         return 0;
760 }
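/*
 * ahash_edesc_add_src() appends the SEQ IN PTR for the request source: a
 * direct pointer when there is a single mapped segment and nothing to
 * prepend, otherwise a sec4 S/G table (prepended buf/ctx entries first,
 * then req->src) referenced with LDST_SGF.
 */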
761
762 static int ahash_do_one_req(struct crypto_engine *engine, void *areq)
763 {
764         struct ahash_request *req = ahash_request_cast(areq);
765         struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(crypto_ahash_reqtfm(req));
766         struct caam_hash_state *state = ahash_request_ctx_dma(req);
767         struct device *jrdev = ctx->jrdev;
768         u32 *desc = state->edesc->hw_desc;
769         int ret;
770
771         state->edesc->bklog = true;
772
773         ret = caam_jr_enqueue(jrdev, desc, state->ahash_op_done, req);
774
775         if (ret == -ENOSPC && engine->retry_support)
776                 return ret;
777
778         if (ret != -EINPROGRESS) {
779                 ahash_unmap(jrdev, state->edesc, req, 0);
780                 kfree(state->edesc);
781         } else {
782                 ret = 0;
783         }
784
785         return ret;
786 }
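/*
 * ahash_do_one_req() serves as the crypto-engine do_one_request() callback:
 * returning 0 defers completion to state->ahash_op_done via the job ring,
 * while -ENOSPC with retry support lets the engine requeue the request.
 */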
787
788 static int ahash_enqueue_req(struct device *jrdev,
789                              void (*cbk)(struct device *jrdev, u32 *desc,
790                                          u32 err, void *context),
791                              struct ahash_request *req,
792                              int dst_len, enum dma_data_direction dir)
793 {
794         struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
795         struct caam_hash_state *state = ahash_request_ctx_dma(req);
796         struct ahash_edesc *edesc = state->edesc;
797         u32 *desc = edesc->hw_desc;
798         int ret;
799
800         state->ahash_op_done = cbk;
801
802         /*
803          * Only backlog requests are sent to crypto-engine since the others
804          * can be handled by CAAM, if free, especially since JR has up to 1024
805          * entries (more than the 10 entries from crypto-engine).
806          */
807         if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
808                 ret = crypto_transfer_hash_request_to_engine(jrpriv->engine,
809                                                              req);
810         else
811                 ret = caam_jr_enqueue(jrdev, desc, cbk, req);
812
813         if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
814                 ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir);
815                 kfree(edesc);
816         }
817
818         return ret;
819 }
820
821 /* submit update job descriptor */
822 static int ahash_update_ctx(struct ahash_request *req)
823 {
824         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
825         struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
826         struct caam_hash_state *state = ahash_request_ctx_dma(req);
827         struct device *jrdev = ctx->jrdev;
828         u8 *buf = state->buf;
829         int *buflen = &state->buflen;
830         int *next_buflen = &state->next_buflen;
831         int blocksize = crypto_ahash_blocksize(ahash);
832         int in_len = *buflen + req->nbytes, to_hash;
833         u32 *desc;
834         int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
835         struct ahash_edesc *edesc;
836         int ret = 0;
837
838         *next_buflen = in_len & (blocksize - 1);
839         to_hash = in_len - *next_buflen;
840
841         /*
842          * For XCBC and CMAC, if to_hash is multiple of block size,
843          * keep last block in internal buffer
844          */
845         if ((is_xcbc_aes(ctx->adata.algtype) ||
846              is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
847              (*next_buflen == 0)) {
848                 *next_buflen = blocksize;
849                 to_hash -= blocksize;
850         }
851
852         if (to_hash) {
853                 int pad_nents;
854                 int src_len = req->nbytes - *next_buflen;
855
856                 src_nents = sg_nents_for_len(req->src, src_len);
857                 if (src_nents < 0) {
858                         dev_err(jrdev, "Invalid number of src SG.\n");
859                         return src_nents;
860                 }
861
862                 if (src_nents) {
863                         mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
864                                                   DMA_TO_DEVICE);
865                         if (!mapped_nents) {
866                                 dev_err(jrdev, "unable to DMA map source\n");
867                                 return -ENOMEM;
868                         }
869                 } else {
870                         mapped_nents = 0;
871                 }
872
873                 sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
874                 pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents);
875                 sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);
876
877                 /*
878                  * allocate space for base edesc and hw desc commands,
879                  * link tables
880                  */
881                 edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update,
882                                           ctx->sh_desc_update_dma);
883                 if (!edesc) {
884                         dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
885                         return -ENOMEM;
886                 }
887
888                 edesc->src_nents = src_nents;
889                 edesc->sec4_sg_bytes = sec4_sg_bytes;
890
891                 ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
892                                          edesc->sec4_sg, DMA_BIDIRECTIONAL);
893                 if (ret)
894                         goto unmap_ctx;
895
896                 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
897                 if (ret)
898                         goto unmap_ctx;
899
900                 if (mapped_nents)
901                         sg_to_sec4_sg_last(req->src, src_len,
902                                            edesc->sec4_sg + sec4_sg_src_index,
903                                            0);
904                 else
905                         sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
906                                             1);
907
908                 desc = edesc->hw_desc;
909
910                 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
911                                                      sec4_sg_bytes,
912                                                      DMA_TO_DEVICE);
913                 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
914                         dev_err(jrdev, "unable to map S/G table\n");
915                         ret = -ENOMEM;
916                         goto unmap_ctx;
917                 }
918
919                 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
920                                        to_hash, LDST_SGF);
921
922                 append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
923
924                 print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
925                                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
926                                      desc_bytes(desc), 1);
927
928                 ret = ahash_enqueue_req(jrdev, ahash_done_bi, req,
929                                         ctx->ctx_len, DMA_BIDIRECTIONAL);
930         } else if (*next_buflen) {
931                 scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
932                                          req->nbytes, 0);
933                 *buflen = *next_buflen;
934
935                 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
936                                      DUMP_PREFIX_ADDRESS, 16, 4, buf,
937                                      *buflen, 1);
938         }
939
940         return ret;
941 unmap_ctx:
942         ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
943         kfree(edesc);
944         return ret;
945 }
946
947 static int ahash_final_ctx(struct ahash_request *req)
948 {
949         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
950         struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
951         struct caam_hash_state *state = ahash_request_ctx_dma(req);
952         struct device *jrdev = ctx->jrdev;
953         int buflen = state->buflen;
954         u32 *desc;
955         int sec4_sg_bytes;
956         int digestsize = crypto_ahash_digestsize(ahash);
957         struct ahash_edesc *edesc;
958         int ret;
959
960         sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) *
961                         sizeof(struct sec4_sg_entry);
962
963         /* allocate space for base edesc and hw desc commands, link tables */
964         edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin,
965                                   ctx->sh_desc_fin_dma);
966         if (!edesc)
967                 return -ENOMEM;
968
969         desc = edesc->hw_desc;
970
971         edesc->sec4_sg_bytes = sec4_sg_bytes;
972
973         ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
974                                  edesc->sec4_sg, DMA_BIDIRECTIONAL);
975         if (ret)
976                 goto unmap_ctx;
977
978         ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
979         if (ret)
980                 goto unmap_ctx;
981
982         sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));
983
984         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
985                                             sec4_sg_bytes, DMA_TO_DEVICE);
986         if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
987                 dev_err(jrdev, "unable to map S/G table\n");
988                 ret = -ENOMEM;
989                 goto unmap_ctx;
990         }
991
992         append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
993                           LDST_SGF);
994         append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
995
996         print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
997                              DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
998                              1);
999
1000         return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
1001                                  digestsize, DMA_BIDIRECTIONAL);
1002  unmap_ctx:
1003         ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
1004         kfree(edesc);
1005         return ret;
1006 }
1007
1008 static int ahash_finup_ctx(struct ahash_request *req)
1009 {
1010         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1011         struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
1012         struct caam_hash_state *state = ahash_request_ctx_dma(req);
1013         struct device *jrdev = ctx->jrdev;
1014         int buflen = state->buflen;
1015         u32 *desc;
1016         int sec4_sg_src_index;
1017         int src_nents, mapped_nents;
1018         int digestsize = crypto_ahash_digestsize(ahash);
1019         struct ahash_edesc *edesc;
1020         int ret;
1021
1022         src_nents = sg_nents_for_len(req->src, req->nbytes);
1023         if (src_nents < 0) {
1024                 dev_err(jrdev, "Invalid number of src SG.\n");
1025                 return src_nents;
1026         }
1027
1028         if (src_nents) {
1029                 mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1030                                           DMA_TO_DEVICE);
1031                 if (!mapped_nents) {
1032                         dev_err(jrdev, "unable to DMA map source\n");
1033                         return -ENOMEM;
1034                 }
1035         } else {
1036                 mapped_nents = 0;
1037         }
1038
1039         sec4_sg_src_index = 1 + (buflen ? 1 : 0);
1040
1041         /* allocate space for base edesc and hw desc commands, link tables */
1042         edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
1043                                   ctx->sh_desc_fin, ctx->sh_desc_fin_dma);
1044         if (!edesc) {
1045                 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1046                 return -ENOMEM;
1047         }
1048
1049         desc = edesc->hw_desc;
1050
1051         edesc->src_nents = src_nents;
1052
1053         ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
1054                                  edesc->sec4_sg, DMA_BIDIRECTIONAL);
1055         if (ret)
1056                 goto unmap_ctx;
1057
1058         ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
1059         if (ret)
1060                 goto unmap_ctx;
1061
1062         ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
1063                                   sec4_sg_src_index, ctx->ctx_len + buflen,
1064                                   req->nbytes);
1065         if (ret)
1066                 goto unmap_ctx;
1067
1068         append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
1069
1070         print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1071                              DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1072                              1);
1073
1074         return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
1075                                  digestsize, DMA_BIDIRECTIONAL);
1076  unmap_ctx:
1077         ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
1078         kfree(edesc);
1079         return ret;
1080 }
1081
1082 static int ahash_digest(struct ahash_request *req)
1083 {
1084         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1085         struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
1086         struct caam_hash_state *state = ahash_request_ctx_dma(req);
1087         struct device *jrdev = ctx->jrdev;
1088         u32 *desc;
1089         int digestsize = crypto_ahash_digestsize(ahash);
1090         int src_nents, mapped_nents;
1091         struct ahash_edesc *edesc;
1092         int ret;
1093
1094         state->buf_dma = 0;
1095
1096         src_nents = sg_nents_for_len(req->src, req->nbytes);
1097         if (src_nents < 0) {
1098                 dev_err(jrdev, "Invalid number of src SG.\n");
1099                 return src_nents;
1100         }
1101
1102         if (src_nents) {
1103                 mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1104                                           DMA_TO_DEVICE);
1105                 if (!mapped_nents) {
1106                         dev_err(jrdev, "unable to map source for DMA\n");
1107                         return -ENOMEM;
1108                 }
1109         } else {
1110                 mapped_nents = 0;
1111         }
1112
1113         /* allocate space for base edesc and hw desc commands, link tables */
1114         edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0,
1115                                   ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
1116         if (!edesc) {
1117                 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1118                 return -ENOMEM;
1119         }
1120
1121         edesc->src_nents = src_nents;
1122
1123         ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
1124                                   req->nbytes);
1125         if (ret) {
1126                 ahash_unmap(jrdev, edesc, req, digestsize);
1127                 kfree(edesc);
1128                 return ret;
1129         }
1130
1131         desc = edesc->hw_desc;
1132
1133         ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
1134         if (ret) {
1135                 ahash_unmap(jrdev, edesc, req, digestsize);
1136                 kfree(edesc);
1137                 return -ENOMEM;
1138         }
1139
1140         print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1141                              DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1142                              1);
1143
1144         return ahash_enqueue_req(jrdev, ahash_done, req, digestsize,
1145                                  DMA_FROM_DEVICE);
1146 }
1147
1148 /* submit ahash final if it is the first job descriptor */
1149 static int ahash_final_no_ctx(struct ahash_request *req)
1150 {
1151         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1152         struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
1153         struct caam_hash_state *state = ahash_request_ctx_dma(req);
1154         struct device *jrdev = ctx->jrdev;
1155         u8 *buf = state->buf;
1156         int buflen = state->buflen;
1157         u32 *desc;
1158         int digestsize = crypto_ahash_digestsize(ahash);
1159         struct ahash_edesc *edesc;
1160         int ret;
1161
1162         /* allocate space for base edesc and hw desc commands, link tables */
1163         edesc = ahash_edesc_alloc(req, 0, ctx->sh_desc_digest,
1164                                   ctx->sh_desc_digest_dma);
1165         if (!edesc)
1166                 return -ENOMEM;
1167
1168         desc = edesc->hw_desc;
1169
1170         if (buflen) {
1171                 state->buf_dma = dma_map_single(jrdev, buf, buflen,
1172                                                 DMA_TO_DEVICE);
1173                 if (dma_mapping_error(jrdev, state->buf_dma)) {
1174                         dev_err(jrdev, "unable to map src\n");
1175                         goto unmap;
1176                 }
1177
1178                 append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
1179         }
1180
1181         ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
1182         if (ret)
1183                 goto unmap;
1184
1185         print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1186                              DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1187                              1);
1188
1189         return ahash_enqueue_req(jrdev, ahash_done, req,
1190                                  digestsize, DMA_FROM_DEVICE);
1191  unmap:
1192         ahash_unmap(jrdev, edesc, req, digestsize);
1193         kfree(edesc);
1194         return -ENOMEM;
1195 }
1196
1197 /* submit ahash update if it is the first job descriptor after update */
1198 static int ahash_update_no_ctx(struct ahash_request *req)
1199 {
1200         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1201         struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
1202         struct caam_hash_state *state = ahash_request_ctx_dma(req);
1203         struct device *jrdev = ctx->jrdev;
1204         u8 *buf = state->buf;
1205         int *buflen = &state->buflen;
1206         int *next_buflen = &state->next_buflen;
1207         int blocksize = crypto_ahash_blocksize(ahash);
1208         int in_len = *buflen + req->nbytes, to_hash;
1209         int sec4_sg_bytes, src_nents, mapped_nents;
1210         struct ahash_edesc *edesc;
1211         u32 *desc;
1212         int ret = 0;
1213
1214         *next_buflen = in_len & (blocksize - 1);
1215         to_hash = in_len - *next_buflen;
1216
1217         /*
1218          * For XCBC and CMAC, if to_hash is multiple of block size,
1219          * keep last block in internal buffer
1220          */
1221         if ((is_xcbc_aes(ctx->adata.algtype) ||
1222              is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
1223              (*next_buflen == 0)) {
1224                 *next_buflen = blocksize;
1225                 to_hash -= blocksize;
1226         }
1227
1228         if (to_hash) {
1229                 int pad_nents;
1230                 int src_len = req->nbytes - *next_buflen;
1231
1232                 src_nents = sg_nents_for_len(req->src, src_len);
1233                 if (src_nents < 0) {
1234                         dev_err(jrdev, "Invalid number of src SG.\n");
1235                         return src_nents;
1236                 }
1237
1238                 if (src_nents) {
1239                         mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1240                                                   DMA_TO_DEVICE);
1241                         if (!mapped_nents) {
1242                                 dev_err(jrdev, "unable to DMA map source\n");
1243                                 return -ENOMEM;
1244                         }
1245                 } else {
1246                         mapped_nents = 0;
1247                 }
1248
1249                 pad_nents = pad_sg_nents(1 + mapped_nents);
1250                 sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);
1251
1252                 /*
1253                  * allocate space for base edesc and hw desc commands,
1254                  * link tables
1255                  */
1256                 edesc = ahash_edesc_alloc(req, pad_nents,
1257                                           ctx->sh_desc_update_first,
1258                                           ctx->sh_desc_update_first_dma);
1259                 if (!edesc) {
1260                         dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1261                         return -ENOMEM;
1262                 }
1263
1264                 edesc->src_nents = src_nents;
1265                 edesc->sec4_sg_bytes = sec4_sg_bytes;
1266
1267                 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
1268                 if (ret)
1269                         goto unmap_ctx;
1270
1271                 sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);
1272
1273                 desc = edesc->hw_desc;
1274
1275                 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1276                                                     sec4_sg_bytes,
1277                                                     DMA_TO_DEVICE);
1278                 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1279                         dev_err(jrdev, "unable to map S/G table\n");
1280                         ret = -ENOMEM;
1281                         goto unmap_ctx;
1282                 }
1283
1284                 append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
1285
1286                 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1287                 if (ret)
1288                         goto unmap_ctx;
1289
1290                 print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1291                                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
1292                                      desc_bytes(desc), 1);
1293
1294                 ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
1295                                         ctx->ctx_len, DMA_TO_DEVICE);
1296                 if ((ret != -EINPROGRESS) && (ret != -EBUSY))
1297                         return ret;
1298                 state->update = ahash_update_ctx;
1299                 state->finup = ahash_finup_ctx;
1300                 state->final = ahash_final_ctx;
1301         } else if (*next_buflen) {
1302                 scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
1303                                          req->nbytes, 0);
1304                 *buflen = *next_buflen;
1305
1306                 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
1307                                      DUMP_PREFIX_ADDRESS, 16, 4, buf,
1308                                      *buflen, 1);
1309         }
1310
1311         return ret;
1312  unmap_ctx:
1313         ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
1314         kfree(edesc);
1315         return ret;
1316 }
1317
1318 /* submit ahash finup if it is the first job descriptor after update */
1319 static int ahash_finup_no_ctx(struct ahash_request *req)
1320 {
1321         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1322         struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
1323         struct caam_hash_state *state = ahash_request_ctx_dma(req);
1324         struct device *jrdev = ctx->jrdev;
1325         int buflen = state->buflen;
1326         u32 *desc;
1327         int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
1328         int digestsize = crypto_ahash_digestsize(ahash);
1329         struct ahash_edesc *edesc;
1330         int ret;
1331
1332         src_nents = sg_nents_for_len(req->src, req->nbytes);
1333         if (src_nents < 0) {
1334                 dev_err(jrdev, "Invalid number of src SG.\n");
1335                 return src_nents;
1336         }
1337
1338         if (src_nents) {
1339                 mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1340                                           DMA_TO_DEVICE);
1341                 if (!mapped_nents) {
1342                         dev_err(jrdev, "unable to DMA map source\n");
1343                         return -ENOMEM;
1344                 }
1345         } else {
1346                 mapped_nents = 0;
1347         }
1348
1349         sec4_sg_src_index = 2;
1350         sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
1351                          sizeof(struct sec4_sg_entry);
1352
1353         /* allocate space for base edesc and hw desc commands, link tables */
1354         edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
1355                                   ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
1356         if (!edesc) {
1357                 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1358                 return -ENOMEM;
1359         }
1360
1361         desc = edesc->hw_desc;
1362
1363         edesc->src_nents = src_nents;
1364         edesc->sec4_sg_bytes = sec4_sg_bytes;
1365
1366         ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
1367         if (ret)
1368                 goto unmap;
1369
1370         ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
1371                                   req->nbytes);
1372         if (ret) {
1373                 dev_err(jrdev, "unable to map S/G table\n");
1374                 goto unmap;
1375         }
1376
1377         ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
1378         if (ret)
1379                 goto unmap;
1380
1381         print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1382                              DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1383                              1);
1384
1385         return ahash_enqueue_req(jrdev, ahash_done, req,
1386                                  digestsize, DMA_FROM_DEVICE);
1387  unmap:
1388         ahash_unmap(jrdev, edesc, req, digestsize);
1389         kfree(edesc);
1390         return -ENOMEM;
1392 }
1393
1394 /* submit first update job descriptor after init */
1395 static int ahash_update_first(struct ahash_request *req)
1396 {
1397         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1398         struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
1399         struct caam_hash_state *state = ahash_request_ctx_dma(req);
1400         struct device *jrdev = ctx->jrdev;
1401         u8 *buf = state->buf;
1402         int *buflen = &state->buflen;
1403         int *next_buflen = &state->next_buflen;
1404         int to_hash;
1405         int blocksize = crypto_ahash_blocksize(ahash);
1406         u32 *desc;
1407         int src_nents, mapped_nents;
1408         struct ahash_edesc *edesc;
1409         int ret = 0;
1410
1411         *next_buflen = req->nbytes & (blocksize - 1);
1412         to_hash = req->nbytes - *next_buflen;
1413
1414         /*
1415          * For XCBC and CMAC, if to_hash is a multiple of the block size,
1416          * keep the last block in the internal buffer
1417          */
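        /*
         * For example, with blocksize = 16 (AES) and req->nbytes = 48,
         * *next_buflen is initially 0, so the branch below holds the final
         * block back: to_hash becomes 32 and *next_buflen becomes 16.
         */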
1418         if ((is_xcbc_aes(ctx->adata.algtype) ||
1419              is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
1420              (*next_buflen == 0)) {
1421                 *next_buflen = blocksize;
1422                 to_hash -= blocksize;
1423         }
1424
1425         if (to_hash) {
1426                 src_nents = sg_nents_for_len(req->src,
1427                                              req->nbytes - *next_buflen);
1428                 if (src_nents < 0) {
1429                         dev_err(jrdev, "Invalid number of src SG.\n");
1430                         return src_nents;
1431                 }
1432
1433                 if (src_nents) {
1434                         mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1435                                                   DMA_TO_DEVICE);
1436                         if (!mapped_nents) {
1437                                 dev_err(jrdev, "unable to map source for DMA\n");
1438                                 return -ENOMEM;
1439                         }
1440                 } else {
1441                         mapped_nents = 0;
1442                 }
1443
1444                 /*
1445                  * allocate space for base edesc and hw desc commands,
1446                  * link tables
1447                  */
1448                 edesc = ahash_edesc_alloc(req, mapped_nents > 1 ?
1449                                           mapped_nents : 0,
1450                                           ctx->sh_desc_update_first,
1451                                           ctx->sh_desc_update_first_dma);
1452                 if (!edesc) {
1453                         dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1454                         return -ENOMEM;
1455                 }
1456
1457                 edesc->src_nents = src_nents;
1458
1459                 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
1460                                           to_hash);
1461                 if (ret)
1462                         goto unmap_ctx;
1463
1464                 desc = edesc->hw_desc;
1465
1466                 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1467                 if (ret)
1468                         goto unmap_ctx;
1469
1470                 print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1471                                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
1472                                      desc_bytes(desc), 1);
1473
1474                 ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
1475                                         ctx->ctx_len, DMA_TO_DEVICE);
1476                 if ((ret != -EINPROGRESS) && (ret != -EBUSY))
1477                         return ret;
1478                 state->update = ahash_update_ctx;
1479                 state->finup = ahash_finup_ctx;
1480                 state->final = ahash_final_ctx;
1481         } else if (*next_buflen) {
1482                 state->update = ahash_update_no_ctx;
1483                 state->finup = ahash_finup_no_ctx;
1484                 state->final = ahash_final_no_ctx;
1485                 scatterwalk_map_and_copy(buf, req->src, 0,
1486                                          req->nbytes, 0);
1487                 *buflen = *next_buflen;
1488
1489                 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
1490                                      DUMP_PREFIX_ADDRESS, 16, 4, buf,
1491                                      *buflen, 1);
1492         }
1493
1494         return ret;
1495  unmap_ctx:
1496         ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
1497         kfree(edesc);
1498         return ret;
1499 }
1500
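/*
 * A finup issued before any data has been hashed is a one-shot operation:
 * there is no running context yet, so simply hash everything via
 * ahash_digest().
 */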
1501 static int ahash_finup_first(struct ahash_request *req)
1502 {
1503         return ahash_digest(req);
1504 }
1505
1506 static int ahash_init(struct ahash_request *req)
1507 {
1508         struct caam_hash_state *state = ahash_request_ctx_dma(req);
1509
1510         state->update = ahash_update_first;
1511         state->finup = ahash_finup_first;
1512         state->final = ahash_final_no_ctx;
1513
1514         state->ctx_dma = 0;
1515         state->ctx_dma_len = 0;
1516         state->buf_dma = 0;
1517         state->buflen = 0;
1518         state->next_buflen = 0;
1519
1520         return 0;
1521 }
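
/*
 * Resulting handler flow: the first ->update() runs ahash_update_first();
 * once data has actually been sent to the accelerator it switches the
 * request over to the ahash_*_ctx() handlers, while a request that only
 * ever buffers partial blocks moves to (or stays on) the no-ctx variants.
 */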
1522
1523 static int ahash_update(struct ahash_request *req)
1524 {
1525         struct caam_hash_state *state = ahash_request_ctx_dma(req);
1526
1527         return state->update(req);
1528 }
1529
1530 static int ahash_finup(struct ahash_request *req)
1531 {
1532         struct caam_hash_state *state = ahash_request_ctx_dma(req);
1533
1534         return state->finup(req);
1535 }
1536
1537 static int ahash_final(struct ahash_request *req)
1538 {
1539         struct caam_hash_state *state = ahash_request_ctx_dma(req);
1540
1541         return state->final(req);
1542 }
1543
1544 static int ahash_export(struct ahash_request *req, void *out)
1545 {
1546         struct caam_hash_state *state = ahash_request_ctx_dma(req);
1547         struct caam_export_state *export = out;
1548         u8 *buf = state->buf;
1549         int len = state->buflen;
1550
1551         memcpy(export->buf, buf, len);
1552         memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
1553         export->buflen = len;
1554         export->update = state->update;
1555         export->final = state->final;
1556         export->finup = state->finup;
1557
1558         return 0;
1559 }
1560
1561 static int ahash_import(struct ahash_request *req, const void *in)
1562 {
1563         struct caam_hash_state *state = ahash_request_ctx_dma(req);
1564         const struct caam_export_state *export = in;
1565
1566         memset(state, 0, sizeof(*state));
1567         memcpy(state->buf, export->buf, export->buflen);
1568         memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
1569         state->buflen = export->buflen;
1570         state->update = export->update;
1571         state->final = export->final;
1572         state->finup = export->finup;
1573
1574         return 0;
1575 }
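
/*
 * Caller-side sketch (illustrative only, not part of this driver): a
 * partial hash is snapshotted and resumed through the generic ahash API,
 * which lands in ahash_export()/ahash_import() above.  Assumes an already
 * allocated tfm/request; async completion (-EINPROGRESS) handling elided:
 *
 *	u8 state[sizeof(struct caam_export_state)];
 *
 *	crypto_ahash_init(req);
 *	crypto_ahash_update(req);		hash the first chunk
 *	crypto_ahash_export(req, state);	snapshot buf[] and caam_ctx[]
 *	crypto_ahash_import(req, state);	resume from the snapshot
 *	crypto_ahash_finup(req);		hash the rest, emit the digest
 */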
1576
1577 struct caam_hash_template {
1578         char name[CRYPTO_MAX_ALG_NAME];
1579         char driver_name[CRYPTO_MAX_ALG_NAME];
1580         char hmac_name[CRYPTO_MAX_ALG_NAME];
1581         char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
1582         unsigned int blocksize;
1583         struct ahash_alg template_ahash;
1584         u32 alg_type;
1585 };
1586
1587 /* ahash algorithm templates */
1588 static struct caam_hash_template driver_hash[] = {
1589         {
1590                 .name = "sha1",
1591                 .driver_name = "sha1-caam",
1592                 .hmac_name = "hmac(sha1)",
1593                 .hmac_driver_name = "hmac-sha1-caam",
1594                 .blocksize = SHA1_BLOCK_SIZE,
1595                 .template_ahash = {
1596                         .init = ahash_init,
1597                         .update = ahash_update,
1598                         .final = ahash_final,
1599                         .finup = ahash_finup,
1600                         .digest = ahash_digest,
1601                         .export = ahash_export,
1602                         .import = ahash_import,
1603                         .setkey = ahash_setkey,
1604                         .halg = {
1605                                 .digestsize = SHA1_DIGEST_SIZE,
1606                                 .statesize = sizeof(struct caam_export_state),
1607                         },
1608                 },
1609                 .alg_type = OP_ALG_ALGSEL_SHA1,
1610         }, {
1611                 .name = "sha224",
1612                 .driver_name = "sha224-caam",
1613                 .hmac_name = "hmac(sha224)",
1614                 .hmac_driver_name = "hmac-sha224-caam",
1615                 .blocksize = SHA224_BLOCK_SIZE,
1616                 .template_ahash = {
1617                         .init = ahash_init,
1618                         .update = ahash_update,
1619                         .final = ahash_final,
1620                         .finup = ahash_finup,
1621                         .digest = ahash_digest,
1622                         .export = ahash_export,
1623                         .import = ahash_import,
1624                         .setkey = ahash_setkey,
1625                         .halg = {
1626                                 .digestsize = SHA224_DIGEST_SIZE,
1627                                 .statesize = sizeof(struct caam_export_state),
1628                         },
1629                 },
1630                 .alg_type = OP_ALG_ALGSEL_SHA224,
1631         }, {
1632                 .name = "sha256",
1633                 .driver_name = "sha256-caam",
1634                 .hmac_name = "hmac(sha256)",
1635                 .hmac_driver_name = "hmac-sha256-caam",
1636                 .blocksize = SHA256_BLOCK_SIZE,
1637                 .template_ahash = {
1638                         .init = ahash_init,
1639                         .update = ahash_update,
1640                         .final = ahash_final,
1641                         .finup = ahash_finup,
1642                         .digest = ahash_digest,
1643                         .export = ahash_export,
1644                         .import = ahash_import,
1645                         .setkey = ahash_setkey,
1646                         .halg = {
1647                                 .digestsize = SHA256_DIGEST_SIZE,
1648                                 .statesize = sizeof(struct caam_export_state),
1649                         },
1650                 },
1651                 .alg_type = OP_ALG_ALGSEL_SHA256,
1652         }, {
1653                 .name = "sha384",
1654                 .driver_name = "sha384-caam",
1655                 .hmac_name = "hmac(sha384)",
1656                 .hmac_driver_name = "hmac-sha384-caam",
1657                 .blocksize = SHA384_BLOCK_SIZE,
1658                 .template_ahash = {
1659                         .init = ahash_init,
1660                         .update = ahash_update,
1661                         .final = ahash_final,
1662                         .finup = ahash_finup,
1663                         .digest = ahash_digest,
1664                         .export = ahash_export,
1665                         .import = ahash_import,
1666                         .setkey = ahash_setkey,
1667                         .halg = {
1668                                 .digestsize = SHA384_DIGEST_SIZE,
1669                                 .statesize = sizeof(struct caam_export_state),
1670                         },
1671                 },
1672                 .alg_type = OP_ALG_ALGSEL_SHA384,
1673         }, {
1674                 .name = "sha512",
1675                 .driver_name = "sha512-caam",
1676                 .hmac_name = "hmac(sha512)",
1677                 .hmac_driver_name = "hmac-sha512-caam",
1678                 .blocksize = SHA512_BLOCK_SIZE,
1679                 .template_ahash = {
1680                         .init = ahash_init,
1681                         .update = ahash_update,
1682                         .final = ahash_final,
1683                         .finup = ahash_finup,
1684                         .digest = ahash_digest,
1685                         .export = ahash_export,
1686                         .import = ahash_import,
1687                         .setkey = ahash_setkey,
1688                         .halg = {
1689                                 .digestsize = SHA512_DIGEST_SIZE,
1690                                 .statesize = sizeof(struct caam_export_state),
1691                         },
1692                 },
1693                 .alg_type = OP_ALG_ALGSEL_SHA512,
1694         }, {
1695                 .name = "md5",
1696                 .driver_name = "md5-caam",
1697                 .hmac_name = "hmac(md5)",
1698                 .hmac_driver_name = "hmac-md5-caam",
1699                 .blocksize = MD5_BLOCK_WORDS * 4,
1700                 .template_ahash = {
1701                         .init = ahash_init,
1702                         .update = ahash_update,
1703                         .final = ahash_final,
1704                         .finup = ahash_finup,
1705                         .digest = ahash_digest,
1706                         .export = ahash_export,
1707                         .import = ahash_import,
1708                         .setkey = ahash_setkey,
1709                         .halg = {
1710                                 .digestsize = MD5_DIGEST_SIZE,
1711                                 .statesize = sizeof(struct caam_export_state),
1712                         },
1713                 },
1714                 .alg_type = OP_ALG_ALGSEL_MD5,
1715         }, {
1716                 .hmac_name = "xcbc(aes)",
1717                 .hmac_driver_name = "xcbc-aes-caam",
1718                 .blocksize = AES_BLOCK_SIZE,
1719                 .template_ahash = {
1720                         .init = ahash_init,
1721                         .update = ahash_update,
1722                         .final = ahash_final,
1723                         .finup = ahash_finup,
1724                         .digest = ahash_digest,
1725                         .export = ahash_export,
1726                         .import = ahash_import,
1727                         .setkey = axcbc_setkey,
1728                         .halg = {
1729                                 .digestsize = AES_BLOCK_SIZE,
1730                                 .statesize = sizeof(struct caam_export_state),
1731                         },
1732                 },
1733                 .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
1734         }, {
1735                 .hmac_name = "cmac(aes)",
1736                 .hmac_driver_name = "cmac-aes-caam",
1737                 .blocksize = AES_BLOCK_SIZE,
1738                 .template_ahash = {
1739                         .init = ahash_init,
1740                         .update = ahash_update,
1741                         .final = ahash_final,
1742                         .finup = ahash_finup,
1743                         .digest = ahash_digest,
1744                         .export = ahash_export,
1745                         .import = ahash_import,
1746                         .setkey = acmac_setkey,
1747                         .halg = {
1748                                 .digestsize = AES_BLOCK_SIZE,
1749                                 .statesize = sizeof(struct caam_export_state),
1750                         },
1751                 },
1752                 .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC,
1753         },
1754 };
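
/*
 * Illustrative lookup (not part of this driver): after registration, users
 * reach these templates through the generic hash API, e.g.
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *
 * which binds to the "hmac-sha256-caam" entry above whenever the CAAM
 * instantiation wins the priority-based algorithm selection.
 */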
1755
1756 struct caam_hash_alg {
1757         struct list_head entry;
1758         int alg_type;
1759         struct ahash_alg ahash_alg;
1760 };
1761
1762 static int caam_hash_cra_init(struct crypto_tfm *tfm)
1763 {
1764         struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
1765         struct crypto_alg *base = tfm->__crt_alg;
1766         struct hash_alg_common *halg =
1767                  container_of(base, struct hash_alg_common, base);
1768         struct ahash_alg *alg =
1769                  container_of(halg, struct ahash_alg, halg);
1770         struct caam_hash_alg *caam_hash =
1771                  container_of(alg, struct caam_hash_alg, ahash_alg);
1772         struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
1773         /*
              * Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512.
              * SHA-224 and SHA-384 keep the full 32- and 64-byte running
              * state, hence the bare 32 and 64 entries below.
              */
1774         static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
1775                                          HASH_MSG_LEN + SHA1_DIGEST_SIZE,
1776                                          HASH_MSG_LEN + 32,
1777                                          HASH_MSG_LEN + SHA256_DIGEST_SIZE,
1778                                          HASH_MSG_LEN + 64,
1779                                          HASH_MSG_LEN + SHA512_DIGEST_SIZE };
1780         const size_t sh_desc_update_offset = offsetof(struct caam_hash_ctx,
1781                                                       sh_desc_update);
1782         dma_addr_t dma_addr;
1783         struct caam_drv_private *priv;
1784
1785         /*
1786          * Get a job ring from the Job Ring driver to ensure in-order
1787          * crypto request processing per tfm.
1788          */
1789         ctx->jrdev = caam_jr_alloc();
1790         if (IS_ERR(ctx->jrdev)) {
1791                 pr_err("Job Ring Device allocation for transform failed\n");
1792                 return PTR_ERR(ctx->jrdev);
1793         }
1794
1795         priv = dev_get_drvdata(ctx->jrdev->parent);
1796
1797         if (is_xcbc_aes(caam_hash->alg_type)) {
1798                 ctx->dir = DMA_TO_DEVICE;
1799                 ctx->key_dir = DMA_BIDIRECTIONAL;
1800                 ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
1801                 ctx->ctx_len = 48;
1802         } else if (is_cmac_aes(caam_hash->alg_type)) {
1803                 ctx->dir = DMA_TO_DEVICE;
1804                 ctx->key_dir = DMA_NONE;
1805                 ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
1806                 ctx->ctx_len = 32;
1807         } else {
1808                 if (priv->era >= 6) {
1809                         ctx->dir = DMA_BIDIRECTIONAL;
1810                         ctx->key_dir = alg->setkey ? DMA_TO_DEVICE : DMA_NONE;
1811                 } else {
1812                         ctx->dir = DMA_TO_DEVICE;
1813                         ctx->key_dir = DMA_NONE;
1814                 }
1815                 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
1816                 ctx->ctx_len = runninglen[(ctx->adata.algtype &
1817                                            OP_ALG_ALGSEL_SUBMASK) >>
1818                                           OP_ALG_ALGSEL_SHIFT];
1819         }
1820
1821         if (ctx->key_dir != DMA_NONE) {
1822                 ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
1823                                                           ARRAY_SIZE(ctx->key),
1824                                                           ctx->key_dir,
1825                                                           DMA_ATTR_SKIP_CPU_SYNC);
1826                 if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) {
1827                         dev_err(ctx->jrdev, "unable to map key\n");
1828                         caam_jr_free(ctx->jrdev);
1829                         return -ENOMEM;
1830                 }
1831         }
1832
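        /*
         * The shared descriptors occupy one contiguous region of
         * caam_hash_ctx (from sh_desc_update up to the key), so a single
         * DMA mapping covers them all; the per-descriptor bus addresses
         * below are derived from dma_addr by their offsets in the context.
         */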
1833         dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
1834                                         offsetof(struct caam_hash_ctx, key) -
1835                                         sh_desc_update_offset,
1836                                         ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1837         if (dma_mapping_error(ctx->jrdev, dma_addr)) {
1838                 dev_err(ctx->jrdev, "unable to map shared descriptors\n");
1839
1840                 if (ctx->key_dir != DMA_NONE)
1841                         dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
1842                                                ARRAY_SIZE(ctx->key),
1843                                                ctx->key_dir,
1844                                                DMA_ATTR_SKIP_CPU_SYNC);
1845
1846                 caam_jr_free(ctx->jrdev);
1847                 return -ENOMEM;
1848         }
1849
1850         ctx->sh_desc_update_dma = dma_addr;
1851         ctx->sh_desc_update_first_dma = dma_addr +
1852                                         offsetof(struct caam_hash_ctx,
1853                                                  sh_desc_update_first) -
1854                                         sh_desc_update_offset;
1855         ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
1856                                                    sh_desc_fin) -
1857                                         sh_desc_update_offset;
1858         ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
1859                                                       sh_desc_digest) -
1860                                         sh_desc_update_offset;
1861
1862         ctx->enginectx.op.do_one_request = ahash_do_one_req;
1863
1864         crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state));
1865
1866         /*
1867          * For keyed hash algorithms, shared descriptors
1868          * will be created later in the setkey() callback.
1869          */
1870         return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
1871 }
1872
1873 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
1874 {
1875         struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
1876
1877         dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
1878                                offsetof(struct caam_hash_ctx, key) -
1879                                offsetof(struct caam_hash_ctx, sh_desc_update),
1880                                ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1881         if (ctx->key_dir != DMA_NONE)
1882                 dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
1883                                        ARRAY_SIZE(ctx->key), ctx->key_dir,
1884                                        DMA_ATTR_SKIP_CPU_SYNC);
1885         caam_jr_free(ctx->jrdev);
1886 }
1887
1888 void caam_algapi_hash_exit(void)
1889 {
1890         struct caam_hash_alg *t_alg, *n;
1891
1892         if (!hash_list.next)
1893                 return;
1894
1895         list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
1896                 crypto_unregister_ahash(&t_alg->ahash_alg);
1897                 list_del(&t_alg->entry);
1898                 kfree(t_alg);
1899         }
1900 }
1901
1902 static struct caam_hash_alg *
1903 caam_hash_alloc(struct caam_hash_template *template,
1904                 bool keyed)
1905 {
1906         struct caam_hash_alg *t_alg;
1907         struct ahash_alg *halg;
1908         struct crypto_alg *alg;
1909
1910         t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
1911         if (!t_alg) {
1912                 pr_err("failed to allocate t_alg\n");
1913                 return ERR_PTR(-ENOMEM);
1914         }
1915
1916         t_alg->ahash_alg = template->template_ahash;
1917         halg = &t_alg->ahash_alg;
1918         alg = &halg->halg.base;
1919
1920         if (keyed) {
1921                 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1922                          template->hmac_name);
1923                 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1924                          template->hmac_driver_name);
1925         } else {
1926                 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1927                          template->name);
1928                 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1929                          template->driver_name);
1930                 t_alg->ahash_alg.setkey = NULL;
1931         }
1932         alg->cra_module = THIS_MODULE;
1933         alg->cra_init = caam_hash_cra_init;
1934         alg->cra_exit = caam_hash_cra_exit;
1935         alg->cra_ctxsize = sizeof(struct caam_hash_ctx) + crypto_dma_padding();
1936         alg->cra_priority = CAAM_CRA_PRIORITY;
1937         alg->cra_blocksize = template->blocksize;
1938         alg->cra_alignmask = 0;
1939         alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
1940
1941         t_alg->alg_type = template->alg_type;
1942
1943         return t_alg;
1944 }
1945
1946 int caam_algapi_hash_init(struct device *ctrldev)
1947 {
1948         int i = 0, err = 0;
1949         struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
1950         unsigned int md_limit = SHA512_DIGEST_SIZE;
1951         u32 md_inst, md_vid;
1952
1953         /*
1954          * Register crypto algorithms the device supports.  First, identify
1955          * presence and attributes of the MD block.
1956          */
1957         if (priv->era < 10) {
1958                 struct caam_perfmon __iomem *perfmon = &priv->jr[0]->perfmon;
1959
1960                 md_vid = (rd_reg32(&perfmon->cha_id_ls) &
1961                           CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
1962                 md_inst = (rd_reg32(&perfmon->cha_num_ls) &
1963                            CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
1964         } else {
1965                 u32 mdha = rd_reg32(&priv->jr[0]->vreg.mdha);
1966
1967                 md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
1968                 md_inst = mdha & CHA_VER_NUM_MASK;
1969         }
1970
1971         /*
1972          * Skip registration of any hashing algorithms if the MD block
1973          * is not present.
1974          */
1975         if (!md_inst)
1976                 return 0;
1977
1978         /* Limit digest size based on LP256 */
1979         if (md_vid == CHA_VER_VID_MD_LP256)
1980                 md_limit = SHA256_DIGEST_SIZE;
1981
1982         INIT_LIST_HEAD(&hash_list);
1983
1984         /* register crypto algorithms the device supports */
1985         for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
1986                 struct caam_hash_alg *t_alg;
1987                 struct caam_hash_template *alg = driver_hash + i;
1988
1989                 /* If the MD size is not supported by the device, skip registration */
1990                 if (is_mdha(alg->alg_type) &&
1991                     alg->template_ahash.halg.digestsize > md_limit)
1992                         continue;
1993
1994                 /* register hmac version */
1995                 t_alg = caam_hash_alloc(alg, true);
1996                 if (IS_ERR(t_alg)) {
1997                         err = PTR_ERR(t_alg);
1998                         pr_warn("%s alg allocation failed\n",
1999                                 alg->hmac_driver_name);
2000                         continue;
2001                 }
2002
2003                 err = crypto_register_ahash(&t_alg->ahash_alg);
2004                 if (err) {
2005                         pr_warn("%s alg registration failed: %d\n",
2006                                 t_alg->ahash_alg.halg.base.cra_driver_name,
2007                                 err);
2008                         kfree(t_alg);
2009                 } else {
2010                         list_add_tail(&t_alg->entry, &hash_list);
                }
2011
2012                 if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)
2013                         continue;
2014
2015                 /* register unkeyed version */
2016                 t_alg = caam_hash_alloc(alg, false);
2017                 if (IS_ERR(t_alg)) {
2018                         err = PTR_ERR(t_alg);
2019                         pr_warn("%s alg allocation failed\n", alg->driver_name);
2020                         continue;
2021                 }
2022
2023                 err = crypto_register_ahash(&t_alg->ahash_alg);
2024                 if (err) {
2025                         pr_warn("%s alg registration failed: %d\n",
2026                                 t_alg->ahash_alg.halg.base.cra_driver_name,
2027                                 err);
2028                         kfree(t_alg);
2029                 } else {
2030                         list_add_tail(&t_alg->entry, &hash_list);
                }
2031         }
2032
2033         return err;
2034 }