1 /*
2  * caam - Freescale FSL CAAM support for ahash functions of crypto API
3  *
4  * Copyright 2011 Freescale Semiconductor, Inc.
5  *
6  * Based on caamalg.c crypto API driver.
7  *
8  * relationship of digest job descriptor or first job descriptor after init to
9  * shared descriptors:
10  *
11  * ---------------                     ---------------
12  * | JobDesc #1  |-------------------->|  ShareDesc  |
13  * | *(packet 1) |                     |  (hashKey)  |
14  * ---------------                     | (operation) |
15  *                                     ---------------
16  *
17  * relationship of subsequent job descriptors to shared descriptors:
18  *
19  * ---------------                     ---------------
20  * | JobDesc #2  |-------------------->|  ShareDesc  |
21  * | *(packet 2) |      |------------->|  (hashKey)  |
22  * ---------------      |    |-------->| (operation) |
23  *       .              |    |         | (load ctx2) |
24  *       .              |    |         ---------------
25  * ---------------      |    |
26  * | JobDesc #3  |------|    |
27  * | *(packet 3) |           |
28  * ---------------           |
29  *       .                   |
30  *       .                   |
31  * ---------------           |
32  * | JobDesc #4  |------------
33  * | *(packet 4) |
34  * ---------------
35  *
36  * The SharedDesc never changes for a connection unless rekeyed, but
37  * each packet will likely be in a different place. So all we need
38  * to know to process the packet is where the input is, where the
39  * output goes, and what context we want to process with. Context is
40  * in the SharedDesc, packet references in the JobDesc.
41  *
42  * So, a job desc looks like:
43  *
44  * ---------------------
45  * | Header            |
46  * | ShareDesc Pointer |
47  * | SEQ_OUT_PTR       |
48  * | (output buffer)   |
49  * | (output length)   |
50  * | SEQ_IN_PTR        |
51  * | (input buffer)    |
52  * | (input length)    |
53  * ---------------------
54  */
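
/*
 * Illustrative sketch only (not part of the driver flow): a job descriptor
 * of the shape above can be built with the descriptor construction helpers
 * used throughout this file. sh_desc/sh_desc_dma are assumed to hold an
 * already built and DMA-mapped shared descriptor, and src_dma/dst_dma are
 * assumed to be DMA-mapped input/output buffers:
 *
 *	u32 jd[MAX_CAAM_DESCSIZE];
 *
 *	init_job_desc_shared(jd, sh_desc_dma, desc_len(sh_desc),
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_out_ptr(jd, dst_dma, digestsize, 0);
 *	append_seq_in_ptr(jd, src_dma, nbytes, 0);
 *	ret = caam_jr_enqueue(jrdev, jd, done_callback, req);
 *
 * done_callback and req are placeholders for the caller's completion
 * handler and context; error handling is omitted.
 */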
55
56 #include "compat.h"
57
58 #include "regs.h"
59 #include "intern.h"
60 #include "desc_constr.h"
61 #include "jr.h"
62 #include "error.h"
63 #include "sg_sw_sec4.h"
64 #include "key_gen.h"
65
66 #define CAAM_CRA_PRIORITY               3000
67
68 /* max hash key is max split key size */
69 #define CAAM_MAX_HASH_KEY_SIZE          (SHA512_DIGEST_SIZE * 2)
70
71 #define CAAM_MAX_HASH_BLOCK_SIZE        SHA512_BLOCK_SIZE
72 #define CAAM_MAX_HASH_DIGEST_SIZE       SHA512_DIGEST_SIZE
73
74 /* length of descriptors text */
75 #define DESC_AHASH_BASE                 (4 * CAAM_CMD_SZ)
76 #define DESC_AHASH_UPDATE_LEN           (6 * CAAM_CMD_SZ)
77 #define DESC_AHASH_UPDATE_FIRST_LEN     (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
78 #define DESC_AHASH_FINAL_LEN            (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
79 #define DESC_AHASH_FINUP_LEN            (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
80 #define DESC_AHASH_DIGEST_LEN           (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
81
82 #define DESC_HASH_MAX_USED_BYTES        (DESC_AHASH_FINAL_LEN + \
83                                          CAAM_MAX_HASH_KEY_SIZE)
84 #define DESC_HASH_MAX_USED_LEN          (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
85
86 /* caam context sizes for hashes: running digest + 8-byte message length */
87 #define HASH_MSG_LEN                    8
88 #define MAX_CTX_LEN                     (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
89
90 #ifdef DEBUG
91 /* for print_hex_dumps with line references */
92 #define debug(format, arg...) printk(format, ##arg)
93 #else
94 #define debug(format, arg...)
95 #endif
96
97
98 static struct list_head hash_list;
99
100 /* ahash per-session context */
101 struct caam_hash_ctx {
102         struct device *jrdev;
103         u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
104         u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
105         u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
106         u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
107         u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
108         dma_addr_t sh_desc_update_dma;
109         dma_addr_t sh_desc_update_first_dma;
110         dma_addr_t sh_desc_fin_dma;
111         dma_addr_t sh_desc_digest_dma;
112         dma_addr_t sh_desc_finup_dma;
113         u32 alg_type;
114         u32 alg_op;
115         u8 key[CAAM_MAX_HASH_KEY_SIZE];
116         dma_addr_t key_dma;
117         int ctx_len;
118         unsigned int split_key_len;
119         unsigned int split_key_pad_len;
120 };
121
122 /* ahash state */
123 struct caam_hash_state {
124         dma_addr_t buf_dma;
125         dma_addr_t ctx_dma;
126         u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
127         int buflen_0;
128         u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
129         int buflen_1;
130         u8 caam_ctx[MAX_CTX_LEN];
131         int (*update)(struct ahash_request *req);
132         int (*final)(struct ahash_request *req);
133         int (*finup)(struct ahash_request *req);
134         int current_buf;
135 };
136
137 /* Common job descriptor seq in/out ptr routines */
138
139 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
140 static inline void map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
141                                        struct caam_hash_state *state,
142                                        int ctx_len)
143 {
144         state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
145                                         ctx_len, DMA_FROM_DEVICE);
146         append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
147 }
148
149 /* Map req->result, and append seq_out_ptr command that points to it */
150 static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
151                                                 u8 *result, int digestsize)
152 {
153         dma_addr_t dst_dma;
154
155         dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
156         append_seq_out_ptr(desc, dst_dma, digestsize, 0);
157
158         return dst_dma;
159 }
160
161 /* Map current buffer in state and put it in link table */
162 static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
163                                             struct sec4_sg_entry *sec4_sg,
164                                             u8 *buf, int buflen)
165 {
166         dma_addr_t buf_dma;
167
168         buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
169         dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);
170
171         return buf_dma;
172 }
173
174 /* Map req->src and put it in link table */
175 static inline void src_map_to_sec4_sg(struct device *jrdev,
176                                       struct scatterlist *src, int src_nents,
177                                       struct sec4_sg_entry *sec4_sg,
178                                       bool chained)
179 {
180         dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);
181         sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
182 }
183
184 /*
185  * Only put the buffer in the link table if it contains data. The buffer
186  * may have been used previously, in which case it must be unmapped first.
187  */
188 static inline dma_addr_t
189 try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
190                        u8 *buf, dma_addr_t buf_dma, int buflen,
191                        int last_buflen)
192 {
193         if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
194                 dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
195         if (buflen)
196                 buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
197         else
198                 buf_dma = 0;
199
200         return buf_dma;
201 }
202
203 /* Map state->caam_ctx, and add it to link table */
204 static inline void ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
205                                       struct caam_hash_state *state,
206                                       int ctx_len,
207                                       struct sec4_sg_entry *sec4_sg,
208                                       u32 flag)
209 {
210         state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
211         dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
212 }
213
214 /* Common shared descriptor commands */
215 static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
216 {
217         append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
218                           ctx->split_key_len, CLASS_2 |
219                           KEY_DEST_MDHA_SPLIT | KEY_ENC);
220 }
221
222 /* Append key if it has been set */
223 static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
224 {
225         u32 *key_jump_cmd;
226
227         init_sh_desc(desc, HDR_SHARE_SERIAL);
228
229         if (ctx->split_key_len) {
230                 /* Skip if already shared */
231                 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
232                                            JUMP_COND_SHRD);
233
234                 append_key_ahash(desc, ctx);
235
236                 set_jump_tgt_here(desc, key_jump_cmd);
237         }
238
239         /* Propagate errors from shared to job descriptor */
240         append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
241 }
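
/*
 * Descriptive note: the conditional JUMP above (JUMP_JSL | JUMP_TEST_ALL |
 * JUMP_COND_SHRD) makes key loading effectively idempotent. When this
 * shared descriptor is entered in the "shared" state, i.e. its state
 * (including the class 2 split key) is assumed to still be present from a
 * previous job using the same shared descriptor, the append_key_ahash()
 * command is skipped.
 */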
242
243 /*
244  * For ahash read data from seqin following state->caam_ctx,
245  * and write resulting class2 context to seqout, which may be state->caam_ctx
246  * or req->result
247  */
248 static inline void ahash_append_load_str(u32 *desc, int digestsize)
249 {
250         /* Calculate remaining bytes to read */
251         append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
252
253         /* Read remaining bytes */
254         append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
255                              FIFOLD_TYPE_MSG | KEY_VLF);
256
257         /* Store class2 context bytes */
258         append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
259                          LDST_SRCDST_BYTE_CONTEXT);
260 }
261
262 /*
263  * For ahash update, final and finup, import context, read and write to seqout
264  */
265 static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
266                                          int digestsize,
267                                          struct caam_hash_ctx *ctx)
268 {
269         init_sh_desc_key_ahash(desc, ctx);
270
271         /* Import context from software */
272         append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
273                    LDST_CLASS_2_CCB | ctx->ctx_len);
274
275         /* Class 2 operation */
276         append_operation(desc, op | state | OP_ALG_ENCRYPT);
277
278         /*
279          * Load from buf and/or src and write to req->result or state->context
280          */
281         ahash_append_load_str(desc, digestsize);
282 }
283
284 /* For ahash update_first and digest, read and write to seqout */
285 static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
286                                      int digestsize, struct caam_hash_ctx *ctx)
287 {
288         init_sh_desc_key_ahash(desc, ctx);
289
290         /* Class 2 operation */
291         append_operation(desc, op | state | OP_ALG_ENCRYPT);
292
293         /*
294          * Load from buf and/or src and write to req->result or state->context
295          */
296         ahash_append_load_str(desc, digestsize);
297 }
298
299 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
300 {
301         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
302         int digestsize = crypto_ahash_digestsize(ahash);
303         struct device *jrdev = ctx->jrdev;
304         u32 have_key = 0;
305         u32 *desc;
306
307         if (ctx->split_key_len)
308                 have_key = OP_ALG_AAI_HMAC_PRECOMP;
309
310         /* ahash_update shared descriptor */
311         desc = ctx->sh_desc_update;
312
313         init_sh_desc(desc, HDR_SHARE_SERIAL);
314
315         /* Import context from software */
316         append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
317                    LDST_CLASS_2_CCB | ctx->ctx_len);
318
319         /* Class 2 operation */
320         append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
321                          OP_ALG_ENCRYPT);
322
323         /* Load data and write to result or context */
324         ahash_append_load_str(desc, ctx->ctx_len);
325
326         ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
327                                                  DMA_TO_DEVICE);
328         if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
329                 dev_err(jrdev, "unable to map shared descriptor\n");
330                 return -ENOMEM;
331         }
332 #ifdef DEBUG
333         print_hex_dump(KERN_ERR,
334                        "ahash update shdesc@"__stringify(__LINE__)": ",
335                        DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
336 #endif
337
338         /* ahash_update_first shared descriptor */
339         desc = ctx->sh_desc_update_first;
340
341         ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
342                           ctx->ctx_len, ctx);
343
344         ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
345                                                        desc_bytes(desc),
346                                                        DMA_TO_DEVICE);
347         if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
348                 dev_err(jrdev, "unable to map shared descriptor\n");
349                 return -ENOMEM;
350         }
351 #ifdef DEBUG
352         print_hex_dump(KERN_ERR,
353                        "ahash update first shdesc@"__stringify(__LINE__)": ",
354                        DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
355 #endif
356
357         /* ahash_final shared descriptor */
358         desc = ctx->sh_desc_fin;
359
360         ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
361                               OP_ALG_AS_FINALIZE, digestsize, ctx);
362
363         ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
364                                               DMA_TO_DEVICE);
365         if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
366                 dev_err(jrdev, "unable to map shared descriptor\n");
367                 return -ENOMEM;
368         }
369 #ifdef DEBUG
370         print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
371                        DUMP_PREFIX_ADDRESS, 16, 4, desc,
372                        desc_bytes(desc), 1);
373 #endif
374
375         /* ahash_finup shared descriptor */
376         desc = ctx->sh_desc_finup;
377
378         ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
379                               OP_ALG_AS_FINALIZE, digestsize, ctx);
380
381         ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
382                                                 DMA_TO_DEVICE);
383         if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
384                 dev_err(jrdev, "unable to map shared descriptor\n");
385                 return -ENOMEM;
386         }
387 #ifdef DEBUG
388         print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
389                        DUMP_PREFIX_ADDRESS, 16, 4, desc,
390                        desc_bytes(desc), 1);
391 #endif
392
393         /* ahash_digest shared descriptor */
394         desc = ctx->sh_desc_digest;
395
396         ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
397                           digestsize, ctx);
398
399         ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
400                                                  desc_bytes(desc),
401                                                  DMA_TO_DEVICE);
402         if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
403                 dev_err(jrdev, "unable to map shared descriptor\n");
404                 return -ENOMEM;
405         }
406 #ifdef DEBUG
407         print_hex_dump(KERN_ERR,
408                        "ahash digest shdesc@"__stringify(__LINE__)": ",
409                        DUMP_PREFIX_ADDRESS, 16, 4, desc,
410                        desc_bytes(desc), 1);
411 #endif
412
413         return 0;
414 }
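
/*
 * Descriptive note: the five shared descriptors built above correspond to
 * the hash phases as follows -- sh_desc_update (OP_ALG_AS_UPDATE, context in
 * and out), sh_desc_update_first (OP_ALG_AS_INIT, context out), sh_desc_fin
 * and sh_desc_finup (OP_ALG_AS_FINALIZE, context in, digest out) and
 * sh_desc_digest (OP_ALG_AS_INITFINAL, digest out). Each one is DMA-mapped
 * once here and then referenced by every job descriptor built in the
 * request paths below via init_job_desc_shared().
 */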
415
416 static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
417                               u32 keylen)
418 {
419         return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
420                                ctx->split_key_pad_len, key_in, keylen,
421                                ctx->alg_op);
422 }
423
424 /* Hash the key down to digest size if it is larger than the block size */
425 static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
426                            u32 *keylen, u8 *key_out, u32 digestsize)
427 {
428         struct device *jrdev = ctx->jrdev;
429         u32 *desc;
430         struct split_key_result result;
431         dma_addr_t src_dma, dst_dma;
432         int ret = 0;
433
434         desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
435         if (!desc) {
436                 dev_err(jrdev, "unable to allocate key input memory\n");
437                 return -ENOMEM;
438         }
439
440         init_job_desc(desc, 0);
441
442         src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
443                                  DMA_TO_DEVICE);
444         if (dma_mapping_error(jrdev, src_dma)) {
445                 dev_err(jrdev, "unable to map key input memory\n");
446                 kfree(desc);
447                 return -ENOMEM;
448         }
449         dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
450                                  DMA_FROM_DEVICE);
451         if (dma_mapping_error(jrdev, dst_dma)) {
452                 dev_err(jrdev, "unable to map key output memory\n");
453                 dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
454                 kfree(desc);
455                 return -ENOMEM;
456         }
457
458         /* Job descriptor to perform unkeyed hash on key_in */
459         append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
460                          OP_ALG_AS_INITFINAL);
461         append_seq_in_ptr(desc, src_dma, *keylen, 0);
462         append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
463                              FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
464         append_seq_out_ptr(desc, dst_dma, digestsize, 0);
465         append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
466                          LDST_SRCDST_BYTE_CONTEXT);
467
468 #ifdef DEBUG
469         print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
470                        DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
471         print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
472                        DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
473 #endif
474
475         result.err = 0;
476         init_completion(&result.completion);
477
478         ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
479         if (!ret) {
480                 /* in progress */
481                 wait_for_completion_interruptible(&result.completion);
482                 ret = result.err;
483 #ifdef DEBUG
484                 print_hex_dump(KERN_ERR,
485                                "digested key@"__stringify(__LINE__)": ",
486                                DUMP_PREFIX_ADDRESS, 16, 4, key_in,
487                                digestsize, 1);
488 #endif
489         }
490         dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
491         dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
492
493         *keylen = digestsize;
494
495         kfree(desc);
496
497         return ret;
498 }
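
/*
 * Descriptive note: unlike the asynchronous request paths below,
 * hash_digest_key() runs synchronously -- it enqueues a one-off job with
 * split_key_done() as the callback and then sleeps on result.completion,
 * so it is only usable from sleepable context such as setkey.
 */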
499
500 static int ahash_setkey(struct crypto_ahash *ahash,
501                         const u8 *key, unsigned int keylen)
502 {
503         /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
504         static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
505         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
506         struct device *jrdev = ctx->jrdev;
507         int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
508         int digestsize = crypto_ahash_digestsize(ahash);
509         int ret = 0;
510         u8 *hashed_key = NULL;
511
512 #ifdef DEBUG
513         printk(KERN_ERR "keylen %d\n", keylen);
514 #endif
515
516         if (keylen > blocksize) {
517                 hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
518                                      GFP_DMA);
519                 if (!hashed_key)
520                         return -ENOMEM;
521                 ret = hash_digest_key(ctx, key, &keylen, hashed_key,
522                                       digestsize);
523                 if (ret)
524                         goto badkey;
525                 key = hashed_key;
526         }
527
528         /* Pick class 2 key length from algorithm submask */
529         ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
530                                       OP_ALG_ALGSEL_SHIFT] * 2;
531         ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
532
533 #ifdef DEBUG
534         printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
535                ctx->split_key_len, ctx->split_key_pad_len);
536         print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
537                        DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
538 #endif
539
540         ret = gen_split_hash_key(ctx, key, keylen);
541         if (ret)
542                 goto badkey;
543
544         ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
545                                       DMA_TO_DEVICE);
546         if (dma_mapping_error(jrdev, ctx->key_dma)) {
547                 dev_err(jrdev, "unable to map key i/o memory\n");
548                 return -ENOMEM;
549         }
550 #ifdef DEBUG
551         print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
552                        DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
553                        ctx->split_key_pad_len, 1);
554 #endif
555
556         ret = ahash_set_sh_desc(ahash);
557         if (ret) {
558                 dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
559                                  DMA_TO_DEVICE);
560         }
561
562         kfree(hashed_key);
563         return ret;
564 badkey:
565         kfree(hashed_key);
566         crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
567         return -EINVAL;
568 }
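
/*
 * Caller-side sketch (illustrative only, not part of this driver): the
 * setkey/digest entry points above and below are reached through the
 * generic crypto API once the hmac ahash algorithms are registered, e.g.:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_ahash_setkey(tfm, key, keylen);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   done_cb, cb_arg);
 *	ahash_request_set_crypt(req, sg, out, nbytes);
 *	crypto_ahash_digest(req);
 *
 * crypto_ahash_digest() typically returns -EINPROGRESS and done_cb() is
 * invoked on completion. Error handling is omitted; key, sg, out, done_cb
 * and cb_arg are placeholders supplied by the caller.
 */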
569
570 /*
571  * ahash_edesc - s/w-extended ahash descriptor
572  * @dst_dma: physical mapped address of req->result
573  * @sec4_sg_dma: physical mapped address of h/w link table
574  * @chained: if source is chained
575  * @src_nents: number of segments in input scatterlist
576  * @sec4_sg_bytes: length of dma mapped sec4_sg space
577  * @sec4_sg: pointer to h/w link table
578  * @hw_desc: the h/w job descriptor followed by any referenced link tables
579  */
580 struct ahash_edesc {
581         dma_addr_t dst_dma;
582         dma_addr_t sec4_sg_dma;
583         bool chained;
584         int src_nents;
585         int sec4_sg_bytes;
586         struct sec4_sg_entry *sec4_sg;
587         u32 hw_desc[0];
588 };
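
/*
 * Note on layout (descriptive only): each ahash_edesc is carved out of a
 * single kmalloc() of
 *
 *	sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes
 *
 * bytes, so hw_desc[] immediately follows the struct and the sec4 link
 * table starts DESC_JOB_IO_LEN bytes after it, which is exactly where the
 * "edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 * DESC_JOB_IO_LEN" assignments in the functions below point.
 */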
589
590 static inline void ahash_unmap(struct device *dev,
591                         struct ahash_edesc *edesc,
592                         struct ahash_request *req, int dst_len)
593 {
594         if (edesc->src_nents)
595                 dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
596                                      DMA_TO_DEVICE, edesc->chained);
597         if (edesc->dst_dma)
598                 dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
599
600         if (edesc->sec4_sg_bytes)
601                 dma_unmap_single(dev, edesc->sec4_sg_dma,
602                                  edesc->sec4_sg_bytes, DMA_TO_DEVICE);
603 }
604
605 static inline void ahash_unmap_ctx(struct device *dev,
606                         struct ahash_edesc *edesc,
607                         struct ahash_request *req, int dst_len, u32 flag)
608 {
609         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
610         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
611         struct caam_hash_state *state = ahash_request_ctx(req);
612
613         if (state->ctx_dma)
614                 dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
615         ahash_unmap(dev, edesc, req, dst_len);
616 }
617
618 static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
619                        void *context)
620 {
621         struct ahash_request *req = context;
622         struct ahash_edesc *edesc;
623         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
624         int digestsize = crypto_ahash_digestsize(ahash);
625 #ifdef DEBUG
626         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
627         struct caam_hash_state *state = ahash_request_ctx(req);
628
629         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
630 #endif
631
632         edesc = (struct ahash_edesc *)((char *)desc -
633                  offsetof(struct ahash_edesc, hw_desc));
634         if (err) {
635                 char tmp[CAAM_ERROR_STR_MAX];
636
637                 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
638         }
639
640         ahash_unmap(jrdev, edesc, req, digestsize);
641         kfree(edesc);
642
643 #ifdef DEBUG
644         print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
645                        DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
646                        ctx->ctx_len, 1);
647         if (req->result)
648                 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
649                                DUMP_PREFIX_ADDRESS, 16, 4, req->result,
650                                digestsize, 1);
651 #endif
652
653         req->base.complete(&req->base, err);
654 }
655
656 static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
657                             void *context)
658 {
659         struct ahash_request *req = context;
660         struct ahash_edesc *edesc;
661         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
662         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
663 #ifdef DEBUG
664         struct caam_hash_state *state = ahash_request_ctx(req);
665         int digestsize = crypto_ahash_digestsize(ahash);
666
667         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
668 #endif
669
670         edesc = (struct ahash_edesc *)((char *)desc -
671                  offsetof(struct ahash_edesc, hw_desc));
672         if (err) {
673                 char tmp[CAAM_ERROR_STR_MAX];
674
675                 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
676         }
677
678         ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
679         kfree(edesc);
680
681 #ifdef DEBUG
682         print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
683                        DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
684                        ctx->ctx_len, 1);
685         if (req->result)
686                 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
687                                DUMP_PREFIX_ADDRESS, 16, 4, req->result,
688                                digestsize, 1);
689 #endif
690
691         req->base.complete(&req->base, err);
692 }
693
694 static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
695                                void *context)
696 {
697         struct ahash_request *req = context;
698         struct ahash_edesc *edesc;
699         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
700         int digestsize = crypto_ahash_digestsize(ahash);
701 #ifdef DEBUG
702         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
703         struct caam_hash_state *state = ahash_request_ctx(req);
704
705         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
706 #endif
707
708         edesc = (struct ahash_edesc *)((char *)desc -
709                  offsetof(struct ahash_edesc, hw_desc));
710         if (err) {
711                 char tmp[CAAM_ERROR_STR_MAX];
712
713                 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
714         }
715
716         ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
717         kfree(edesc);
718
719 #ifdef DEBUG
720         print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
721                        DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
722                        ctx->ctx_len, 1);
723         if (req->result)
724                 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
725                                DUMP_PREFIX_ADDRESS, 16, 4, req->result,
726                                digestsize, 1);
727 #endif
728
729         req->base.complete(&req->base, err);
730 }
731
732 static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
733                                void *context)
734 {
735         struct ahash_request *req = context;
736         struct ahash_edesc *edesc;
737         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
738         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
739 #ifdef DEBUG
740         struct caam_hash_state *state = ahash_request_ctx(req);
741         int digestsize = crypto_ahash_digestsize(ahash);
742
743         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
744 #endif
745
746         edesc = (struct ahash_edesc *)((char *)desc -
747                  offsetof(struct ahash_edesc, hw_desc));
748         if (err) {
749                 char tmp[CAAM_ERROR_STR_MAX];
750
751                 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
752         }
753
754         ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
755         kfree(edesc);
756
757 #ifdef DEBUG
758         print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
759                        DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
760                        ctx->ctx_len, 1);
761         if (req->result)
762                 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
763                                DUMP_PREFIX_ADDRESS, 16, 4, req->result,
764                                digestsize, 1);
765 #endif
766
767         req->base.complete(&req->base, err);
768 }
769
770 /* submit update job descriptor */
771 static int ahash_update_ctx(struct ahash_request *req)
772 {
773         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
774         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
775         struct caam_hash_state *state = ahash_request_ctx(req);
776         struct device *jrdev = ctx->jrdev;
777         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
778                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
779         u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
780         int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
781         u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
782         int *next_buflen = state->current_buf ? &state->buflen_0 :
783                            &state->buflen_1, last_buflen;
784         int in_len = *buflen + req->nbytes, to_hash;
785         u32 *sh_desc = ctx->sh_desc_update, *desc;
786         dma_addr_t ptr = ctx->sh_desc_update_dma;
787         int src_nents, sec4_sg_bytes, sec4_sg_src_index;
788         struct ahash_edesc *edesc;
789         bool chained = false;
790         int ret = 0;
791         int sh_len;
792
793         last_buflen = *next_buflen;
794         *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
795         to_hash = in_len - *next_buflen;
796
797         if (to_hash) {
798                 src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
799                                        &chained);
800                 sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
801                 sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
802                                  sizeof(struct sec4_sg_entry);
803
804                 /*
805                  * allocate space for base edesc and hw desc commands,
806                  * link tables
807                  */
808                 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
809                                 sec4_sg_bytes, GFP_DMA | flags);
810                 if (!edesc) {
811                         dev_err(jrdev,
812                                 "could not allocate extended descriptor\n");
813                         return -ENOMEM;
814                 }
815
816                 edesc->src_nents = src_nents;
817                 edesc->chained = chained;
818                 edesc->sec4_sg_bytes = sec4_sg_bytes;
819                 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
820                                  DESC_JOB_IO_LEN;
821                 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
822                                                      sec4_sg_bytes,
823                                                      DMA_TO_DEVICE);
824
825                 ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
826                                    edesc->sec4_sg, DMA_BIDIRECTIONAL);
827
828                 state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
829                                                         edesc->sec4_sg + 1,
830                                                         buf, state->buf_dma,
831                                                         *buflen, last_buflen);
832
833                 if (src_nents) {
834                         src_map_to_sec4_sg(jrdev, req->src, src_nents,
835                                            edesc->sec4_sg + sec4_sg_src_index,
836                                            chained);
837                         if (*next_buflen) {
838                                 scatterwalk_map_and_copy(next_buf, req->src,
839                                                          to_hash - *buflen,
840                                                          *next_buflen, 0);
841                                 state->current_buf = !state->current_buf;
842                         }
843                 } else {
844                         (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
845                                                         SEC4_SG_LEN_FIN;
846                 }
847
848                 sh_len = desc_len(sh_desc);
849                 desc = edesc->hw_desc;
850                 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
851                                      HDR_REVERSE);
852
853                 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
854                                        to_hash, LDST_SGF);
855
856                 append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
857
858 #ifdef DEBUG
859                 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
860                                DUMP_PREFIX_ADDRESS, 16, 4, desc,
861                                desc_bytes(desc), 1);
862 #endif
863
864                 ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
865                 if (!ret) {
866                         ret = -EINPROGRESS;
867                 } else {
868                         ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
869                                            DMA_BIDIRECTIONAL);
870                         kfree(edesc);
871                 }
872         } else if (*next_buflen) {
873                 scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
874                                          req->nbytes, 0);
875                 *buflen = *next_buflen;
876                 *next_buflen = last_buflen;
877         }
878 #ifdef DEBUG
879         print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
880                        DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
881         print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
882                        DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
883                        *next_buflen, 1);
884 #endif
885
886         return ret;
887 }
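
/*
 * Worked example (illustrative, assuming a 64-byte block algorithm such as
 * sha256): with 10 bytes already held in buf and req->nbytes == 100, in_len
 * is 110, *next_buflen = 110 & 63 = 46 and to_hash = 64. The job hashes the
 * 10 buffered bytes plus the first 54 bytes of req->src, the last 46 bytes
 * of req->src are copied into the other buffer, and current_buf flips so
 * the next update starts from that buffer.
 */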
888
889 static int ahash_final_ctx(struct ahash_request *req)
890 {
891         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
892         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
893         struct caam_hash_state *state = ahash_request_ctx(req);
894         struct device *jrdev = ctx->jrdev;
895         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
896                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
897         u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
898         int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
899         int last_buflen = state->current_buf ? state->buflen_0 :
900                           state->buflen_1;
901         u32 *sh_desc = ctx->sh_desc_fin, *desc;
902         dma_addr_t ptr = ctx->sh_desc_fin_dma;
903         int sec4_sg_bytes, sec4_sg_src_index;
904         int digestsize = crypto_ahash_digestsize(ahash);
905         struct ahash_edesc *edesc;
906         int ret = 0;
907         int sh_len;
908
909         sec4_sg_src_index = 1 + (buflen ? 1 : 0);
910         sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
910
911         /* allocate space for base edesc and hw desc commands, link tables */
912         edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
913                         sec4_sg_bytes, GFP_DMA | flags);
914         if (!edesc) {
915                 dev_err(jrdev, "could not allocate extended descriptor\n");
916                 return -ENOMEM;
917         }
918
919         sh_len = desc_len(sh_desc);
920         desc = edesc->hw_desc;
921         init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
922
923         edesc->sec4_sg_bytes = sec4_sg_bytes;
924         edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
925                          DESC_JOB_IO_LEN;
926         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
927                                             sec4_sg_bytes, DMA_TO_DEVICE);
928         edesc->src_nents = 0;
929
930         ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
931                            DMA_TO_DEVICE);
932
933         state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
934                                                 buf, state->buf_dma, buflen,
935                                                 last_buflen);
936         (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;
937
938         append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
939                           LDST_SGF);
940
941         edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
942                                                 digestsize);
943
944 #ifdef DEBUG
945         print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
946                        DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
947 #endif
948
949         ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
950         if (!ret) {
951                 ret = -EINPROGRESS;
952         } else {
953                 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
954                 kfree(edesc);
955         }
956
957         return ret;
958 }
959
960 static int ahash_finup_ctx(struct ahash_request *req)
961 {
962         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
963         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
964         struct caam_hash_state *state = ahash_request_ctx(req);
965         struct device *jrdev = ctx->jrdev;
966         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
967                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
968         u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
969         int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
970         int last_buflen = state->current_buf ? state->buflen_0 :
971                           state->buflen_1;
972         u32 *sh_desc = ctx->sh_desc_finup, *desc;
973         dma_addr_t ptr = ctx->sh_desc_finup_dma;
974         int sec4_sg_bytes, sec4_sg_src_index;
975         int src_nents;
976         int digestsize = crypto_ahash_digestsize(ahash);
977         struct ahash_edesc *edesc;
978         bool chained = false;
979         int ret = 0;
980         int sh_len;
981
982         src_nents = __sg_count(req->src, req->nbytes, &chained);
983         sec4_sg_src_index = 1 + (buflen ? 1 : 0);
984         sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
985                          sizeof(struct sec4_sg_entry);
986
987         /* allocate space for base edesc and hw desc commands, link tables */
988         edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
989                         sec4_sg_bytes, GFP_DMA | flags);
990         if (!edesc) {
991                 dev_err(jrdev, "could not allocate extended descriptor\n");
992                 return -ENOMEM;
993         }
994
995         sh_len = desc_len(sh_desc);
996         desc = edesc->hw_desc;
997         init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
998
999         edesc->src_nents = src_nents;
1000         edesc->chained = chained;
1001         edesc->sec4_sg_bytes = sec4_sg_bytes;
1002         edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1003                          DESC_JOB_IO_LEN;
1004         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1005                                             sec4_sg_bytes, DMA_TO_DEVICE);
1006
1007         ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
1008                            DMA_TO_DEVICE);
1009
1010         state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
1011                                                 buf, state->buf_dma, buflen,
1012                                                 last_buflen);
1013
1014         src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
1015                            sec4_sg_src_index, chained);
1016
1017         append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
1018                                buflen + req->nbytes, LDST_SGF);
1019
1020         edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1021                                                 digestsize);
1022
1023 #ifdef DEBUG
1024         print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1025                        DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1026 #endif
1027
1028         ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
1029         if (!ret) {
1030                 ret = -EINPROGRESS;
1031         } else {
1032                 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
1033                 kfree(edesc);
1034         }
1035
1036         return ret;
1037 }
1038
1039 static int ahash_digest(struct ahash_request *req)
1040 {
1041         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1042         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1043         struct device *jrdev = ctx->jrdev;
1044         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1045                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1046         u32 *sh_desc = ctx->sh_desc_digest, *desc;
1047         dma_addr_t ptr = ctx->sh_desc_digest_dma;
1048         int digestsize = crypto_ahash_digestsize(ahash);
1049         int src_nents, sec4_sg_bytes;
1050         dma_addr_t src_dma;
1051         struct ahash_edesc *edesc;
1052         bool chained = false;
1053         int ret = 0;
1054         u32 options;
1055         int sh_len;
1056
1057         src_nents = sg_count(req->src, req->nbytes, &chained);
1058         dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,
1059                            chained);
1060         sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
1061
1062         /* allocate space for base edesc and hw desc commands, link tables */
1063         edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
1064                         DESC_JOB_IO_LEN, GFP_DMA | flags);
1065         if (!edesc) {
1066                 dev_err(jrdev, "could not allocate extended descriptor\n");
1067                 return -ENOMEM;
1068         }
1069         edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1070                           DESC_JOB_IO_LEN;
1071         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1072                                             sec4_sg_bytes, DMA_TO_DEVICE);
1073         edesc->src_nents = src_nents;
1074         edesc->chained = chained;
1075
1076         sh_len = desc_len(sh_desc);
1077         desc = edesc->hw_desc;
1078         init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1079
1080         if (src_nents) {
1081                 sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
1082                 src_dma = edesc->sec4_sg_dma;
1083                 options = LDST_SGF;
1084         } else {
1085                 src_dma = sg_dma_address(req->src);
1086                 options = 0;
1087         }
1088         append_seq_in_ptr(desc, src_dma, req->nbytes, options);
1089
1090         edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1091                                                 digestsize);
1092
1093 #ifdef DEBUG
1094         print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1095                        DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1096 #endif
1097
1098         ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1099         if (!ret) {
1100                 ret = -EINPROGRESS;
1101         } else {
1102                 ahash_unmap(jrdev, edesc, req, digestsize);
1103                 kfree(edesc);
1104         }
1105
1106         return ret;
1107 }
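
/*
 * Note (descriptive only): sg_count() returns 0 for a single, unchained
 * scatterlist segment, in which case the job descriptor above points SEQ IN
 * directly at sg_dma_address(req->src) with options == 0; only multi-segment
 * sources get a sec4 link table and the LDST_SGF flag.
 */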
1108
1109 /* submit ahash final if it is the first job descriptor */
1110 static int ahash_final_no_ctx(struct ahash_request *req)
1111 {
1112         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1113         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1114         struct caam_hash_state *state = ahash_request_ctx(req);
1115         struct device *jrdev = ctx->jrdev;
1116         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1117                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1118         u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
1119         int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
1120         u32 *sh_desc = ctx->sh_desc_digest, *desc;
1121         dma_addr_t ptr = ctx->sh_desc_digest_dma;
1122         int digestsize = crypto_ahash_digestsize(ahash);
1123         struct ahash_edesc *edesc;
1124         int ret = 0;
1125         int sh_len;
1126
1127         /* allocate space for base edesc and hw desc commands, link tables */
1128         edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
1129                         GFP_DMA | flags);
1130         if (!edesc) {
1131                 dev_err(jrdev, "could not allocate extended descriptor\n");
1132                 return -ENOMEM;
1133         }
1134
1135         sh_len = desc_len(sh_desc);
1136         desc = edesc->hw_desc;
1137         init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1138
1139         state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
1140
1141         append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
1142
1143         edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1144                                                 digestsize);
1145         edesc->src_nents = 0;
1146
1147 #ifdef DEBUG
1148         print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1149                        DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1150 #endif
1151
1152         ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1153         if (!ret) {
1154                 ret = -EINPROGRESS;
1155         } else {
1156                 ahash_unmap(jrdev, edesc, req, digestsize);
1157                 kfree(edesc);
1158         }
1159
1160         return ret;
1161 }
1162
1163 /* submit ahash update if it is the first job descriptor after update */
1164 static int ahash_update_no_ctx(struct ahash_request *req)
1165 {
1166         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1167         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1168         struct caam_hash_state *state = ahash_request_ctx(req);
1169         struct device *jrdev = ctx->jrdev;
1170         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1171                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1172         u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
1173         int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
1174         u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
1175         int *next_buflen = state->current_buf ? &state->buflen_0 :
1176                            &state->buflen_1;
1177         int in_len = *buflen + req->nbytes, to_hash;
1178         int sec4_sg_bytes, src_nents;
1179         struct ahash_edesc *edesc;
1180         u32 *desc, *sh_desc = ctx->sh_desc_update_first;
1181         dma_addr_t ptr = ctx->sh_desc_update_first_dma;
1182         bool chained = false;
1183         int ret = 0;
1184         int sh_len;
1185
1186         *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
1187         to_hash = in_len - *next_buflen;
1188
1189         if (to_hash) {
1190                 src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
1191                                        &chained);
1192                 sec4_sg_bytes = (1 + src_nents) *
1193                                 sizeof(struct sec4_sg_entry);
1194
1195                 /*
1196                  * allocate space for base edesc and hw desc commands,
1197                  * link tables
1198                  */
1199                 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
1200                                 sec4_sg_bytes, GFP_DMA | flags);
1201                 if (!edesc) {
1202                         dev_err(jrdev,
1203                                 "could not allocate extended descriptor\n");
1204                         return -ENOMEM;
1205                 }
1206
1207                 edesc->src_nents = src_nents;
1208                 edesc->chained = chained;
1209                 edesc->sec4_sg_bytes = sec4_sg_bytes;
1210                 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1211                                  DESC_JOB_IO_LEN;
1212                 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1213                                                     sec4_sg_bytes,
1214                                                     DMA_TO_DEVICE);
1215
1216                 state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
1217                                                     buf, *buflen);
1218                 src_map_to_sec4_sg(jrdev, req->src, src_nents,
1219                                    edesc->sec4_sg + 1, chained);
1220                 if (*next_buflen) {
1221                         scatterwalk_map_and_copy(next_buf, req->src,
1222                                                  to_hash - *buflen,
1223                                                  *next_buflen, 0);
1224                         state->current_buf = !state->current_buf;
1225                 }
1226
1227                 sh_len = desc_len(sh_desc);
1228                 desc = edesc->hw_desc;
1229                 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
1230                                      HDR_REVERSE);
1231
1232                 append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
1233
1234                 map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1235
1236 #ifdef DEBUG
1237                 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1238                                DUMP_PREFIX_ADDRESS, 16, 4, desc,
1239                                desc_bytes(desc), 1);
1240 #endif
1241
1242                 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
1243                 if (!ret) {
1244                         ret = -EINPROGRESS;
1245                         state->update = ahash_update_ctx;
1246                         state->finup = ahash_finup_ctx;
1247                         state->final = ahash_final_ctx;
1248                 } else {
1249                         ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
1250                                         DMA_TO_DEVICE);
1251                         kfree(edesc);
1252                 }
1253         } else if (*next_buflen) {
1254                 scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
1255                                          req->nbytes, 0);
1256                 *buflen = *next_buflen;
1257                 *next_buflen = 0;
1258         }
1259 #ifdef DEBUG
1260         print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
1261                        DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
1262         print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
1263                        DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
1264                        *next_buflen, 1);
1265 #endif
1266
1267         return ret;
1268 }
1269
1270 /* submit ahash finup if it is the first job descriptor after update */
1271 static int ahash_finup_no_ctx(struct ahash_request *req)
1272 {
1273         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1274         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1275         struct caam_hash_state *state = ahash_request_ctx(req);
1276         struct device *jrdev = ctx->jrdev;
1277         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1278                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1279         u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
1280         int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
1281         int last_buflen = state->current_buf ? state->buflen_0 :
1282                           state->buflen_1;
1283         u32 *sh_desc = ctx->sh_desc_digest, *desc;
1284         dma_addr_t ptr = ctx->sh_desc_digest_dma;
1285         int sec4_sg_bytes, sec4_sg_src_index, src_nents;
1286         int digestsize = crypto_ahash_digestsize(ahash);
1287         struct ahash_edesc *edesc;
1288         bool chained = false;
1289         int sh_len;
1290         int ret = 0;
1291
1292         src_nents = __sg_count(req->src, req->nbytes, &chained);
1293         sec4_sg_src_index = 2;
1294         sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
1295                          sizeof(struct sec4_sg_entry);
1296
1297         /* allocate space for base edesc and hw desc commands, link tables */
1298         edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
1299                         sec4_sg_bytes, GFP_DMA | flags);
1300         if (!edesc) {
1301                 dev_err(jrdev, "could not allocate extended descriptor\n");
1302                 return -ENOMEM;
1303         }
1304
1305         sh_len = desc_len(sh_desc);
1306         desc = edesc->hw_desc;
1307         init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1308
1309         edesc->src_nents = src_nents;
1310         edesc->chained = chained;
1311         edesc->sec4_sg_bytes = sec4_sg_bytes;
1312         edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1313                          DESC_JOB_IO_LEN;
1314         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1315                                             sec4_sg_bytes, DMA_TO_DEVICE);
1316
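             /*
              * Link-table layout used below: entry 0 carries any bytes still
              * buffered from a previous update, and the request scatterlist
              * is mapped in starting at entry 1.
              */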
1317         state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
1318                                                 state->buf_dma, buflen,
1319                                                 last_buflen);
1320
1321         src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
1322                            chained);
1323
1324         append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
1325                                req->nbytes, LDST_SGF);
1326
1327         edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1328                                                 digestsize);
1329
1330 #ifdef DEBUG
1331         print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1332                        DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1333 #endif
1334
1335         ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1336         if (!ret) {
1337                 ret = -EINPROGRESS;
1338         } else {
1339                 ahash_unmap(jrdev, edesc, req, digestsize);
1340                 kfree(edesc);
1341         }
1342
1343         return ret;
1344 }
1345
1346 /* submit first update job descriptor after init */
1347 static int ahash_update_first(struct ahash_request *req)
1348 {
1349         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1350         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1351         struct caam_hash_state *state = ahash_request_ctx(req);
1352         struct device *jrdev = ctx->jrdev;
1353         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1354                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1355         u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
1356         int *next_buflen = state->current_buf ?
1357                 &state->buflen_1 : &state->buflen_0;
1358         int to_hash;
1359         u32 *sh_desc = ctx->sh_desc_update_first, *desc;
1360         dma_addr_t ptr = ctx->sh_desc_update_first_dma;
1361         int sec4_sg_bytes, src_nents;
1362         dma_addr_t src_dma;
1363         u32 options;
1364         struct ahash_edesc *edesc;
1365         bool chained = false;
1366         int ret = 0;
1367         int sh_len;
1368
1369         *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
1370                                       1);
1371         to_hash = req->nbytes - *next_buflen;
1372
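             /*
              * Only the block-aligned prefix (to_hash) is pushed to the
              * engine now; the remaining tail stays in next_buf until a
              * later update/finup/final supplies more data.
              */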
1373         if (to_hash) {
1374                 src_nents = sg_count(req->src, req->nbytes - (*next_buflen),
1375                                      &chained);
1376                 dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1377                                    DMA_TO_DEVICE, chained);
1378                 sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
1379
1380                 /*
1381                  * allocate space for base edesc and hw desc commands,
1382                  * link tables
1383                  */
1384                 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
1385                                 sec4_sg_bytes, GFP_DMA | flags);
1386                 if (!edesc) {
1387                         dev_err(jrdev,
1388                                 "could not allocate extended descriptor\n");
1389                         return -ENOMEM;
1390                 }
1391
1392                 edesc->src_nents = src_nents;
1393                 edesc->chained = chained;
1394                 edesc->sec4_sg_bytes = sec4_sg_bytes;
1395                 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1396                                  DESC_JOB_IO_LEN;
1397                 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1398                                                     sec4_sg_bytes,
1399                                                     DMA_TO_DEVICE);
1400
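                     /*
                      * A multi-segment source goes through the sec4 link
                      * table (LDST_SGF); a single segment is referenced
                      * directly by its DMA address.
                      */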
1401                 if (src_nents) {
1402                         sg_to_sec4_sg_last(req->src, src_nents,
1403                                            edesc->sec4_sg, 0);
1404                         src_dma = edesc->sec4_sg_dma;
1405                         options = LDST_SGF;
1406                 } else {
1407                         src_dma = sg_dma_address(req->src);
1408                         options = 0;
1409                 }
1410
1411                 if (*next_buflen)
1412                         scatterwalk_map_and_copy(next_buf, req->src, to_hash,
1413                                                  *next_buflen, 0);
1414
1415                 sh_len = desc_len(sh_desc);
1416                 desc = edesc->hw_desc;
1417                 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
1418                                      HDR_REVERSE);
1419
1420                 append_seq_in_ptr(desc, src_dma, to_hash, options);
1421
1422                 map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1423
1424 #ifdef DEBUG
1425                 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1426                                DUMP_PREFIX_ADDRESS, 16, 4, desc,
1427                                desc_bytes(desc), 1);
1428 #endif
1429
1430                 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
1431                                       req);
1432                 if (!ret) {
1433                         ret = -EINPROGRESS;
1434                         state->update = ahash_update_ctx;
1435                         state->finup = ahash_finup_ctx;
1436                         state->final = ahash_final_ctx;
1437                 } else {
1438                         ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
1439                                         DMA_TO_DEVICE);
1440                         kfree(edesc);
1441                 }
1442         } else if (*next_buflen) {
1443                 state->update = ahash_update_no_ctx;
1444                 state->finup = ahash_finup_no_ctx;
1445                 state->final = ahash_final_no_ctx;
1446                 scatterwalk_map_and_copy(next_buf, req->src, 0,
1447                                          req->nbytes, 0);
1448         }
1449 #ifdef DEBUG
1450         print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
1451                        DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
1452                        *next_buflen, 1);
1453 #endif
1454
1455         return ret;
1456 }
1457
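     /* finup before any data has been absorbed reduces to a one-shot digest */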
1458 static int ahash_finup_first(struct ahash_request *req)
1459 {
1460         return ahash_digest(req);
1461 }
1462
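     /*
      * ahash_init arms the per-request function pointers; the generic
      * update/finup/final entry points below simply dispatch through them,
      * and the handlers re-point them as the request progresses (first
      * pass, no-context, with-context).
      */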
1463 static int ahash_init(struct ahash_request *req)
1464 {
1465         struct caam_hash_state *state = ahash_request_ctx(req);
1466
1467         state->update = ahash_update_first;
1468         state->finup = ahash_finup_first;
1469         state->final = ahash_final_no_ctx;
1470
1471         state->current_buf = 0;
1472
1473         return 0;
1474 }
1475
1476 static int ahash_update(struct ahash_request *req)
1477 {
1478         struct caam_hash_state *state = ahash_request_ctx(req);
1479
1480         return state->update(req);
1481 }
1482
1483 static int ahash_finup(struct ahash_request *req)
1484 {
1485         struct caam_hash_state *state = ahash_request_ctx(req);
1486
1487         return state->finup(req);
1488 }
1489
1490 static int ahash_final(struct ahash_request *req)
1491 {
1492         struct caam_hash_state *state = ahash_request_ctx(req);
1493
1494         return state->final(req);
1495 }
1496
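     /*
      * The export blob is simply the transform context followed by the
      * request state; import expects the same layout back.
      */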
1497 static int ahash_export(struct ahash_request *req, void *out)
1498 {
1499         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1500         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1501         struct caam_hash_state *state = ahash_request_ctx(req);
1502
1503         memcpy(out, ctx, sizeof(struct caam_hash_ctx));
1504         memcpy(out + sizeof(struct caam_hash_ctx), state,
1505                sizeof(struct caam_hash_state));
1506         return 0;
1507 }
1508
1509 static int ahash_import(struct ahash_request *req, const void *in)
1510 {
1511         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1512         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1513         struct caam_hash_state *state = ahash_request_ctx(req);
1514
1515         memcpy(ctx, in, sizeof(struct caam_hash_ctx));
1516         memcpy(state, in + sizeof(struct caam_hash_ctx),
1517                sizeof(struct caam_hash_state));
1518         return 0;
1519 }
1520
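     /*
      * Per-algorithm template; caam_hash_alloc() instantiates each entry
      * twice, once as the plain hash and once as the keyed (HMAC) variant.
      */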
1521 struct caam_hash_template {
1522         char name[CRYPTO_MAX_ALG_NAME];
1523         char driver_name[CRYPTO_MAX_ALG_NAME];
1524         char hmac_name[CRYPTO_MAX_ALG_NAME];
1525         char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
1526         unsigned int blocksize;
1527         struct ahash_alg template_ahash;
1528         u32 alg_type;
1529         u32 alg_op;
1530 };
1531
1532 /* ahash algorithm templates */
1533 static struct caam_hash_template driver_hash[] = {
1534         {
1535                 .name = "sha1",
1536                 .driver_name = "sha1-caam",
1537                 .hmac_name = "hmac(sha1)",
1538                 .hmac_driver_name = "hmac-sha1-caam",
1539                 .blocksize = SHA1_BLOCK_SIZE,
1540                 .template_ahash = {
1541                         .init = ahash_init,
1542                         .update = ahash_update,
1543                         .final = ahash_final,
1544                         .finup = ahash_finup,
1545                         .digest = ahash_digest,
1546                         .export = ahash_export,
1547                         .import = ahash_import,
1548                         .setkey = ahash_setkey,
1549                         .halg = {
1550                                 .digestsize = SHA1_DIGEST_SIZE,
1551                                 },
1552                         },
1553                 .alg_type = OP_ALG_ALGSEL_SHA1,
1554                 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1555         }, {
1556                 .name = "sha224",
1557                 .driver_name = "sha224-caam",
1558                 .hmac_name = "hmac(sha224)",
1559                 .hmac_driver_name = "hmac-sha224-caam",
1560                 .blocksize = SHA224_BLOCK_SIZE,
1561                 .template_ahash = {
1562                         .init = ahash_init,
1563                         .update = ahash_update,
1564                         .final = ahash_final,
1565                         .finup = ahash_finup,
1566                         .digest = ahash_digest,
1567                         .export = ahash_export,
1568                         .import = ahash_import,
1569                         .setkey = ahash_setkey,
1570                         .halg = {
1571                                 .digestsize = SHA224_DIGEST_SIZE,
1572                                 },
1573                         },
1574                 .alg_type = OP_ALG_ALGSEL_SHA224,
1575                 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1576         }, {
1577                 .name = "sha256",
1578                 .driver_name = "sha256-caam",
1579                 .hmac_name = "hmac(sha256)",
1580                 .hmac_driver_name = "hmac-sha256-caam",
1581                 .blocksize = SHA256_BLOCK_SIZE,
1582                 .template_ahash = {
1583                         .init = ahash_init,
1584                         .update = ahash_update,
1585                         .final = ahash_final,
1586                         .finup = ahash_finup,
1587                         .digest = ahash_digest,
1588                         .export = ahash_export,
1589                         .import = ahash_import,
1590                         .setkey = ahash_setkey,
1591                         .halg = {
1592                                 .digestsize = SHA256_DIGEST_SIZE,
1593                                 },
1594                         },
1595                 .alg_type = OP_ALG_ALGSEL_SHA256,
1596                 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1597         }, {
1598                 .name = "sha384",
1599                 .driver_name = "sha384-caam",
1600                 .hmac_name = "hmac(sha384)",
1601                 .hmac_driver_name = "hmac-sha384-caam",
1602                 .blocksize = SHA384_BLOCK_SIZE,
1603                 .template_ahash = {
1604                         .init = ahash_init,
1605                         .update = ahash_update,
1606                         .final = ahash_final,
1607                         .finup = ahash_finup,
1608                         .digest = ahash_digest,
1609                         .export = ahash_export,
1610                         .import = ahash_import,
1611                         .setkey = ahash_setkey,
1612                         .halg = {
1613                                 .digestsize = SHA384_DIGEST_SIZE,
1614                                 },
1615                         },
1616                 .alg_type = OP_ALG_ALGSEL_SHA384,
1617                 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1618         }, {
1619                 .name = "sha512",
1620                 .driver_name = "sha512-caam",
1621                 .hmac_name = "hmac(sha512)",
1622                 .hmac_driver_name = "hmac-sha512-caam",
1623                 .blocksize = SHA512_BLOCK_SIZE,
1624                 .template_ahash = {
1625                         .init = ahash_init,
1626                         .update = ahash_update,
1627                         .final = ahash_final,
1628                         .finup = ahash_finup,
1629                         .digest = ahash_digest,
1630                         .export = ahash_export,
1631                         .import = ahash_import,
1632                         .setkey = ahash_setkey,
1633                         .halg = {
1634                                 .digestsize = SHA512_DIGEST_SIZE,
1635                                 },
1636                         },
1637                 .alg_type = OP_ALG_ALGSEL_SHA512,
1638                 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1639         }, {
1640                 .name = "md5",
1641                 .driver_name = "md5-caam",
1642                 .hmac_name = "hmac(md5)",
1643                 .hmac_driver_name = "hmac-md5-caam",
1644                 .blocksize = MD5_BLOCK_WORDS * 4,
1645                 .template_ahash = {
1646                         .init = ahash_init,
1647                         .update = ahash_update,
1648                         .final = ahash_final,
1649                         .finup = ahash_finup,
1650                         .digest = ahash_digest,
1651                         .export = ahash_export,
1652                         .import = ahash_import,
1653                         .setkey = ahash_setkey,
1654                         .halg = {
1655                                 .digestsize = MD5_DIGEST_SIZE,
1656                                 },
1657                         },
1658                 .alg_type = OP_ALG_ALGSEL_MD5,
1659                 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1660         },
1661 };
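
     /*
      * Illustrative sketch (not part of this driver): a kernel user reaches
      * these algorithms through the generic crypto API by cra_name, e.g.
      * "sha256" or "hmac(sha256)"; CAAM_CRA_PRIORITY makes the "-caam"
      * implementation the preferred one.  In the sketch below, sg, digest,
      * nbytes, my_done_cb and my_cb_ctx are caller-supplied placeholders:
      *
      *     struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
      *     struct ahash_request *req;
      *     int err;
      *
      *     if (IS_ERR(tfm))
      *             return PTR_ERR(tfm);
      *     req = ahash_request_alloc(tfm, GFP_KERNEL);
      *     if (!req) {
      *             crypto_free_ahash(tfm);
      *             return -ENOMEM;
      *     }
      *     ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
      *                                my_done_cb, my_cb_ctx);
      *     ahash_request_set_crypt(req, sg, digest, nbytes);
      *     err = crypto_ahash_digest(req);    (may return -EINPROGRESS;
      *                                         wait for my_done_cb before
      *                                         reading digest)
      *     ahash_request_free(req);
      *     crypto_free_ahash(tfm);
      */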
1662
1663 struct caam_hash_alg {
1664         struct list_head entry;
1665         int alg_type;
1666         int alg_op;
1667         struct ahash_alg ahash_alg;
1668 };
1669
1670 static int caam_hash_cra_init(struct crypto_tfm *tfm)
1671 {
1672         struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
1673         struct crypto_alg *base = tfm->__crt_alg;
1674         struct hash_alg_common *halg =
1675                  container_of(base, struct hash_alg_common, base);
1676         struct ahash_alg *alg =
1677                  container_of(halg, struct ahash_alg, halg);
1678         struct caam_hash_alg *caam_hash =
1679                  container_of(alg, struct caam_hash_alg, ahash_alg);
1680         struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1681         /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
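             /*
              * SHA-224 and SHA-384 are truncated variants, so their running
              * state is the full 32-byte (SHA-256) / 64-byte (SHA-512)
              * digest; hence the bare 32 and 64 entries below.
              */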
1682         static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
1683                                          HASH_MSG_LEN + SHA1_DIGEST_SIZE,
1684                                          HASH_MSG_LEN + 32,
1685                                          HASH_MSG_LEN + SHA256_DIGEST_SIZE,
1686                                          HASH_MSG_LEN + 64,
1687                                          HASH_MSG_LEN + SHA512_DIGEST_SIZE };
1688         int ret = 0;
1689
1690         /*
1691          * Get a job ring from the Job Ring driver to ensure in-order
1692          * processing of this tfm's crypto requests
1693          */
1694         ctx->jrdev = caam_jr_alloc();
1695         if (IS_ERR(ctx->jrdev)) {
1696                 pr_err("Job Ring Device allocation for transform failed\n");
1697                 return PTR_ERR(ctx->jrdev);
1698         }
1699         /* copy descriptor header template value */
1700         ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
1701         ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
1702
1703         ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
1704                                   OP_ALG_ALGSEL_SHIFT];
1705
1706         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1707                                  sizeof(struct caam_hash_state));
1708
1709         ret = ahash_set_sh_desc(ahash);
1710
1711         return ret;
1712 }
1713
1714 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
1715 {
1716         struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1717
1718         if (ctx->sh_desc_update_dma &&
1719             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
1720                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
1721                                  desc_bytes(ctx->sh_desc_update),
1722                                  DMA_TO_DEVICE);
1723         if (ctx->sh_desc_update_first_dma &&
1724             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
1725                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
1726                                  desc_bytes(ctx->sh_desc_update_first),
1727                                  DMA_TO_DEVICE);
1728         if (ctx->sh_desc_fin_dma &&
1729             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
1730                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
1731                                  desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
1732         if (ctx->sh_desc_digest_dma &&
1733             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
1734                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
1735                                  desc_bytes(ctx->sh_desc_digest),
1736                                  DMA_TO_DEVICE);
1737         if (ctx->sh_desc_finup_dma &&
1738             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
1739                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
1740                                  desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
1741
1742         caam_jr_free(ctx->jrdev);
1743 }
1744
1745 static void __exit caam_algapi_hash_exit(void)
1746 {
1747         struct caam_hash_alg *t_alg, *n;
1748
1749         if (!hash_list.next)
1750                 return;
1751
1752         list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
1753                 crypto_unregister_ahash(&t_alg->ahash_alg);
1754                 list_del(&t_alg->entry);
1755                 kfree(t_alg);
1756         }
1757 }
1758
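     /*
      * Build a crypto_alg instance from a template entry, using either the
      * plain hash names or the hmac(...) names depending on 'keyed'.
      */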
1759 static struct caam_hash_alg *
1760 caam_hash_alloc(struct caam_hash_template *template,
1761                 bool keyed)
1762 {
1763         struct caam_hash_alg *t_alg;
1764         struct ahash_alg *halg;
1765         struct crypto_alg *alg;
1766
1767         t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
1768         if (!t_alg) {
1769                 pr_err("failed to allocate t_alg\n");
1770                 return ERR_PTR(-ENOMEM);
1771         }
1772
1773         t_alg->ahash_alg = template->template_ahash;
1774         halg = &t_alg->ahash_alg;
1775         alg = &halg->halg.base;
1776
1777         if (keyed) {
1778                 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1779                          template->hmac_name);
1780                 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1781                          template->hmac_driver_name);
1782         } else {
1783                 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1784                          template->name);
1785                 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1786                          template->driver_name);
1787         }
1788         alg->cra_module = THIS_MODULE;
1789         alg->cra_init = caam_hash_cra_init;
1790         alg->cra_exit = caam_hash_cra_exit;
1791         alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
1792         alg->cra_priority = CAAM_CRA_PRIORITY;
1793         alg->cra_blocksize = template->blocksize;
1794         alg->cra_alignmask = 0;
1795         alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
1796         alg->cra_type = &crypto_ahash_type;
1797
1798         t_alg->alg_type = template->alg_type;
1799         t_alg->alg_op = template->alg_op;
1800
1801         return t_alg;
1802 }
1803
1804 static int __init caam_algapi_hash_init(void)
1805 {
1806         int i = 0, err = 0;
1807
1808         INIT_LIST_HEAD(&hash_list);
1809
1810         /* register crypto algorithms the device supports */
1811         for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
1812                 /* TODO: check if h/w supports alg */
1813                 struct caam_hash_alg *t_alg;
1814
1815                 /* register hmac version */
1816                 t_alg = caam_hash_alloc(&driver_hash[i], true);
1817                 if (IS_ERR(t_alg)) {
1818                         err = PTR_ERR(t_alg);
1819                         pr_warn("%s alg allocation failed\n",
1820                                 driver_hash[i].driver_name);
1821                         continue;
1822                 }
1823
1824                 err = crypto_register_ahash(&t_alg->ahash_alg);
1825                 if (err) {
1826                         pr_warn("%s alg registration failed\n",
1827                                 t_alg->ahash_alg.halg.base.cra_driver_name);
1828                         kfree(t_alg);
1829                 } else
1830                         list_add_tail(&t_alg->entry, &hash_list);
1831
1832                 /* register unkeyed version */
1833                 t_alg = caam_hash_alloc(&driver_hash[i], false);
1834                 if (IS_ERR(t_alg)) {
1835                         err = PTR_ERR(t_alg);
1836                         pr_warn("%s alg allocation failed\n",
1837                                 driver_hash[i].driver_name);
1838                         continue;
1839                 }
1840
1841                 err = crypto_register_ahash(&t_alg->ahash_alg);
1842                 if (err) {
1843                         pr_warn("%s alg registration failed\n",
1844                                 t_alg->ahash_alg.halg.base.cra_driver_name);
1845                         kfree(t_alg);
1846                 } else
1847                         list_add_tail(&t_alg->entry, &hash_list);
1848         }
1849
1850         return err;
1851 }
1852
1853 module_init(caam_algapi_hash_init);
1854 module_exit(caam_algapi_hash_exit);
1855
1856 MODULE_LICENSE("GPL");
1857 MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
1858 MODULE_AUTHOR("Freescale Semiconductor - NMG");