478822fc7a4ed93322b3ae14c0e1472e98eead66
[platform/kernel/linux-starfive.git] / drivers / crypto / stm32 / stm32-hash.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * This file is part of STM32 Crypto driver for Linux.
4  *
5  * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
6  * Author(s): Lionel DEBIEVE <lionel.debieve@st.com> for STMicroelectronics.
7  */
8
9 #include <linux/clk.h>
10 #include <linux/delay.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/dmaengine.h>
13 #include <linux/interrupt.h>
14 #include <linux/io.h>
15 #include <linux/iopoll.h>
16 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/of_device.h>
19 #include <linux/platform_device.h>
20 #include <linux/pm_runtime.h>
21 #include <linux/reset.h>
22
23 #include <crypto/engine.h>
24 #include <crypto/hash.h>
25 #include <crypto/md5.h>
26 #include <crypto/scatterwalk.h>
27 #include <crypto/sha1.h>
28 #include <crypto/sha2.h>
29 #include <crypto/internal/hash.h>
30
31 #define HASH_CR                         0x00
32 #define HASH_DIN                        0x04
33 #define HASH_STR                        0x08
34 #define HASH_UX500_HREG(x)              (0x0c + ((x) * 0x04))
35 #define HASH_IMR                        0x20
36 #define HASH_SR                         0x24
37 #define HASH_CSR(x)                     (0x0F8 + ((x) * 0x04))
38 #define HASH_HREG(x)                    (0x310 + ((x) * 0x04))
39 #define HASH_HWCFGR                     0x3F0
40 #define HASH_VER                        0x3F4
41 #define HASH_ID                         0x3F8
42
43 /* Control Register */
44 #define HASH_CR_INIT                    BIT(2)
45 #define HASH_CR_DMAE                    BIT(3)
46 #define HASH_CR_DATATYPE_POS            4
47 #define HASH_CR_MODE                    BIT(6)
48 #define HASH_CR_MDMAT                   BIT(13)
49 #define HASH_CR_DMAA                    BIT(14)
50 #define HASH_CR_LKEY                    BIT(16)
51
52 #define HASH_CR_ALGO_SHA1               0x0
53 #define HASH_CR_ALGO_MD5                0x80
54 #define HASH_CR_ALGO_SHA224             0x40000
55 #define HASH_CR_ALGO_SHA256             0x40080
56
57 #define HASH_CR_UX500_EMPTYMSG          BIT(20)
58 #define HASH_CR_UX500_ALGO_SHA1         BIT(7)
59 #define HASH_CR_UX500_ALGO_SHA256       0x0
60
61 /* Interrupt */
62 #define HASH_DINIE                      BIT(0)
63 #define HASH_DCIE                       BIT(1)
64
65 /* Interrupt Mask */
66 #define HASH_MASK_CALC_COMPLETION       BIT(0)
67 #define HASH_MASK_DATA_INPUT            BIT(1)
68
69 /* Context swap register */
70 #define HASH_CSR_REGISTER_NUMBER        54
71
72 /* Status Flags */
73 #define HASH_SR_DATA_INPUT_READY        BIT(0)
74 #define HASH_SR_OUTPUT_READY            BIT(1)
75 #define HASH_SR_DMA_ACTIVE              BIT(2)
76 #define HASH_SR_BUSY                    BIT(3)
77
78 /* STR Register */
79 #define HASH_STR_NBLW_MASK              GENMASK(4, 0)
80 #define HASH_STR_DCAL                   BIT(8)
81
82 #define HASH_FLAGS_INIT                 BIT(0)
83 #define HASH_FLAGS_OUTPUT_READY         BIT(1)
84 #define HASH_FLAGS_CPU                  BIT(2)
85 #define HASH_FLAGS_DMA_READY            BIT(3)
86 #define HASH_FLAGS_DMA_ACTIVE           BIT(4)
87 #define HASH_FLAGS_HMAC_INIT            BIT(5)
88 #define HASH_FLAGS_HMAC_FINAL           BIT(6)
89 #define HASH_FLAGS_HMAC_KEY             BIT(7)
90
91 #define HASH_FLAGS_FINAL                BIT(15)
92 #define HASH_FLAGS_FINUP                BIT(16)
93 #define HASH_FLAGS_ALGO_MASK            GENMASK(21, 18)
94 #define HASH_FLAGS_MD5                  BIT(18)
95 #define HASH_FLAGS_SHA1                 BIT(19)
96 #define HASH_FLAGS_SHA224               BIT(20)
97 #define HASH_FLAGS_SHA256               BIT(21)
98 #define HASH_FLAGS_HMAC                 BIT(23)
99
100 #define HASH_OP_UPDATE                  1
101 #define HASH_OP_FINAL                   2
102
/*
 * Input data format, written into the HASH_CR DATATYPE field (see
 * HASH_CR_DATATYPE_POS): the granularity at which the IP reorders the
 * words fed through HASH_DIN.
 */
enum stm32_hash_data_format {
	HASH_DATA_32_BITS		= 0x0,
	HASH_DATA_16_BITS		= 0x1,
	HASH_DATA_8_BITS		= 0x2,	/* used by this driver (see stm32_hash_init) */
	HASH_DATA_1_BIT			= 0x3
};
109
110 #define HASH_BUFLEN                     256
111 #define HASH_LONG_KEY                   64
112 #define HASH_MAX_KEY_SIZE               (SHA256_BLOCK_SIZE * 8)
113 #define HASH_QUEUE_LENGTH               16
114 #define HASH_DMA_THRESHOLD              50
115
116 #define HASH_AUTOSUSPEND_DELAY          50
117
/* Transform (tfm) level context, shared by all requests on this tfm. */
struct stm32_hash_ctx {
	struct crypto_engine_ctx enginectx;	/* crypto engine callbacks */
	struct stm32_hash_dev	*hdev;		/* device bound on first use */
	struct crypto_shash	*xtfm;		/* sw fallback for empty-msg quirk */
	unsigned long		flags;		/* HASH_FLAGS_* (HMAC selection) */

	u8			key[HASH_MAX_KEY_SIZE];	/* HMAC key copy */
	int			keylen;			/* HMAC key length, bytes */
};
127
/* Software hash state carried across update() calls on one request. */
struct stm32_hash_state {
	u32			flags;	/* HASH_FLAGS_* for this request */

	u16			bufcnt;	/* bytes currently held in buffer[] */
	u16			buflen;	/* buffer[] capacity (HASH_BUFLEN) */

	u8 buffer[HASH_BUFLEN] __aligned(4);	/* staging buffer for CPU writes */

	/* hash state */
	u32			*hw_context;	/* saved HASH_CSR context; managed outside this chunk */
};
139
/* Per-request context (lives in the ahash request's private area). */
struct stm32_hash_request_ctx {
	struct stm32_hash_dev	*hdev;	/* device handling this request */
	unsigned long		op;	/* HASH_OP_UPDATE or HASH_OP_FINAL */

	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));	/* result buffer */
	size_t			digcnt;	/* digest size in bytes */

	/* DMA */
	struct scatterlist	*sg;	/* current source entry being consumed */
	unsigned int		offset;	/* byte offset within *sg */
	unsigned int		total;	/* bytes still to consume */
	struct scatterlist	sg_key;	/* one-entry list wrapping the HMAC key */

	dma_addr_t		dma_addr;
	size_t			dma_ct;	/* dma_map_sg() return value */
	int			nents;	/* number of entries in the source list */

	u8			data_type;	/* HASH_DATA_* value for HASH_CR */

	struct stm32_hash_state state;	/* software hash state */
};
161
/* One group of ahash algorithms registered for a hardware variant. */
struct stm32_hash_algs_info {
	struct ahash_alg	*algs_list;
	size_t			size;	/* number of entries in algs_list */
};
166
/* Per-compatible description of hardware capabilities and quirks. */
struct stm32_hash_pdata {
	struct stm32_hash_algs_info	*algs_info;	/* supported algorithms */
	size_t				algs_info_size;
	bool				has_sr;		/* has the HASH_SR status register */
	bool				has_mdmat;	/* implements the HASH_CR MDMAT bit */
	bool				broken_emptymsg; /* cannot hash a zero-length message */
	bool				ux500;		/* Ux500 register layout / encodings */
};
175
/* Per-instance device state. */
struct stm32_hash_dev {
	struct list_head	list;		/* node in stm32_hash.dev_list */
	struct device		*dev;
	struct clk		*clk;
	struct reset_control	*rst;
	void __iomem		*io_base;	/* mapped register window */
	phys_addr_t		phys_base;	/* physical base, used as DMA target */
	u32			dma_mode;	/* 1 selects the alternate DMAA final flow */
	u32			dma_maxburst;
	bool			polled;		/* no IRQ: completion is polled */

	struct ahash_request	*req;		/* request currently in flight */
	struct crypto_engine	*engine;

	unsigned long		flags;		/* device-level HASH_FLAGS_* */

	struct dma_chan		*dma_lch;	/* "in" channel feeding HASH_DIN */
	struct completion	dma_completion;

	const struct stm32_hash_pdata	*pdata;
};
197
/* Driver-global registry of probed devices. */
struct stm32_hash_drv {
	struct list_head	dev_list;
	spinlock_t		lock; /* List protection access */
};

static struct stm32_hash_drv stm32_hash = {
	.dev_list = LIST_HEAD_INIT(stm32_hash.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(stm32_hash.lock),
};
207
208 static void stm32_hash_dma_callback(void *param);
209
/* Read a 32-bit HASH register at @offset (relaxed, no memory barrier). */
static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset)
{
	return readl_relaxed(hdev->io_base + offset);
}
214
/* Write @value to the 32-bit HASH register at @offset (relaxed). */
static inline void stm32_hash_write(struct stm32_hash_dev *hdev,
				    u32 offset, u32 value)
{
	writel_relaxed(value, hdev->io_base + offset);
}
220
221 static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
222 {
223         u32 status;
224
225         /* The Ux500 lacks the special status register, we poll the DCAL bit instead */
226         if (!hdev->pdata->has_sr)
227                 return readl_relaxed_poll_timeout(hdev->io_base + HASH_STR, status,
228                                                   !(status & HASH_STR_DCAL), 10, 10000);
229
230         return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status,
231                                    !(status & HASH_SR_BUSY), 10, 10000);
232 }
233
234 static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length)
235 {
236         u32 reg;
237
238         reg = stm32_hash_read(hdev, HASH_STR);
239         reg &= ~(HASH_STR_NBLW_MASK);
240         reg |= (8U * ((length) % 4U));
241         stm32_hash_write(hdev, HASH_STR, reg);
242 }
243
/*
 * Load the HMAC key into the HASH_DIN FIFO and start its digestion.
 *
 * Returns -EINPROGRESS once key processing has been kicked off (the
 * caller must wait for the core to go idle again), or 0 when no key is
 * set.
 */
static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 reg;
	int keylen = ctx->keylen;
	void *key = ctx->key;

	if (keylen) {
		/* Declare the number of valid bits in the last key word. */
		stm32_hash_set_nblw(hdev, keylen);

		/*
		 * The key is pushed in whole 32-bit words; when keylen is not
		 * a multiple of 4 the last access reads a few bytes past
		 * keylen, which stays inside the fixed-size ctx->key[] array.
		 */
		while (keylen > 0) {
			stm32_hash_write(hdev, HASH_DIN, *(u32 *)key);
			keylen -= 4;
			key += 4;
		}

		/* Trigger digest calculation for the key. */
		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);

		return -EINPROGRESS;
	}

	return 0;
}
270
/*
 * Program HASH_CR for the current request: algorithm, data type, HMAC
 * mode and (on Ux500) the empty-message flag. Only done once per
 * request — HASH_FLAGS_INIT makes later calls no-ops.
 *
 * @bufcnt: number of bytes about to be processed; only used to detect a
 *	    zero-length message on the Ux500.
 */
static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev, int bufcnt)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct stm32_hash_state *state = &rctx->state;

	u32 reg = HASH_CR_INIT;

	if (!(hdev->flags & HASH_FLAGS_INIT)) {
		switch (state->flags & HASH_FLAGS_ALGO_MASK) {
		case HASH_FLAGS_MD5:
			reg |= HASH_CR_ALGO_MD5;
			break;
		case HASH_FLAGS_SHA1:
			/* Ux500 uses a different ALGO bit encoding. */
			if (hdev->pdata->ux500)
				reg |= HASH_CR_UX500_ALGO_SHA1;
			else
				reg |= HASH_CR_ALGO_SHA1;
			break;
		case HASH_FLAGS_SHA224:
			reg |= HASH_CR_ALGO_SHA224;
			break;
		case HASH_FLAGS_SHA256:
			if (hdev->pdata->ux500)
				reg |= HASH_CR_UX500_ALGO_SHA256;
			else
				reg |= HASH_CR_ALGO_SHA256;
			break;
		default:
			/* Should be unreachable: stm32_hash_init() rejects other sizes. */
			reg |= HASH_CR_ALGO_MD5;
		}

		reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);

		if (state->flags & HASH_FLAGS_HMAC) {
			hdev->flags |= HASH_FLAGS_HMAC;
			reg |= HASH_CR_MODE;
			/* Keys longer than 64 bytes need the LKEY bit. */
			if (ctx->keylen > HASH_LONG_KEY)
				reg |= HASH_CR_LKEY;
		}

		/*
		 * On the Ux500 we need to set a special flag to indicate that
		 * the message is zero length.
		 */
		if (hdev->pdata->ux500 && bufcnt == 0)
			reg |= HASH_CR_UX500_EMPTYMSG;

		/* Enable the digest-complete interrupt unless polling. */
		if (!hdev->polled)
			stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);

		stm32_hash_write(hdev, HASH_CR, reg);

		hdev->flags |= HASH_FLAGS_INIT;

		dev_dbg(hdev->dev, "Write Control %x\n", reg);
	}
}
330
/*
 * Copy bytes from the request scatterlist into the staging buffer until
 * the buffer is full or the input is exhausted, advancing rctx->sg,
 * rctx->offset and rctx->total accordingly.
 */
static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
{
	struct stm32_hash_state *state = &rctx->state;
	size_t count;

	while ((state->bufcnt < state->buflen) && rctx->total) {
		count = min(rctx->sg->length - rctx->offset, rctx->total);
		count = min_t(size_t, count, state->buflen - state->bufcnt);

		/* count is unsigned, so only the == 0 case can occur here. */
		if (count <= 0) {
			/* Skip zero-length entries in the middle of the list. */
			if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
				rctx->sg = sg_next(rctx->sg);
				continue;
			} else {
				break;
			}
		}

		scatterwalk_map_and_copy(state->buffer + state->bufcnt,
					 rctx->sg, rctx->offset, count, 0);

		state->bufcnt += count;
		rctx->offset += count;
		rctx->total -= count;

		/* Entry fully consumed: step to the next one, if any. */
		if (rctx->offset == rctx->sg->length) {
			rctx->sg = sg_next(rctx->sg);
			if (rctx->sg)
				rctx->offset = 0;
			else
				rctx->total = 0;
		}
	}
}
365
/*
 * Feed @length bytes from @buf to the hash core using CPU (PIO) writes.
 *
 * When @final is set, program NBLW, trigger the digest calculation and,
 * for HMAC, feed the key again for the outer round.
 *
 * Returns 0 when more data may follow, -EINPROGRESS once the final
 * digest has been started, or -ETIMEDOUT if the core stays busy.
 */
static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
			       const u8 *buf, size_t length, int final)
{
	unsigned int count, len32;
	const u32 *buffer = (const u32 *)buf;
	u32 reg;

	if (final)
		hdev->flags |= HASH_FLAGS_FINAL;

	/* Number of full 32-bit words needed to carry 'length' bytes. */
	len32 = DIV_ROUND_UP(length, sizeof(u32));

	dev_dbg(hdev->dev, "%s: length: %zd, final: %x len32 %i\n",
		__func__, length, final, len32);

	hdev->flags |= HASH_FLAGS_CPU;

	stm32_hash_write_ctrl(hdev, length);

	if (stm32_hash_wait_busy(hdev))
		return -ETIMEDOUT;

	/* For HMAC the key must be digested before any message data. */
	if ((hdev->flags & HASH_FLAGS_HMAC) &&
	    (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) {
		hdev->flags |= HASH_FLAGS_HMAC_KEY;
		stm32_hash_write_key(hdev);
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
	}

	for (count = 0; count < len32; count++)
		stm32_hash_write(hdev, HASH_DIN, buffer[count]);

	if (final) {
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;

		/* Mark the valid bits in the last word and start the digest. */
		stm32_hash_set_nblw(hdev, length);
		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);
		/* HMAC outer round: feed the key again after the message. */
		if (hdev->flags & HASH_FLAGS_HMAC) {
			if (stm32_hash_wait_busy(hdev))
				return -ETIMEDOUT;
			stm32_hash_write_key(hdev);
		}
		return -EINPROGRESS;
	}

	return 0;
}
417
/*
 * CPU-mode update: drain the scatterlist through the staging buffer in
 * buflen-sized chunks, then run the final round when requested.
 */
static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct stm32_hash_state *state = &rctx->state;
	int bufcnt, err = 0, final;

	dev_dbg(hdev->dev, "%s flags %x\n", __func__, state->flags);

	final = state->flags & HASH_FLAGS_FINAL;

	/* Flush full buffers while enough input remains. */
	while ((rctx->total >= state->buflen) ||
	       (state->bufcnt + rctx->total >= state->buflen)) {
		stm32_hash_append_sg(rctx);
		bufcnt = state->bufcnt;
		state->bufcnt = 0;
		err = stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 0);
		if (err)
			return err;
	}

	/* Buffer the remainder for a later update or the final round. */
	stm32_hash_append_sg(rctx);

	if (final) {
		bufcnt = state->bufcnt;
		state->bufcnt = 0;
		err = stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 1);
	}

	return err;
}
448
449 static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
450                                struct scatterlist *sg, int length, int mdma)
451 {
452         struct dma_async_tx_descriptor *in_desc;
453         dma_cookie_t cookie;
454         u32 reg;
455         int err;
456
457         in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1,
458                                           DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT |
459                                           DMA_CTRL_ACK);
460         if (!in_desc) {
461                 dev_err(hdev->dev, "dmaengine_prep_slave error\n");
462                 return -ENOMEM;
463         }
464
465         reinit_completion(&hdev->dma_completion);
466         in_desc->callback = stm32_hash_dma_callback;
467         in_desc->callback_param = hdev;
468
469         hdev->flags |= HASH_FLAGS_FINAL;
470         hdev->flags |= HASH_FLAGS_DMA_ACTIVE;
471
472         reg = stm32_hash_read(hdev, HASH_CR);
473
474         if (!hdev->pdata->has_mdmat) {
475                 if (mdma)
476                         reg |= HASH_CR_MDMAT;
477                 else
478                         reg &= ~HASH_CR_MDMAT;
479         }
480         reg |= HASH_CR_DMAE;
481
482         stm32_hash_write(hdev, HASH_CR, reg);
483
484         stm32_hash_set_nblw(hdev, length);
485
486         cookie = dmaengine_submit(in_desc);
487         err = dma_submit_error(cookie);
488         if (err)
489                 return -ENOMEM;
490
491         dma_async_issue_pending(hdev->dma_lch);
492
493         if (!wait_for_completion_timeout(&hdev->dma_completion,
494                                          msecs_to_jiffies(100)))
495                 err = -ETIMEDOUT;
496
497         if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
498                                      NULL, NULL) != DMA_COMPLETE)
499                 err = -ETIMEDOUT;
500
501         if (err) {
502                 dev_err(hdev->dev, "DMA Error %i\n", err);
503                 dmaengine_terminate_all(hdev->dma_lch);
504                 return err;
505         }
506
507         return -EINPROGRESS;
508 }
509
/* DMA completion callback: wake the waiter in stm32_hash_xmit_dma(). */
static void stm32_hash_dma_callback(void *param)
{
	struct stm32_hash_dev *hdev = param;

	complete(&hdev->dma_completion);

	/*
	 * NOTE(review): the flag is set after complete(); the waiter in
	 * stm32_hash_xmit_dma() only waits on the completion, but confirm
	 * no other path reads HASH_FLAGS_DMA_READY concurrently with this.
	 */
	hdev->flags |= HASH_FLAGS_DMA_READY;
}
518
/*
 * Feed the HMAC key to the core in the DMA data path.
 *
 * Short keys (or dma_mode == 1 hardware) are written with CPU writes;
 * longer keys are sent through the DMA channel like regular data.
 *
 * Returns -EINPROGRESS on success (key digestion started), or a
 * negative errno.
 */
static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	int err;

	if (ctx->keylen < HASH_DMA_THRESHOLD || (hdev->dma_mode == 1)) {
		err = stm32_hash_write_key(hdev);
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
	} else {
		/* The key scatterlist is only built on the first pass. */
		if (!(hdev->flags & HASH_FLAGS_HMAC_KEY))
			sg_init_one(&rctx->sg_key, ctx->key,
				    ALIGN(ctx->keylen, sizeof(u32)));

		rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1,
					  DMA_TO_DEVICE);
		if (rctx->dma_ct == 0) {
			dev_err(hdev->dev, "dma_map_sg error\n");
			return -ENOMEM;
		}

		err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0);

		dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE);
	}

	return err;
}
549
550 static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
551 {
552         struct dma_slave_config dma_conf;
553         struct dma_chan *chan;
554         int err;
555
556         memset(&dma_conf, 0, sizeof(dma_conf));
557
558         dma_conf.direction = DMA_MEM_TO_DEV;
559         dma_conf.dst_addr = hdev->phys_base + HASH_DIN;
560         dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
561         dma_conf.src_maxburst = hdev->dma_maxburst;
562         dma_conf.dst_maxburst = hdev->dma_maxburst;
563         dma_conf.device_fc = false;
564
565         chan = dma_request_chan(hdev->dev, "in");
566         if (IS_ERR(chan))
567                 return PTR_ERR(chan);
568
569         hdev->dma_lch = chan;
570
571         err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
572         if (err) {
573                 dma_release_channel(hdev->dma_lch);
574                 hdev->dma_lch = NULL;
575                 dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
576                 return err;
577         }
578
579         init_completion(&hdev->dma_completion);
580
581         return 0;
582 }
583
584 static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
585 {
586         struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
587         u32 *buffer = (void *)rctx->state.buffer;
588         struct scatterlist sg[1], *tsg;
589         int err = 0, len = 0, reg, ncp = 0;
590         unsigned int i;
591
592         rctx->sg = hdev->req->src;
593         rctx->total = hdev->req->nbytes;
594
595         rctx->nents = sg_nents(rctx->sg);
596
597         if (rctx->nents < 0)
598                 return -EINVAL;
599
600         stm32_hash_write_ctrl(hdev, rctx->total);
601
602         if (hdev->flags & HASH_FLAGS_HMAC) {
603                 err = stm32_hash_hmac_dma_send(hdev);
604                 if (err != -EINPROGRESS)
605                         return err;
606         }
607
608         for_each_sg(rctx->sg, tsg, rctx->nents, i) {
609                 len = sg->length;
610
611                 sg[0] = *tsg;
612                 if (sg_is_last(sg)) {
613                         if (hdev->dma_mode == 1) {
614                                 len = (ALIGN(sg->length, 16) - 16);
615
616                                 ncp = sg_pcopy_to_buffer(
617                                         rctx->sg, rctx->nents,
618                                         rctx->state.buffer, sg->length - len,
619                                         rctx->total - sg->length + len);
620
621                                 sg->length = len;
622                         } else {
623                                 if (!(IS_ALIGNED(sg->length, sizeof(u32)))) {
624                                         len = sg->length;
625                                         sg->length = ALIGN(sg->length,
626                                                            sizeof(u32));
627                                 }
628                         }
629                 }
630
631                 rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1,
632                                           DMA_TO_DEVICE);
633                 if (rctx->dma_ct == 0) {
634                         dev_err(hdev->dev, "dma_map_sg error\n");
635                         return -ENOMEM;
636                 }
637
638                 err = stm32_hash_xmit_dma(hdev, sg, len,
639                                           !sg_is_last(sg));
640
641                 dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
642
643                 if (err == -ENOMEM)
644                         return err;
645         }
646
647         if (hdev->dma_mode == 1) {
648                 if (stm32_hash_wait_busy(hdev))
649                         return -ETIMEDOUT;
650                 reg = stm32_hash_read(hdev, HASH_CR);
651                 reg &= ~HASH_CR_DMAE;
652                 reg |= HASH_CR_DMAA;
653                 stm32_hash_write(hdev, HASH_CR, reg);
654
655                 if (ncp) {
656                         memset(buffer + ncp, 0,
657                                DIV_ROUND_UP(ncp, sizeof(u32)) - ncp);
658                         writesl(hdev->io_base + HASH_DIN, buffer,
659                                 DIV_ROUND_UP(ncp, sizeof(u32)));
660                 }
661                 stm32_hash_set_nblw(hdev, ncp);
662                 reg = stm32_hash_read(hdev, HASH_STR);
663                 reg |= HASH_STR_DCAL;
664                 stm32_hash_write(hdev, HASH_STR, reg);
665                 err = -EINPROGRESS;
666         }
667
668         if (hdev->flags & HASH_FLAGS_HMAC) {
669                 if (stm32_hash_wait_busy(hdev))
670                         return -ETIMEDOUT;
671                 err = stm32_hash_hmac_dma_send(hdev);
672         }
673
674         return err;
675 }
676
677 static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
678 {
679         struct stm32_hash_dev *hdev = NULL, *tmp;
680
681         spin_lock_bh(&stm32_hash.lock);
682         if (!ctx->hdev) {
683                 list_for_each_entry(tmp, &stm32_hash.dev_list, list) {
684                         hdev = tmp;
685                         break;
686                 }
687                 ctx->hdev = hdev;
688         } else {
689                 hdev = ctx->hdev;
690         }
691
692         spin_unlock_bh(&stm32_hash.lock);
693
694         return hdev;
695 }
696
697 static bool stm32_hash_dma_aligned_data(struct ahash_request *req)
698 {
699         struct scatterlist *sg;
700         struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
701         struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
702         int i;
703
704         if (req->nbytes <= HASH_DMA_THRESHOLD)
705                 return false;
706
707         if (sg_nents(req->src) > 1) {
708                 if (hdev->dma_mode == 1)
709                         return false;
710                 for_each_sg(req->src, sg, sg_nents(req->src), i) {
711                         if ((!IS_ALIGNED(sg->length, sizeof(u32))) &&
712                             (!sg_is_last(sg)))
713                                 return false;
714                 }
715         }
716
717         if (req->src->offset % 4)
718                 return false;
719
720         return true;
721 }
722
723 static int stm32_hash_init(struct ahash_request *req)
724 {
725         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
726         struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
727         struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
728         struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
729         struct stm32_hash_state *state = &rctx->state;
730
731         rctx->hdev = hdev;
732
733         state->flags = HASH_FLAGS_CPU;
734
735         rctx->digcnt = crypto_ahash_digestsize(tfm);
736         switch (rctx->digcnt) {
737         case MD5_DIGEST_SIZE:
738                 state->flags |= HASH_FLAGS_MD5;
739                 break;
740         case SHA1_DIGEST_SIZE:
741                 state->flags |= HASH_FLAGS_SHA1;
742                 break;
743         case SHA224_DIGEST_SIZE:
744                 state->flags |= HASH_FLAGS_SHA224;
745                 break;
746         case SHA256_DIGEST_SIZE:
747                 state->flags |= HASH_FLAGS_SHA256;
748                 break;
749         default:
750                 return -EINVAL;
751         }
752
753         rctx->state.bufcnt = 0;
754         rctx->state.buflen = HASH_BUFLEN;
755         rctx->total = 0;
756         rctx->offset = 0;
757         rctx->data_type = HASH_DATA_8_BITS;
758
759         if (ctx->flags & HASH_FLAGS_HMAC)
760                 state->flags |= HASH_FLAGS_HMAC;
761
762         dev_dbg(hdev->dev, "%s Flags %x\n", __func__, state->flags);
763
764         return 0;
765 }
766
767 static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
768 {
769         struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
770         struct stm32_hash_state *state = &rctx->state;
771
772         if (!(state->flags & HASH_FLAGS_CPU))
773                 return stm32_hash_dma_send(hdev);
774
775         return stm32_hash_update_cpu(hdev);
776 }
777
778 static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
779 {
780         struct ahash_request *req = hdev->req;
781         struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
782         struct stm32_hash_state *state = &rctx->state;
783         int buflen = state->bufcnt;
784
785         if (state->flags & HASH_FLAGS_FINUP)
786                 return stm32_hash_update_req(hdev);
787
788         state->bufcnt = 0;
789
790         return stm32_hash_xmit_cpu(hdev, state->buffer, buflen, 1);
791 }
792
/*
 * Compute the digest of an empty message with the software fallback
 * tfm, for hardware variants that cannot hash a zero-length input
 * (pdata->broken_emptymsg). The result is written to rctx->digest;
 * failures are only logged since this void helper cannot propagate
 * them.
 */
static void stm32_hash_emptymsg_fallback(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = rctx->hdev;
	int ret;

	dev_dbg(hdev->dev, "use fallback message size 0 key size %d\n",
		ctx->keylen);

	if (!ctx->xtfm) {
		dev_err(hdev->dev, "no fallback engine\n");
		return;
	}

	/* For HMAC, mirror the hardware key into the fallback tfm. */
	if (ctx->keylen) {
		ret = crypto_shash_setkey(ctx->xtfm, ctx->key, ctx->keylen);
		if (ret) {
			dev_err(hdev->dev, "failed to set key ret=%d\n", ret);
			return;
		}
	}

	ret = crypto_shash_tfm_digest(ctx->xtfm, NULL, 0, rctx->digest);
	if (ret)
		dev_err(hdev->dev, "shash digest error\n");
}
821
822 static void stm32_hash_copy_hash(struct ahash_request *req)
823 {
824         struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
825         struct stm32_hash_state *state = &rctx->state;
826         struct stm32_hash_dev *hdev = rctx->hdev;
827         __be32 *hash = (void *)rctx->digest;
828         unsigned int i, hashsize;
829
830         if (hdev->pdata->broken_emptymsg && !req->nbytes)
831                 return stm32_hash_emptymsg_fallback(req);
832
833         switch (state->flags & HASH_FLAGS_ALGO_MASK) {
834         case HASH_FLAGS_MD5:
835                 hashsize = MD5_DIGEST_SIZE;
836                 break;
837         case HASH_FLAGS_SHA1:
838                 hashsize = SHA1_DIGEST_SIZE;
839                 break;
840         case HASH_FLAGS_SHA224:
841                 hashsize = SHA224_DIGEST_SIZE;
842                 break;
843         case HASH_FLAGS_SHA256:
844                 hashsize = SHA256_DIGEST_SIZE;
845                 break;
846         default:
847                 return;
848         }
849
850         for (i = 0; i < hashsize / sizeof(u32); i++) {
851                 if (hdev->pdata->ux500)
852                         hash[i] = cpu_to_be32(stm32_hash_read(hdev,
853                                               HASH_UX500_HREG(i)));
854                 else
855                         hash[i] = cpu_to_be32(stm32_hash_read(hdev,
856                                               HASH_HREG(i)));
857         }
858 }
859
860 static int stm32_hash_finish(struct ahash_request *req)
861 {
862         struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
863
864         if (!req->result)
865                 return -EINVAL;
866
867         memcpy(req->result, rctx->digest, rctx->digcnt);
868
869         return 0;
870 }
871
/*
 * Complete @req: after a successful final operation, fetch the digest
 * from hardware into req->result and clear the per-request device
 * flags; then drop the runtime-PM reference and hand the request back
 * to the crypto engine.
 */
static void stm32_hash_finish_req(struct ahash_request *req, int err)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = rctx->hdev;

	if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
		stm32_hash_copy_hash(req);
		err = stm32_hash_finish(req);
		/* Reset all per-request state bits for the next request. */
		hdev->flags &= ~(HASH_FLAGS_FINAL | HASH_FLAGS_CPU |
				 HASH_FLAGS_INIT | HASH_FLAGS_DMA_READY |
				 HASH_FLAGS_OUTPUT_READY | HASH_FLAGS_HMAC |
				 HASH_FLAGS_HMAC_INIT | HASH_FLAGS_HMAC_FINAL |
				 HASH_FLAGS_HMAC_KEY);
	}

	pm_runtime_mark_last_busy(hdev->dev);
	pm_runtime_put_autosuspend(hdev->dev);

	crypto_finalize_hash_request(hdev->engine, req, err);
}
892
893 static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
894                               struct stm32_hash_request_ctx *rctx)
895 {
896         pm_runtime_get_sync(hdev->dev);
897
898         if (!(HASH_FLAGS_INIT & hdev->flags)) {
899                 stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
900                 stm32_hash_write(hdev, HASH_STR, 0);
901                 stm32_hash_write(hdev, HASH_DIN, 0);
902                 stm32_hash_write(hdev, HASH_IMR, 0);
903         }
904
905         return 0;
906 }
907
/* Forward declarations: crypto engine callbacks installed in cra_init_algs(). */
static int stm32_hash_one_request(struct crypto_engine *engine, void *areq);
static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq);
910
911 static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
912                                    struct ahash_request *req)
913 {
914         return crypto_transfer_hash_request_to_engine(hdev->engine, req);
915 }
916
917 static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq)
918 {
919         struct ahash_request *req = container_of(areq, struct ahash_request,
920                                                  base);
921         struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
922         struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
923         struct stm32_hash_request_ctx *rctx;
924
925         if (!hdev)
926                 return -ENODEV;
927
928         hdev->req = req;
929
930         rctx = ahash_request_ctx(req);
931
932         dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n",
933                 rctx->op, req->nbytes);
934
935         return stm32_hash_hw_init(hdev, rctx);
936 }
937
938 static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
939 {
940         struct ahash_request *req = container_of(areq, struct ahash_request,
941                                                  base);
942         struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
943         struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
944         struct stm32_hash_request_ctx *rctx;
945         int err = 0;
946
947         if (!hdev)
948                 return -ENODEV;
949
950         hdev->req = req;
951
952         rctx = ahash_request_ctx(req);
953
954         if (rctx->op == HASH_OP_UPDATE)
955                 err = stm32_hash_update_req(hdev);
956         else if (rctx->op == HASH_OP_FINAL)
957                 err = stm32_hash_final_req(hdev);
958
959         /* If we have an IRQ, wait for that, else poll for completion */
960         if (err == -EINPROGRESS && hdev->polled) {
961                 if (stm32_hash_wait_busy(hdev))
962                         err = -ETIMEDOUT;
963                 else {
964                         hdev->flags |= HASH_FLAGS_OUTPUT_READY;
965                         err = 0;
966                 }
967         }
968
969         if (err != -EINPROGRESS)
970         /* done task will not finish it, so do it here */
971                 stm32_hash_finish_req(req, err);
972
973         return 0;
974 }
975
976 static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
977 {
978         struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
979         struct stm32_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
980         struct stm32_hash_dev *hdev = ctx->hdev;
981
982         rctx->op = op;
983
984         return stm32_hash_handle_queue(hdev, req);
985 }
986
987 static int stm32_hash_update(struct ahash_request *req)
988 {
989         struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
990         struct stm32_hash_state *state = &rctx->state;
991
992         if (!req->nbytes || !(state->flags & HASH_FLAGS_CPU))
993                 return 0;
994
995         rctx->total = req->nbytes;
996         rctx->sg = req->src;
997         rctx->offset = 0;
998
999         if ((state->bufcnt + rctx->total < state->buflen)) {
1000                 stm32_hash_append_sg(rctx);
1001                 return 0;
1002         }
1003
1004         return stm32_hash_enqueue(req, HASH_OP_UPDATE);
1005 }
1006
1007 static int stm32_hash_final(struct ahash_request *req)
1008 {
1009         struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1010         struct stm32_hash_state *state = &rctx->state;
1011
1012         state->flags |= HASH_FLAGS_FINAL;
1013
1014         return stm32_hash_enqueue(req, HASH_OP_FINAL);
1015 }
1016
1017 static int stm32_hash_finup(struct ahash_request *req)
1018 {
1019         struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1020         struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
1021         struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
1022         struct stm32_hash_state *state = &rctx->state;
1023
1024         if (!req->nbytes)
1025                 goto out;
1026
1027         state->flags |= HASH_FLAGS_FINUP;
1028         rctx->total = req->nbytes;
1029         rctx->sg = req->src;
1030         rctx->offset = 0;
1031
1032         if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
1033                 state->flags &= ~HASH_FLAGS_CPU;
1034
1035 out:
1036         return stm32_hash_final(req);
1037 }
1038
/* .digest handler: one-shot hash = init followed by finup. */
static int stm32_hash_digest(struct ahash_request *req)
{
	int ret = stm32_hash_init(req);

	return ret ? ret : stm32_hash_finup(req);
}
1043
1044 static int stm32_hash_export(struct ahash_request *req, void *out)
1045 {
1046         struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1047         struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
1048         struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
1049         struct stm32_hash_state *state = &rctx->state;
1050         u32 *preg;
1051         unsigned int i;
1052         int ret;
1053
1054         pm_runtime_get_sync(hdev->dev);
1055
1056         ret = stm32_hash_wait_busy(hdev);
1057         if (ret)
1058                 return ret;
1059
1060         state->hw_context = kmalloc_array(3 + HASH_CSR_REGISTER_NUMBER,
1061                                           sizeof(u32), GFP_KERNEL);
1062         preg = state->hw_context;
1063
1064         if (!hdev->pdata->ux500)
1065                 *preg++ = stm32_hash_read(hdev, HASH_IMR);
1066         *preg++ = stm32_hash_read(hdev, HASH_STR);
1067         *preg++ = stm32_hash_read(hdev, HASH_CR);
1068         for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
1069                 *preg++ = stm32_hash_read(hdev, HASH_CSR(i));
1070
1071         pm_runtime_mark_last_busy(hdev->dev);
1072         pm_runtime_put_autosuspend(hdev->dev);
1073
1074         memcpy(out, rctx, sizeof(*rctx));
1075
1076         return 0;
1077 }
1078
1079 static int stm32_hash_import(struct ahash_request *req, const void *in)
1080 {
1081         struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1082         struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
1083         struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
1084         struct stm32_hash_state *state = &rctx->state;
1085         const u32 *preg = in;
1086         u32 reg;
1087         unsigned int i;
1088
1089         memcpy(rctx, in, sizeof(*rctx));
1090
1091         preg = state->hw_context;
1092
1093         pm_runtime_get_sync(hdev->dev);
1094
1095         if (!hdev->pdata->ux500)
1096                 stm32_hash_write(hdev, HASH_IMR, *preg++);
1097         stm32_hash_write(hdev, HASH_STR, *preg++);
1098         stm32_hash_write(hdev, HASH_CR, *preg);
1099         reg = *preg++ | HASH_CR_INIT;
1100         stm32_hash_write(hdev, HASH_CR, reg);
1101
1102         for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
1103                 stm32_hash_write(hdev, HASH_CSR(i), *preg++);
1104
1105         pm_runtime_mark_last_busy(hdev->dev);
1106         pm_runtime_put_autosuspend(hdev->dev);
1107
1108         kfree(state->hw_context);
1109
1110         return 0;
1111 }
1112
1113 static int stm32_hash_setkey(struct crypto_ahash *tfm,
1114                              const u8 *key, unsigned int keylen)
1115 {
1116         struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1117
1118         if (keylen <= HASH_MAX_KEY_SIZE) {
1119                 memcpy(ctx->key, key, keylen);
1120                 ctx->keylen = keylen;
1121         } else {
1122                 return -ENOMEM;
1123         }
1124
1125         return 0;
1126 }
1127
1128 static int stm32_hash_init_fallback(struct crypto_tfm *tfm)
1129 {
1130         struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1131         struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
1132         const char *name = crypto_tfm_alg_name(tfm);
1133         struct crypto_shash *xtfm;
1134
1135         /* The fallback is only needed on Ux500 */
1136         if (!hdev->pdata->ux500)
1137                 return 0;
1138
1139         xtfm = crypto_alloc_shash(name, 0, CRYPTO_ALG_NEED_FALLBACK);
1140         if (IS_ERR(xtfm)) {
1141                 dev_err(hdev->dev, "failed to allocate %s fallback\n",
1142                         name);
1143                 return PTR_ERR(xtfm);
1144         }
1145         dev_info(hdev->dev, "allocated %s fallback\n", name);
1146         ctx->xtfm = xtfm;
1147
1148         return 0;
1149 }
1150
1151 static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
1152                                     const char *algs_hmac_name)
1153 {
1154         struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1155
1156         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1157                                  sizeof(struct stm32_hash_request_ctx));
1158
1159         ctx->keylen = 0;
1160
1161         if (algs_hmac_name)
1162                 ctx->flags |= HASH_FLAGS_HMAC;
1163
1164         ctx->enginectx.op.do_one_request = stm32_hash_one_request;
1165         ctx->enginectx.op.prepare_request = stm32_hash_prepare_req;
1166         ctx->enginectx.op.unprepare_request = NULL;
1167
1168         return stm32_hash_init_fallback(tfm);
1169 }
1170
/* crypto_tfm constructor for the plain (non-HMAC) algorithms. */
static int stm32_hash_cra_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, NULL);
}
1175
/* crypto_tfm constructor for hmac(md5). */
static int stm32_hash_cra_md5_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "md5");
}
1180
/* crypto_tfm constructor for hmac(sha1). */
static int stm32_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha1");
}
1185
/* crypto_tfm constructor for hmac(sha224). */
static int stm32_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha224");
}
1190
/* crypto_tfm constructor for hmac(sha256). */
static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha256");
}
1195
/*
 * crypto_tfm destructor: release the software fallback, if one was
 * allocated by stm32_hash_init_fallback() (Ux500 only).
 */
static void stm32_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->xtfm)
		crypto_free_shash(ctx->xtfm);
}
1203
1204 static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
1205 {
1206         struct stm32_hash_dev *hdev = dev_id;
1207
1208         if (HASH_FLAGS_CPU & hdev->flags) {
1209                 if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
1210                         hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
1211                         goto finish;
1212                 }
1213         } else if (HASH_FLAGS_DMA_READY & hdev->flags) {
1214                 if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) {
1215                         hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE;
1216                                 goto finish;
1217                 }
1218         }
1219
1220         return IRQ_HANDLED;
1221
1222 finish:
1223         /* Finish current request */
1224         stm32_hash_finish_req(hdev->req, 0);
1225
1226         return IRQ_HANDLED;
1227 }
1228
1229 static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
1230 {
1231         struct stm32_hash_dev *hdev = dev_id;
1232         u32 reg;
1233
1234         reg = stm32_hash_read(hdev, HASH_SR);
1235         if (reg & HASH_SR_OUTPUT_READY) {
1236                 reg &= ~HASH_SR_OUTPUT_READY;
1237                 stm32_hash_write(hdev, HASH_SR, reg);
1238                 hdev->flags |= HASH_FLAGS_OUTPUT_READY;
1239                 /* Disable IT*/
1240                 stm32_hash_write(hdev, HASH_IMR, 0);
1241                 return IRQ_WAKE_THREAD;
1242         }
1243
1244         return IRQ_NONE;
1245 }
1246
/*
 * MD5 descriptors: plain "md5" first, then keyed "hmac(md5)".
 * Both share the same request handlers; only the HMAC entry has .setkey.
 */
static struct ahash_alg algs_md5[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "stm32-md5",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "stm32-hmac-md5",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_md5_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
};
1302
/* SHA-1 descriptors: plain "sha1" first, then keyed "hmac(sha1)". */
static struct ahash_alg algs_sha1[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "stm32-sha1",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "stm32-hmac-sha1",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha1_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
};
1358
/* SHA-224 descriptors: plain "sha224" first, then keyed "hmac(sha224)". */
static struct ahash_alg algs_sha224[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "stm32-sha224",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.setkey = stm32_hash_setkey,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "stm32-hmac-sha224",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha224_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
};
1414
/* SHA-256 descriptors: plain "sha256" first, then keyed "hmac(sha256)". */
static struct ahash_alg algs_sha256[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "stm32-sha256",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "stm32-hmac-sha256",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha256_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
};
1470
1471 static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
1472 {
1473         unsigned int i, j;
1474         int err;
1475
1476         for (i = 0; i < hdev->pdata->algs_info_size; i++) {
1477                 for (j = 0; j < hdev->pdata->algs_info[i].size; j++) {
1478                         err = crypto_register_ahash(
1479                                 &hdev->pdata->algs_info[i].algs_list[j]);
1480                         if (err)
1481                                 goto err_algs;
1482                 }
1483         }
1484
1485         return 0;
1486 err_algs:
1487         dev_err(hdev->dev, "Algo %d : %d failed\n", i, j);
1488         for (; i--; ) {
1489                 for (; j--;)
1490                         crypto_unregister_ahash(
1491                                 &hdev->pdata->algs_info[i].algs_list[j]);
1492         }
1493
1494         return err;
1495 }
1496
1497 static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev)
1498 {
1499         unsigned int i, j;
1500
1501         for (i = 0; i < hdev->pdata->algs_info_size; i++) {
1502                 for (j = 0; j < hdev->pdata->algs_info[i].size; j++)
1503                         crypto_unregister_ahash(
1504                                 &hdev->pdata->algs_info[i].algs_list[j]);
1505         }
1506
1507         return 0;
1508 }
1509
/* Ux500: SHA-1 and SHA-256 (each with its HMAC variant). */
static struct stm32_hash_algs_info stm32_hash_algs_info_ux500[] = {
	{
		.algs_list	= algs_sha1,
		.size		= ARRAY_SIZE(algs_sha1),
	},
	{
		.algs_list	= algs_sha256,
		.size		= ARRAY_SIZE(algs_sha256),
	},
};
1520
/* Ux500: empty-message hashing is broken in hardware, use sw fallback. */
static const struct stm32_hash_pdata stm32_hash_pdata_ux500 = {
	.algs_info	= stm32_hash_algs_info_ux500,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_ux500),
	.broken_emptymsg = true,
	.ux500		= true,
};
1527
/* STM32F4: MD5 and SHA-1 (each with its HMAC variant). */
static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = {
	{
		.algs_list	= algs_md5,
		.size		= ARRAY_SIZE(algs_md5),
	},
	{
		.algs_list	= algs_sha1,
		.size		= ARRAY_SIZE(algs_sha1),
	},
};
1538
/* STM32F4: status register and multiple-DMA-transfer support present. */
static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = {
	.algs_info	= stm32_hash_algs_info_stm32f4,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f4),
	.has_sr		= true,
	.has_mdmat	= true,
};
1545
/* STM32F7: MD5, SHA-1, SHA-224 and SHA-256 (each with its HMAC variant). */
static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = {
	{
		.algs_list	= algs_md5,
		.size		= ARRAY_SIZE(algs_md5),
	},
	{
		.algs_list	= algs_sha1,
		.size		= ARRAY_SIZE(algs_sha1),
	},
	{
		.algs_list	= algs_sha224,
		.size		= ARRAY_SIZE(algs_sha224),
	},
	{
		.algs_list	= algs_sha256,
		.size		= ARRAY_SIZE(algs_sha256),
	},
};
1564
/* STM32F7: status register and multiple-DMA-transfer support present. */
static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = {
	.algs_info	= stm32_hash_algs_info_stm32f7,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f7),
	.has_sr		= true,
	.has_mdmat	= true,
};
1571
/* Map device-tree compatible strings to the matching per-SoC pdata. */
static const struct of_device_id stm32_hash_of_match[] = {
	{
		.compatible = "stericsson,ux500-hash",
		.data = &stm32_hash_pdata_ux500,
	},
	{
		.compatible = "st,stm32f456-hash",
		.data = &stm32_hash_pdata_stm32f4,
	},
	{
		.compatible = "st,stm32f756-hash",
		.data = &stm32_hash_pdata_stm32f7,
	},
	{},
};

MODULE_DEVICE_TABLE(of, stm32_hash_of_match);
1589
1590 static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
1591                                    struct device *dev)
1592 {
1593         hdev->pdata = of_device_get_match_data(dev);
1594         if (!hdev->pdata) {
1595                 dev_err(dev, "no compatible OF match\n");
1596                 return -EINVAL;
1597         }
1598
1599         if (of_property_read_u32(dev->of_node, "dma-maxburst",
1600                                  &hdev->dma_maxburst)) {
1601                 dev_info(dev, "dma-maxburst not specified, using 0\n");
1602                 hdev->dma_maxburst = 0;
1603         }
1604
1605         return 0;
1606 }
1607
1608 static int stm32_hash_probe(struct platform_device *pdev)
1609 {
1610         struct stm32_hash_dev *hdev;
1611         struct device *dev = &pdev->dev;
1612         struct resource *res;
1613         int ret, irq;
1614
1615         hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
1616         if (!hdev)
1617                 return -ENOMEM;
1618
1619         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1620         hdev->io_base = devm_ioremap_resource(dev, res);
1621         if (IS_ERR(hdev->io_base))
1622                 return PTR_ERR(hdev->io_base);
1623
1624         hdev->phys_base = res->start;
1625
1626         ret = stm32_hash_get_of_match(hdev, dev);
1627         if (ret)
1628                 return ret;
1629
1630         irq = platform_get_irq_optional(pdev, 0);
1631         if (irq < 0 && irq != -ENXIO)
1632                 return irq;
1633
1634         if (irq > 0) {
1635                 ret = devm_request_threaded_irq(dev, irq,
1636                                                 stm32_hash_irq_handler,
1637                                                 stm32_hash_irq_thread,
1638                                                 IRQF_ONESHOT,
1639                                                 dev_name(dev), hdev);
1640                 if (ret) {
1641                         dev_err(dev, "Cannot grab IRQ\n");
1642                         return ret;
1643                 }
1644         } else {
1645                 dev_info(dev, "No IRQ, use polling mode\n");
1646                 hdev->polled = true;
1647         }
1648
1649         hdev->clk = devm_clk_get(&pdev->dev, NULL);
1650         if (IS_ERR(hdev->clk))
1651                 return dev_err_probe(dev, PTR_ERR(hdev->clk),
1652                                      "failed to get clock for hash\n");
1653
1654         ret = clk_prepare_enable(hdev->clk);
1655         if (ret) {
1656                 dev_err(dev, "failed to enable hash clock (%d)\n", ret);
1657                 return ret;
1658         }
1659
1660         pm_runtime_set_autosuspend_delay(dev, HASH_AUTOSUSPEND_DELAY);
1661         pm_runtime_use_autosuspend(dev);
1662
1663         pm_runtime_get_noresume(dev);
1664         pm_runtime_set_active(dev);
1665         pm_runtime_enable(dev);
1666
1667         hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
1668         if (IS_ERR(hdev->rst)) {
1669                 if (PTR_ERR(hdev->rst) == -EPROBE_DEFER) {
1670                         ret = -EPROBE_DEFER;
1671                         goto err_reset;
1672                 }
1673         } else {
1674                 reset_control_assert(hdev->rst);
1675                 udelay(2);
1676                 reset_control_deassert(hdev->rst);
1677         }
1678
1679         hdev->dev = dev;
1680
1681         platform_set_drvdata(pdev, hdev);
1682
1683         ret = stm32_hash_dma_init(hdev);
1684         switch (ret) {
1685         case 0:
1686                 break;
1687         case -ENOENT:
1688         case -ENODEV:
1689                 dev_info(dev, "DMA mode not available\n");
1690                 break;
1691         default:
1692                 dev_err(dev, "DMA init error %d\n", ret);
1693                 goto err_dma;
1694         }
1695
1696         spin_lock(&stm32_hash.lock);
1697         list_add_tail(&hdev->list, &stm32_hash.dev_list);
1698         spin_unlock(&stm32_hash.lock);
1699
1700         /* Initialize crypto engine */
1701         hdev->engine = crypto_engine_alloc_init(dev, 1);
1702         if (!hdev->engine) {
1703                 ret = -ENOMEM;
1704                 goto err_engine;
1705         }
1706
1707         ret = crypto_engine_start(hdev->engine);
1708         if (ret)
1709                 goto err_engine_start;
1710
1711         if (hdev->pdata->ux500)
1712                 /* FIXME: implement DMA mode for Ux500 */
1713                 hdev->dma_mode = 0;
1714         else
1715                 hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR);
1716
1717         /* Register algos */
1718         ret = stm32_hash_register_algs(hdev);
1719         if (ret)
1720                 goto err_algs;
1721
1722         dev_info(dev, "Init HASH done HW ver %x DMA mode %u\n",
1723                  stm32_hash_read(hdev, HASH_VER), hdev->dma_mode);
1724
1725         pm_runtime_put_sync(dev);
1726
1727         return 0;
1728
1729 err_algs:
1730 err_engine_start:
1731         crypto_engine_exit(hdev->engine);
1732 err_engine:
1733         spin_lock(&stm32_hash.lock);
1734         list_del(&hdev->list);
1735         spin_unlock(&stm32_hash.lock);
1736 err_dma:
1737         if (hdev->dma_lch)
1738                 dma_release_channel(hdev->dma_lch);
1739 err_reset:
1740         pm_runtime_disable(dev);
1741         pm_runtime_put_noidle(dev);
1742
1743         clk_disable_unprepare(hdev->clk);
1744
1745         return ret;
1746 }
1747
1748 static int stm32_hash_remove(struct platform_device *pdev)
1749 {
1750         struct stm32_hash_dev *hdev;
1751         int ret;
1752
1753         hdev = platform_get_drvdata(pdev);
1754         if (!hdev)
1755                 return -ENODEV;
1756
1757         ret = pm_runtime_resume_and_get(hdev->dev);
1758         if (ret < 0)
1759                 return ret;
1760
1761         stm32_hash_unregister_algs(hdev);
1762
1763         crypto_engine_exit(hdev->engine);
1764
1765         spin_lock(&stm32_hash.lock);
1766         list_del(&hdev->list);
1767         spin_unlock(&stm32_hash.lock);
1768
1769         if (hdev->dma_lch)
1770                 dma_release_channel(hdev->dma_lch);
1771
1772         pm_runtime_disable(hdev->dev);
1773         pm_runtime_put_noidle(hdev->dev);
1774
1775         clk_disable_unprepare(hdev->clk);
1776
1777         return 0;
1778 }
1779
1780 #ifdef CONFIG_PM
1781 static int stm32_hash_runtime_suspend(struct device *dev)
1782 {
1783         struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
1784
1785         clk_disable_unprepare(hdev->clk);
1786
1787         return 0;
1788 }
1789
1790 static int stm32_hash_runtime_resume(struct device *dev)
1791 {
1792         struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
1793         int ret;
1794
1795         ret = clk_prepare_enable(hdev->clk);
1796         if (ret) {
1797                 dev_err(hdev->dev, "Failed to prepare_enable clock\n");
1798                 return ret;
1799         }
1800
1801         return 0;
1802 }
1803 #endif
1804
/*
 * Power management operations: system sleep is delegated to the generic
 * runtime-PM force helpers, and runtime PM itself just gates the clock
 * via the suspend/resume callbacks above.
 */
static const struct dev_pm_ops stm32_hash_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(stm32_hash_runtime_suspend,
			   stm32_hash_runtime_resume, NULL)
};
1811
/* Platform driver glue; matched against DT via stm32_hash_of_match. */
static struct platform_driver stm32_hash_driver = {
	.probe		= stm32_hash_probe,
	.remove		= stm32_hash_remove,
	.driver		= {
		.name	= "stm32-hash",
		.pm = &stm32_hash_pm_ops,
		.of_match_table = stm32_hash_of_match,
	}
};
1821
/* Standard module registration and metadata. */
module_platform_driver(stm32_hash_driver);

MODULE_DESCRIPTION("STM32 SHA1/224/256 & MD5 (HMAC) hw accelerator driver");
MODULE_AUTHOR("Lionel Debieve <lionel.debieve@st.com>");
MODULE_LICENSE("GPL v2");