crypto: stm32 - Remove unused hdev->err field
[platform/kernel/linux-starfive.git] / drivers / crypto / stm32 / stm32-hash.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * This file is part of STM32 Crypto driver for Linux.
4  *
5  * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
6  * Author(s): Lionel DEBIEVE <lionel.debieve@st.com> for STMicroelectronics.
7  */
8
9 #include <linux/clk.h>
10 #include <linux/delay.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/dmaengine.h>
13 #include <linux/interrupt.h>
14 #include <linux/io.h>
15 #include <linux/iopoll.h>
16 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/of_device.h>
19 #include <linux/platform_device.h>
20 #include <linux/pm_runtime.h>
21 #include <linux/reset.h>
22
23 #include <crypto/engine.h>
24 #include <crypto/hash.h>
25 #include <crypto/md5.h>
26 #include <crypto/scatterwalk.h>
27 #include <crypto/sha1.h>
28 #include <crypto/sha2.h>
29 #include <crypto/internal/hash.h>
30
31 #define HASH_CR                         0x00
32 #define HASH_DIN                        0x04
33 #define HASH_STR                        0x08
34 #define HASH_UX500_HREG(x)              (0x0c + ((x) * 0x04))
35 #define HASH_IMR                        0x20
36 #define HASH_SR                         0x24
37 #define HASH_CSR(x)                     (0x0F8 + ((x) * 0x04))
38 #define HASH_HREG(x)                    (0x310 + ((x) * 0x04))
39 #define HASH_HWCFGR                     0x3F0
40 #define HASH_VER                        0x3F4
41 #define HASH_ID                         0x3F8
42
43 /* Control Register */
44 #define HASH_CR_INIT                    BIT(2)
45 #define HASH_CR_DMAE                    BIT(3)
46 #define HASH_CR_DATATYPE_POS            4
47 #define HASH_CR_MODE                    BIT(6)
48 #define HASH_CR_MDMAT                   BIT(13)
49 #define HASH_CR_DMAA                    BIT(14)
50 #define HASH_CR_LKEY                    BIT(16)
51
52 #define HASH_CR_ALGO_SHA1               0x0
53 #define HASH_CR_ALGO_MD5                0x80
54 #define HASH_CR_ALGO_SHA224             0x40000
55 #define HASH_CR_ALGO_SHA256             0x40080
56
57 #define HASH_CR_UX500_EMPTYMSG          BIT(20)
58 #define HASH_CR_UX500_ALGO_SHA1         BIT(7)
59 #define HASH_CR_UX500_ALGO_SHA256       0x0
60
61 /* Interrupt */
62 #define HASH_DINIE                      BIT(0)
63 #define HASH_DCIE                       BIT(1)
64
65 /* Interrupt Mask */
66 #define HASH_MASK_CALC_COMPLETION       BIT(0)
67 #define HASH_MASK_DATA_INPUT            BIT(1)
68
69 /* Context swap register */
70 #define HASH_CSR_REGISTER_NUMBER        54
71
72 /* Status Flags */
73 #define HASH_SR_DATA_INPUT_READY        BIT(0)
74 #define HASH_SR_OUTPUT_READY            BIT(1)
75 #define HASH_SR_DMA_ACTIVE              BIT(2)
76 #define HASH_SR_BUSY                    BIT(3)
77
78 /* STR Register */
79 #define HASH_STR_NBLW_MASK              GENMASK(4, 0)
80 #define HASH_STR_DCAL                   BIT(8)
81
82 #define HASH_FLAGS_INIT                 BIT(0)
83 #define HASH_FLAGS_OUTPUT_READY         BIT(1)
84 #define HASH_FLAGS_CPU                  BIT(2)
85 #define HASH_FLAGS_DMA_READY            BIT(3)
86 #define HASH_FLAGS_DMA_ACTIVE           BIT(4)
87 #define HASH_FLAGS_HMAC_INIT            BIT(5)
88 #define HASH_FLAGS_HMAC_FINAL           BIT(6)
89 #define HASH_FLAGS_HMAC_KEY             BIT(7)
90
91 #define HASH_FLAGS_FINAL                BIT(15)
92 #define HASH_FLAGS_FINUP                BIT(16)
93 #define HASH_FLAGS_ALGO_MASK            GENMASK(21, 18)
94 #define HASH_FLAGS_MD5                  BIT(18)
95 #define HASH_FLAGS_SHA1                 BIT(19)
96 #define HASH_FLAGS_SHA224               BIT(20)
97 #define HASH_FLAGS_SHA256               BIT(21)
98 #define HASH_FLAGS_ERRORS               BIT(22)
99 #define HASH_FLAGS_HMAC                 BIT(23)
100
101 #define HASH_OP_UPDATE                  1
102 #define HASH_OP_FINAL                   2
103
/*
 * Values programmed into the HASH_CR DATATYPE field (see
 * stm32_hash_write_ctrl()); they select how input data written to
 * HASH_DIN is interpreted (width/swapping per the reference manual).
 */
enum stm32_hash_data_format {
	HASH_DATA_32_BITS		= 0x0,
	HASH_DATA_16_BITS		= 0x1,
	HASH_DATA_8_BITS		= 0x2,
	HASH_DATA_1_BIT			= 0x3
};
110
111 #define HASH_BUFLEN                     256
112 #define HASH_LONG_KEY                   64
113 #define HASH_MAX_KEY_SIZE               (SHA256_BLOCK_SIZE * 8)
114 #define HASH_QUEUE_LENGTH               16
115 #define HASH_DMA_THRESHOLD              50
116
117 #define HASH_AUTOSUSPEND_DELAY          50
118
/*
 * struct stm32_hash_ctx - per-transform (tfm) context.
 * @enginectx: crypto_engine callbacks for this tfm
 * @hdev: hash device bound to this tfm, cached by stm32_hash_find_dev()
 * @xtfm: software shash fallback used for empty messages on broken IPs
 * @flags: HASH_FLAGS_* bits (e.g. HASH_FLAGS_HMAC)
 * @key: copy of the HMAC key
 * @keylen: length of @key in bytes
 */
struct stm32_hash_ctx {
	struct crypto_engine_ctx enginectx;
	struct stm32_hash_dev	*hdev;
	struct crypto_shash	*xtfm;
	unsigned long		flags;

	u8			key[HASH_MAX_KEY_SIZE];
	int			keylen;
};
128
/*
 * struct stm32_hash_request_ctx - per-request context.
 * @hdev: device processing this request
 * @flags: HASH_FLAGS_* request state bits
 * @op: HASH_OP_UPDATE or HASH_OP_FINAL
 * @digest: digest read back from the HREG registers (or sw fallback)
 * @digcnt: digest size in bytes
 * @bufcnt: number of bytes currently staged in @buffer
 * @buflen: capacity of @buffer (HASH_BUFLEN)
 * @sg, @offset, @total: scatterlist walk state for pending input data
 * @sg_key: single-entry scatterlist wrapping the HMAC key for DMA
 * @dma_addr, @dma_ct, @nents: DMA mapping state
 * @data_type: HASH_CR DATATYPE value (enum stm32_hash_data_format)
 * @buffer: CPU staging buffer for partial blocks
 * @hw_context: saved hardware context for export/import (HASH_CSR regs;
 *		users not visible in this chunk)
 */
struct stm32_hash_request_ctx {
	struct stm32_hash_dev	*hdev;
	unsigned long		flags;
	unsigned long		op;

	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
	size_t			digcnt;
	size_t			bufcnt;
	size_t			buflen;

	/* DMA */
	struct scatterlist	*sg;
	unsigned int		offset;
	unsigned int		total;
	struct scatterlist	sg_key;

	dma_addr_t		dma_addr;
	size_t			dma_ct;
	int			nents;

	u8			data_type;

	u8 buffer[HASH_BUFLEN] __aligned(sizeof(u32));

	/* Export Context */
	u32			*hw_context;
};
156
/*
 * struct stm32_hash_algs_info - algorithms exposed for one IP variant.
 * @algs_list: array of ahash_alg descriptors
 * @size: number of entries in @algs_list
 */
struct stm32_hash_algs_info {
	struct ahash_alg	*algs_list;
	size_t			size;
};
161
/*
 * struct stm32_hash_pdata - compile-time description of an IP variant.
 * @algs_info, @algs_info_size: algorithms supported by this variant
 * @has_sr: IP has the HASH_SR status register (otherwise poll STR DCAL)
 * @has_mdmat: IP implements the HASH_CR MDMAT bit
 * @broken_emptymsg: IP cannot hash a zero-length message (sw fallback used)
 * @ux500: Ux500 variant (different ALGO encoding, HREG layout, empty-msg flag)
 */
struct stm32_hash_pdata {
	struct stm32_hash_algs_info	*algs_info;
	size_t				algs_info_size;
	bool				has_sr;
	bool				has_mdmat;
	bool				broken_emptymsg;
	bool				ux500;
};
170
/*
 * struct stm32_hash_dev - per-instance device state.
 * @list: node in the driver-global stm32_hash.dev_list
 * @dev, @clk, @rst: device, clock and reset handles
 * @io_base: MMIO base of the HASH registers
 * @phys_base: physical base, used to build the DMA destination address
 * @dma_mode: DMA mode; mode 1 needs the final chunk to be CPU-fed
 *	      (see stm32_hash_dma_send())
 * @dma_maxburst: DMA burst length programmed in the slave config
 * @polled: no IRQ available — completion interrupt is never enabled
 * @req: request currently being processed
 * @engine: crypto engine that queues requests to this device
 * @flags: HASH_FLAGS_* device state bits
 * @dma_lch: DMA channel feeding HASH_DIN
 * @dma_completion: signalled by stm32_hash_dma_callback()
 * @pdata: IP variant description
 */
struct stm32_hash_dev {
	struct list_head	list;
	struct device		*dev;
	struct clk		*clk;
	struct reset_control	*rst;
	void __iomem		*io_base;
	phys_addr_t		phys_base;
	u32			dma_mode;
	u32			dma_maxburst;
	bool			polled;

	struct ahash_request	*req;
	struct crypto_engine	*engine;

	unsigned long		flags;

	struct dma_chan		*dma_lch;
	struct completion	dma_completion;

	const struct stm32_hash_pdata	*pdata;
};
192
/* Driver-global registry of probed hash devices. */
struct stm32_hash_drv {
	struct list_head	dev_list;
	spinlock_t		lock; /* List protection access */
};

/* Single global instance; devices are looked up in stm32_hash_find_dev(). */
static struct stm32_hash_drv stm32_hash = {
	.dev_list = LIST_HEAD_INIT(stm32_hash.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(stm32_hash.lock),
};
202
203 static void stm32_hash_dma_callback(void *param);
204
/* MMIO read of a HASH register (relaxed: no ordering barrier). */
static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset)
{
	return readl_relaxed(hdev->io_base + offset);
}
209
/* MMIO write of a HASH register (relaxed: no ordering barrier). */
static inline void stm32_hash_write(struct stm32_hash_dev *hdev,
				    u32 offset, u32 value)
{
	writel_relaxed(value, hdev->io_base + offset);
}
215
/*
 * Poll until the hash core is ready to accept new data/commands.
 * Polls every 10 us for at most 10 ms; returns 0 when ready or
 * -ETIMEDOUT on expiry.
 */
static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
{
	u32 status;

	/* The Ux500 lacks the special status register, we poll the DCAL bit instead */
	if (!hdev->pdata->has_sr)
		return readl_relaxed_poll_timeout(hdev->io_base + HASH_STR, status,
						  !(status & HASH_STR_DCAL), 10, 10000);

	return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status,
				   !(status & HASH_SR_BUSY), 10, 10000);
}
228
229 static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length)
230 {
231         u32 reg;
232
233         reg = stm32_hash_read(hdev, HASH_STR);
234         reg &= ~(HASH_STR_NBLW_MASK);
235         reg |= (8U * ((length) % 4U));
236         stm32_hash_write(hdev, HASH_STR, reg);
237 }
238
239 static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
240 {
241         struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
242         struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
243         u32 reg;
244         int keylen = ctx->keylen;
245         void *key = ctx->key;
246
247         if (keylen) {
248                 stm32_hash_set_nblw(hdev, keylen);
249
250                 while (keylen > 0) {
251                         stm32_hash_write(hdev, HASH_DIN, *(u32 *)key);
252                         keylen -= 4;
253                         key += 4;
254                 }
255
256                 reg = stm32_hash_read(hdev, HASH_STR);
257                 reg |= HASH_STR_DCAL;
258                 stm32_hash_write(hdev, HASH_STR, reg);
259
260                 return -EINPROGRESS;
261         }
262
263         return 0;
264 }
265
/*
 * Program HASH_CR for the current request — algorithm, data type, HMAC
 * mode and (on Ux500) the empty-message flag — and start the IP with
 * HASH_CR_INIT. Only runs once per request (guarded by HASH_FLAGS_INIT).
 * @bufcnt: bytes about to be processed; only used to detect a zero-length
 *	    message on Ux500.
 */
static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev, int bufcnt)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	u32 reg = HASH_CR_INIT;

	if (!(hdev->flags & HASH_FLAGS_INIT)) {
		switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
		case HASH_FLAGS_MD5:
			reg |= HASH_CR_ALGO_MD5;
			break;
		case HASH_FLAGS_SHA1:
			/* Ux500 uses a different ALGO field encoding */
			if (hdev->pdata->ux500)
				reg |= HASH_CR_UX500_ALGO_SHA1;
			else
				reg |= HASH_CR_ALGO_SHA1;
			break;
		case HASH_FLAGS_SHA224:
			reg |= HASH_CR_ALGO_SHA224;
			break;
		case HASH_FLAGS_SHA256:
			if (hdev->pdata->ux500)
				reg |= HASH_CR_UX500_ALGO_SHA256;
			else
				reg |= HASH_CR_ALGO_SHA256;
			break;
		default:
			reg |= HASH_CR_ALGO_MD5;
		}

		reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);

		if (rctx->flags & HASH_FLAGS_HMAC) {
			hdev->flags |= HASH_FLAGS_HMAC;
			reg |= HASH_CR_MODE;
			/* Long-key HMAC mode for keys over one block */
			if (ctx->keylen > HASH_LONG_KEY)
				reg |= HASH_CR_LKEY;
		}

		/*
		 * On the Ux500 we need to set a special flag to indicate that
		 * the message is zero length.
		 */
		if (hdev->pdata->ux500 && bufcnt == 0)
			reg |= HASH_CR_UX500_EMPTYMSG;

		/* Enable the digest-complete interrupt unless we poll */
		if (!hdev->polled)
			stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);

		stm32_hash_write(hdev, HASH_CR, reg);

		hdev->flags |= HASH_FLAGS_INIT;

		dev_dbg(hdev->dev, "Write Control %x\n", reg);
	}
}
324
/*
 * Copy request data from the scatterlist walk state (rctx->sg / offset /
 * total) into the staging buffer rctx->buffer, until the buffer is full or
 * the request data is exhausted. Advances to the next scatterlist entry
 * when the current one is fully consumed.
 */
static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
{
	size_t count;

	while ((rctx->bufcnt < rctx->buflen) && rctx->total) {
		count = min(rctx->sg->length - rctx->offset, rctx->total);
		count = min(count, rctx->buflen - rctx->bufcnt);

		/* count is unsigned, so "<= 0" can only mean zero */
		if (count <= 0) {
			/* Skip empty entries that are not the last one */
			if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
				rctx->sg = sg_next(rctx->sg);
				continue;
			} else {
				break;
			}
		}

		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, rctx->sg,
					 rctx->offset, count, 0);

		rctx->bufcnt += count;
		rctx->offset += count;
		rctx->total -= count;

		if (rctx->offset == rctx->sg->length) {
			rctx->sg = sg_next(rctx->sg);
			if (rctx->sg)
				rctx->offset = 0;
			else
				rctx->total = 0;
		}
	}
}
358
/*
 * Feed @length bytes from @buf to the IP by CPU writes to HASH_DIN.
 *
 * When @final is set: program NBLW for the trailing partial word, trigger
 * the digest computation with DCAL and, for HMAC, write the key a last
 * time; returns -EINPROGRESS since the digest is not available yet.
 * Returns 0 for a non-final chunk, or -ETIMEDOUT if the IP stays busy.
 */
static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
			       const u8 *buf, size_t length, int final)
{
	unsigned int count, len32;
	const u32 *buffer = (const u32 *)buf;
	u32 reg;

	if (final)
		hdev->flags |= HASH_FLAGS_FINAL;

	/* Number of 32-bit words to push; the last one may be partial */
	len32 = DIV_ROUND_UP(length, sizeof(u32));

	dev_dbg(hdev->dev, "%s: length: %zd, final: %x len32 %i\n",
		__func__, length, final, len32);

	hdev->flags |= HASH_FLAGS_CPU;

	stm32_hash_write_ctrl(hdev, length);

	if (stm32_hash_wait_busy(hdev))
		return -ETIMEDOUT;

	/* For HMAC the key must be written once before any message data */
	if ((hdev->flags & HASH_FLAGS_HMAC) &&
	    (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) {
		hdev->flags |= HASH_FLAGS_HMAC_KEY;
		stm32_hash_write_key(hdev);
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
	}

	for (count = 0; count < len32; count++)
		stm32_hash_write(hdev, HASH_DIN, buffer[count]);

	if (final) {
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;

		stm32_hash_set_nblw(hdev, length);
		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);
		/* HMAC: feed the key again to close the outer hash */
		if (hdev->flags & HASH_FLAGS_HMAC) {
			if (stm32_hash_wait_busy(hdev))
				return -ETIMEDOUT;
			stm32_hash_write_key(hdev);
		}
		return -EINPROGRESS;
	}

	return 0;
}
410
/*
 * CPU-mode update: while enough input remains to fill the staging buffer,
 * flush full buffers to the IP, then stage the remainder. When the request
 * is final, the last (partial) buffer is sent with the final flag set.
 */
static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	int bufcnt, err = 0, final;

	dev_dbg(hdev->dev, "%s flags %lx\n", __func__, rctx->flags);

	final = rctx->flags & HASH_FLAGS_FINAL;

	while ((rctx->total >= rctx->buflen) ||
	       (rctx->bufcnt + rctx->total >= rctx->buflen)) {
		stm32_hash_append_sg(rctx);
		bufcnt = rctx->bufcnt;
		rctx->bufcnt = 0;
		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, 0);
		if (err)
			return err;
	}

	/* Stage whatever is left (less than a full buffer) */
	stm32_hash_append_sg(rctx);

	if (final) {
		bufcnt = rctx->bufcnt;
		rctx->bufcnt = 0;
		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, 1);
	}

	return err;
}
440
441 static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
442                                struct scatterlist *sg, int length, int mdma)
443 {
444         struct dma_async_tx_descriptor *in_desc;
445         dma_cookie_t cookie;
446         u32 reg;
447         int err;
448
449         in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1,
450                                           DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT |
451                                           DMA_CTRL_ACK);
452         if (!in_desc) {
453                 dev_err(hdev->dev, "dmaengine_prep_slave error\n");
454                 return -ENOMEM;
455         }
456
457         reinit_completion(&hdev->dma_completion);
458         in_desc->callback = stm32_hash_dma_callback;
459         in_desc->callback_param = hdev;
460
461         hdev->flags |= HASH_FLAGS_FINAL;
462         hdev->flags |= HASH_FLAGS_DMA_ACTIVE;
463
464         reg = stm32_hash_read(hdev, HASH_CR);
465
466         if (!hdev->pdata->has_mdmat) {
467                 if (mdma)
468                         reg |= HASH_CR_MDMAT;
469                 else
470                         reg &= ~HASH_CR_MDMAT;
471         }
472         reg |= HASH_CR_DMAE;
473
474         stm32_hash_write(hdev, HASH_CR, reg);
475
476         stm32_hash_set_nblw(hdev, length);
477
478         cookie = dmaengine_submit(in_desc);
479         err = dma_submit_error(cookie);
480         if (err)
481                 return -ENOMEM;
482
483         dma_async_issue_pending(hdev->dma_lch);
484
485         if (!wait_for_completion_timeout(&hdev->dma_completion,
486                                          msecs_to_jiffies(100)))
487                 err = -ETIMEDOUT;
488
489         if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
490                                      NULL, NULL) != DMA_COMPLETE)
491                 err = -ETIMEDOUT;
492
493         if (err) {
494                 dev_err(hdev->dev, "DMA Error %i\n", err);
495                 dmaengine_terminate_all(hdev->dma_lch);
496                 return err;
497         }
498
499         return -EINPROGRESS;
500 }
501
/* DMA completion callback: wakes the waiter in stm32_hash_xmit_dma(). */
static void stm32_hash_dma_callback(void *param)
{
	struct stm32_hash_dev *hdev = param;

	complete(&hdev->dma_completion);

	hdev->flags |= HASH_FLAGS_DMA_READY;
}
510
/*
 * Send the HMAC key to the IP. Short keys (< HASH_DMA_THRESHOLD bytes) or
 * dma_mode == 1 devices write the key by CPU; otherwise the key is DMA-ed
 * from a single-entry scatterlist. Returns -EINPROGRESS when the key was
 * submitted, or 0/negative errno otherwise.
 */
static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	int err;

	if (ctx->keylen < HASH_DMA_THRESHOLD || (hdev->dma_mode == 1)) {
		err = stm32_hash_write_key(hdev);
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
	} else {
		/* Map the key once; the IP consumes it word-aligned */
		if (!(hdev->flags & HASH_FLAGS_HMAC_KEY))
			sg_init_one(&rctx->sg_key, ctx->key,
				    ALIGN(ctx->keylen, sizeof(u32)));

		rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1,
					  DMA_TO_DEVICE);
		if (rctx->dma_ct == 0) {
			dev_err(hdev->dev, "dma_map_sg error\n");
			return -ENOMEM;
		}

		err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0);

		dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE);
	}

	return err;
}
541
/*
 * Request and configure the "in" DMA channel that feeds HASH_DIN.
 * Returns 0 on success or a negative errno.
 */
static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
{
	struct dma_slave_config dma_conf;
	struct dma_chan *chan;
	int err;

	memset(&dma_conf, 0, sizeof(dma_conf));

	dma_conf.direction = DMA_MEM_TO_DEV;
	dma_conf.dst_addr = hdev->phys_base + HASH_DIN;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.src_maxburst = hdev->dma_maxburst;
	dma_conf.dst_maxburst = hdev->dma_maxburst;
	dma_conf.device_fc = false;

	chan = dma_request_chan(hdev->dev, "in");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	hdev->dma_lch = chan;

	err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
	if (err) {
		dma_release_channel(hdev->dma_lch);
		hdev->dma_lch = NULL;
		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
		return err;
	}

	init_completion(&hdev->dma_completion);

	return 0;
}
575
576 static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
577 {
578         struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
579         struct scatterlist sg[1], *tsg;
580         int err = 0, len = 0, reg, ncp = 0;
581         unsigned int i;
582         u32 *buffer = (void *)rctx->buffer;
583
584         rctx->sg = hdev->req->src;
585         rctx->total = hdev->req->nbytes;
586
587         rctx->nents = sg_nents(rctx->sg);
588
589         if (rctx->nents < 0)
590                 return -EINVAL;
591
592         stm32_hash_write_ctrl(hdev, rctx->total);
593
594         if (hdev->flags & HASH_FLAGS_HMAC) {
595                 err = stm32_hash_hmac_dma_send(hdev);
596                 if (err != -EINPROGRESS)
597                         return err;
598         }
599
600         for_each_sg(rctx->sg, tsg, rctx->nents, i) {
601                 len = sg->length;
602
603                 sg[0] = *tsg;
604                 if (sg_is_last(sg)) {
605                         if (hdev->dma_mode == 1) {
606                                 len = (ALIGN(sg->length, 16) - 16);
607
608                                 ncp = sg_pcopy_to_buffer(
609                                         rctx->sg, rctx->nents,
610                                         rctx->buffer, sg->length - len,
611                                         rctx->total - sg->length + len);
612
613                                 sg->length = len;
614                         } else {
615                                 if (!(IS_ALIGNED(sg->length, sizeof(u32)))) {
616                                         len = sg->length;
617                                         sg->length = ALIGN(sg->length,
618                                                            sizeof(u32));
619                                 }
620                         }
621                 }
622
623                 rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1,
624                                           DMA_TO_DEVICE);
625                 if (rctx->dma_ct == 0) {
626                         dev_err(hdev->dev, "dma_map_sg error\n");
627                         return -ENOMEM;
628                 }
629
630                 err = stm32_hash_xmit_dma(hdev, sg, len,
631                                           !sg_is_last(sg));
632
633                 dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
634
635                 if (err == -ENOMEM)
636                         return err;
637         }
638
639         if (hdev->dma_mode == 1) {
640                 if (stm32_hash_wait_busy(hdev))
641                         return -ETIMEDOUT;
642                 reg = stm32_hash_read(hdev, HASH_CR);
643                 reg &= ~HASH_CR_DMAE;
644                 reg |= HASH_CR_DMAA;
645                 stm32_hash_write(hdev, HASH_CR, reg);
646
647                 if (ncp) {
648                         memset(buffer + ncp, 0,
649                                DIV_ROUND_UP(ncp, sizeof(u32)) - ncp);
650                         writesl(hdev->io_base + HASH_DIN, buffer,
651                                 DIV_ROUND_UP(ncp, sizeof(u32)));
652                 }
653                 stm32_hash_set_nblw(hdev, ncp);
654                 reg = stm32_hash_read(hdev, HASH_STR);
655                 reg |= HASH_STR_DCAL;
656                 stm32_hash_write(hdev, HASH_STR, reg);
657                 err = -EINPROGRESS;
658         }
659
660         if (hdev->flags & HASH_FLAGS_HMAC) {
661                 if (stm32_hash_wait_busy(hdev))
662                         return -ETIMEDOUT;
663                 err = stm32_hash_hmac_dma_send(hdev);
664         }
665
666         return err;
667 }
668
669 static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
670 {
671         struct stm32_hash_dev *hdev = NULL, *tmp;
672
673         spin_lock_bh(&stm32_hash.lock);
674         if (!ctx->hdev) {
675                 list_for_each_entry(tmp, &stm32_hash.dev_list, list) {
676                         hdev = tmp;
677                         break;
678                 }
679                 ctx->hdev = hdev;
680         } else {
681                 hdev = ctx->hdev;
682         }
683
684         spin_unlock_bh(&stm32_hash.lock);
685
686         return hdev;
687 }
688
689 static bool stm32_hash_dma_aligned_data(struct ahash_request *req)
690 {
691         struct scatterlist *sg;
692         struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
693         struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
694         int i;
695
696         if (req->nbytes <= HASH_DMA_THRESHOLD)
697                 return false;
698
699         if (sg_nents(req->src) > 1) {
700                 if (hdev->dma_mode == 1)
701                         return false;
702                 for_each_sg(req->src, sg, sg_nents(req->src), i) {
703                         if ((!IS_ALIGNED(sg->length, sizeof(u32))) &&
704                             (!sg_is_last(sg)))
705                                 return false;
706                 }
707         }
708
709         if (req->src->offset % 4)
710                 return false;
711
712         return true;
713 }
714
715 static int stm32_hash_init(struct ahash_request *req)
716 {
717         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
718         struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
719         struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
720         struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
721
722         rctx->hdev = hdev;
723
724         rctx->flags = HASH_FLAGS_CPU;
725
726         rctx->digcnt = crypto_ahash_digestsize(tfm);
727         switch (rctx->digcnt) {
728         case MD5_DIGEST_SIZE:
729                 rctx->flags |= HASH_FLAGS_MD5;
730                 break;
731         case SHA1_DIGEST_SIZE:
732                 rctx->flags |= HASH_FLAGS_SHA1;
733                 break;
734         case SHA224_DIGEST_SIZE:
735                 rctx->flags |= HASH_FLAGS_SHA224;
736                 break;
737         case SHA256_DIGEST_SIZE:
738                 rctx->flags |= HASH_FLAGS_SHA256;
739                 break;
740         default:
741                 return -EINVAL;
742         }
743
744         rctx->bufcnt = 0;
745         rctx->buflen = HASH_BUFLEN;
746         rctx->total = 0;
747         rctx->offset = 0;
748         rctx->data_type = HASH_DATA_8_BITS;
749
750         memset(rctx->buffer, 0, HASH_BUFLEN);
751
752         if (ctx->flags & HASH_FLAGS_HMAC)
753                 rctx->flags |= HASH_FLAGS_HMAC;
754
755         dev_dbg(hdev->dev, "%s Flags %lx\n", __func__, rctx->flags);
756
757         return 0;
758 }
759
760 static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
761 {
762         struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
763
764         if (!(rctx->flags & HASH_FLAGS_CPU))
765                 return stm32_hash_dma_send(hdev);
766
767         return stm32_hash_update_cpu(hdev);
768 }
769
770 static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
771 {
772         struct ahash_request *req = hdev->req;
773         struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
774         int buflen = rctx->bufcnt;
775
776         if (rctx->flags & HASH_FLAGS_FINUP)
777                 return stm32_hash_update_req(hdev);
778
779         rctx->bufcnt = 0;
780
781         return stm32_hash_xmit_cpu(hdev, rctx->buffer, buflen, 1);
782 }
783
/*
 * Compute the digest of an empty message in software (ctx->xtfm) and store
 * it in rctx->digest. Used on IPs flagged broken_emptymsg, which cannot
 * hash a zero-length input. Errors are only logged: the caller
 * (stm32_hash_copy_hash()) returns void.
 */
static void stm32_hash_emptymsg_fallback(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = rctx->hdev;
	int ret;

	dev_dbg(hdev->dev, "use fallback message size 0 key size %d\n",
		ctx->keylen);

	if (!ctx->xtfm) {
		dev_err(hdev->dev, "no fallback engine\n");
		return;
	}

	if (ctx->keylen) {
		ret = crypto_shash_setkey(ctx->xtfm, ctx->key, ctx->keylen);
		if (ret) {
			dev_err(hdev->dev, "failed to set key ret=%d\n", ret);
			return;
		}
	}

	ret = crypto_shash_tfm_digest(ctx->xtfm, NULL, 0, rctx->digest);
	if (ret)
		dev_err(hdev->dev, "shash digest error\n");
}
812
813 static void stm32_hash_copy_hash(struct ahash_request *req)
814 {
815         struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
816         struct stm32_hash_dev *hdev = rctx->hdev;
817         __be32 *hash = (void *)rctx->digest;
818         unsigned int i, hashsize;
819
820         if (hdev->pdata->broken_emptymsg && !req->nbytes)
821                 return stm32_hash_emptymsg_fallback(req);
822
823         switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
824         case HASH_FLAGS_MD5:
825                 hashsize = MD5_DIGEST_SIZE;
826                 break;
827         case HASH_FLAGS_SHA1:
828                 hashsize = SHA1_DIGEST_SIZE;
829                 break;
830         case HASH_FLAGS_SHA224:
831                 hashsize = SHA224_DIGEST_SIZE;
832                 break;
833         case HASH_FLAGS_SHA256:
834                 hashsize = SHA256_DIGEST_SIZE;
835                 break;
836         default:
837                 return;
838         }
839
840         for (i = 0; i < hashsize / sizeof(u32); i++) {
841                 if (hdev->pdata->ux500)
842                         hash[i] = cpu_to_be32(stm32_hash_read(hdev,
843                                               HASH_UX500_HREG(i)));
844                 else
845                         hash[i] = cpu_to_be32(stm32_hash_read(hdev,
846                                               HASH_HREG(i)));
847         }
848 }
849
/* Copy the computed digest into the caller's result buffer. */
static int stm32_hash_finish(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (!req->result)
		return -EINVAL;

	memcpy(req->result, rctx->digest, rctx->digcnt);

	return 0;
}
861
/*
 * Complete the current request: on success of a final operation, read the
 * digest back and clear the per-device state flags; on failure, mark the
 * request context with HASH_FLAGS_ERRORS. Then drop the runtime PM
 * reference and report completion to the crypto engine.
 */
static void stm32_hash_finish_req(struct ahash_request *req, int err)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = rctx->hdev;

	if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
		stm32_hash_copy_hash(req);
		err = stm32_hash_finish(req);
		hdev->flags &= ~(HASH_FLAGS_FINAL | HASH_FLAGS_CPU |
				 HASH_FLAGS_INIT | HASH_FLAGS_DMA_READY |
				 HASH_FLAGS_OUTPUT_READY | HASH_FLAGS_HMAC |
				 HASH_FLAGS_HMAC_INIT | HASH_FLAGS_HMAC_FINAL |
				 HASH_FLAGS_HMAC_KEY);
	} else {
		rctx->flags |= HASH_FLAGS_ERRORS;
	}

	pm_runtime_mark_last_busy(hdev->dev);
	pm_runtime_put_autosuspend(hdev->dev);

	crypto_finalize_hash_request(hdev->engine, req, err);
}
884
885 static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
886                               struct stm32_hash_request_ctx *rctx)
887 {
888         pm_runtime_get_sync(hdev->dev);
889
890         if (!(HASH_FLAGS_INIT & hdev->flags)) {
891                 stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
892                 stm32_hash_write(hdev, HASH_STR, 0);
893                 stm32_hash_write(hdev, HASH_DIN, 0);
894                 stm32_hash_write(hdev, HASH_IMR, 0);
895         }
896
897         return 0;
898 }
899
900 static int stm32_hash_one_request(struct crypto_engine *engine, void *areq);
901 static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq);
902
/*
 * Hand the request over to the crypto engine queue; the engine later
 * calls back into stm32_hash_prepare_req()/stm32_hash_one_request().
 */
static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
				   struct ahash_request *req)
{
	return crypto_transfer_hash_request_to_engine(hdev->engine, req);
}
908
909 static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq)
910 {
911         struct ahash_request *req = container_of(areq, struct ahash_request,
912                                                  base);
913         struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
914         struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
915         struct stm32_hash_request_ctx *rctx;
916
917         if (!hdev)
918                 return -ENODEV;
919
920         hdev->req = req;
921
922         rctx = ahash_request_ctx(req);
923
924         dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n",
925                 rctx->op, req->nbytes);
926
927         return stm32_hash_hw_init(hdev, rctx);
928 }
929
/*
 * stm32_hash_one_request() - crypto engine worker: run one queued request.
 * @engine: owning crypto engine
 * @areq: base of the ahash request to process
 *
 * Dispatches to the update or final handler according to the operation
 * recorded in the request context. In polled mode a pending operation
 * (-EINPROGRESS) is waited for here and completed synchronously;
 * with an IRQ, completion happens in the interrupt thread instead.
 */
static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = container_of(areq, struct ahash_request,
						 base);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	struct stm32_hash_request_ctx *rctx;
	int err = 0;

	if (!hdev)
		return -ENODEV;

	hdev->req = req;

	rctx = ahash_request_ctx(req);

	if (rctx->op == HASH_OP_UPDATE)
		err = stm32_hash_update_req(hdev);
	else if (rctx->op == HASH_OP_FINAL)
		err = stm32_hash_final_req(hdev);

	/* If we have an IRQ, wait for that, else poll for completion */
	if (err == -EINPROGRESS && hdev->polled) {
		if (stm32_hash_wait_busy(hdev))
			err = -ETIMEDOUT;
		else {
			hdev->flags |= HASH_FLAGS_OUTPUT_READY;
			err = 0;
		}
	}

	/* done task will not finish it, so do it here */
	if (err != -EINPROGRESS)
		stm32_hash_finish_req(req, err);

	return 0;
}
967
968 static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
969 {
970         struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
971         struct stm32_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
972         struct stm32_hash_dev *hdev = ctx->hdev;
973
974         rctx->op = op;
975
976         return stm32_hash_handle_queue(hdev, req);
977 }
978
979 static int stm32_hash_update(struct ahash_request *req)
980 {
981         struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
982
983         if (!req->nbytes || !(rctx->flags & HASH_FLAGS_CPU))
984                 return 0;
985
986         rctx->total = req->nbytes;
987         rctx->sg = req->src;
988         rctx->offset = 0;
989
990         if ((rctx->bufcnt + rctx->total < rctx->buflen)) {
991                 stm32_hash_append_sg(rctx);
992                 return 0;
993         }
994
995         return stm32_hash_enqueue(req, HASH_OP_UPDATE);
996 }
997
/* .final entry point: flag the request final and queue it on the engine */
static int stm32_hash_final(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	rctx->flags |= HASH_FLAGS_FINAL;

	return stm32_hash_enqueue(req, HASH_OP_FINAL);
}
1006
1007 static int stm32_hash_finup(struct ahash_request *req)
1008 {
1009         struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1010         struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
1011         struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
1012
1013         if (!req->nbytes)
1014                 goto out;
1015
1016         rctx->flags |= HASH_FLAGS_FINUP;
1017         rctx->total = req->nbytes;
1018         rctx->sg = req->src;
1019         rctx->offset = 0;
1020
1021         if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
1022                 rctx->flags &= ~HASH_FLAGS_CPU;
1023
1024 out:
1025         return stm32_hash_final(req);
1026 }
1027
/* .digest entry point: init the state, then hash everything via finup */
static int stm32_hash_digest(struct ahash_request *req)
{
	int ret = stm32_hash_init(req);

	return ret ? ret : stm32_hash_finup(req);
}
1032
1033 static int stm32_hash_export(struct ahash_request *req, void *out)
1034 {
1035         struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1036         struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
1037         struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
1038         u32 *preg;
1039         unsigned int i;
1040         int ret;
1041
1042         pm_runtime_get_sync(hdev->dev);
1043
1044         ret = stm32_hash_wait_busy(hdev);
1045         if (ret)
1046                 return ret;
1047
1048         rctx->hw_context = kmalloc_array(3 + HASH_CSR_REGISTER_NUMBER,
1049                                          sizeof(u32),
1050                                          GFP_KERNEL);
1051
1052         preg = rctx->hw_context;
1053
1054         if (!hdev->pdata->ux500)
1055                 *preg++ = stm32_hash_read(hdev, HASH_IMR);
1056         *preg++ = stm32_hash_read(hdev, HASH_STR);
1057         *preg++ = stm32_hash_read(hdev, HASH_CR);
1058         for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
1059                 *preg++ = stm32_hash_read(hdev, HASH_CSR(i));
1060
1061         pm_runtime_mark_last_busy(hdev->dev);
1062         pm_runtime_put_autosuspend(hdev->dev);
1063
1064         memcpy(out, rctx, sizeof(*rctx));
1065
1066         return 0;
1067 }
1068
/*
 * stm32_hash_import() - restore a hardware state saved by
 * stm32_hash_export().
 * @req: ahash request being imported into
 * @in: serialized request context produced by export
 *
 * Rewrites IMR/STR/CR and the CSR bank from the saved buffer, then
 * frees the buffer allocated by export. Always returns 0.
 */
static int stm32_hash_import(struct ahash_request *req, const void *in)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	const u32 *preg = in;
	u32 reg;
	unsigned int i;

	memcpy(rctx, in, sizeof(*rctx));

	/* The saved registers live in the buffer export kmalloc'ed */
	preg = rctx->hw_context;

	pm_runtime_get_sync(hdev->dev);

	/* Ux500 state has no IMR word; skip it there (matches export) */
	if (!hdev->pdata->ux500)
		stm32_hash_write(hdev, HASH_IMR, *preg++);
	stm32_hash_write(hdev, HASH_STR, *preg++);
	/* CR is written twice: saved config first, then again with INIT set */
	stm32_hash_write(hdev, HASH_CR, *preg);
	reg = *preg++ | HASH_CR_INIT;
	stm32_hash_write(hdev, HASH_CR, reg);

	for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
		stm32_hash_write(hdev, HASH_CSR(i), *preg++);

	pm_runtime_mark_last_busy(hdev->dev);
	pm_runtime_put_autosuspend(hdev->dev);

	/* Buffer was allocated by stm32_hash_export(); release it here */
	kfree(rctx->hw_context);

	return 0;
}
1101
1102 static int stm32_hash_setkey(struct crypto_ahash *tfm,
1103                              const u8 *key, unsigned int keylen)
1104 {
1105         struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1106
1107         if (keylen <= HASH_MAX_KEY_SIZE) {
1108                 memcpy(ctx->key, key, keylen);
1109                 ctx->keylen = keylen;
1110         } else {
1111                 return -ENOMEM;
1112         }
1113
1114         return 0;
1115 }
1116
1117 static int stm32_hash_init_fallback(struct crypto_tfm *tfm)
1118 {
1119         struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1120         struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
1121         const char *name = crypto_tfm_alg_name(tfm);
1122         struct crypto_shash *xtfm;
1123
1124         /* The fallback is only needed on Ux500 */
1125         if (!hdev->pdata->ux500)
1126                 return 0;
1127
1128         xtfm = crypto_alloc_shash(name, 0, CRYPTO_ALG_NEED_FALLBACK);
1129         if (IS_ERR(xtfm)) {
1130                 dev_err(hdev->dev, "failed to allocate %s fallback\n",
1131                         name);
1132                 return PTR_ERR(xtfm);
1133         }
1134         dev_info(hdev->dev, "allocated %s fallback\n", name);
1135         ctx->xtfm = xtfm;
1136
1137         return 0;
1138 }
1139
1140 static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
1141                                     const char *algs_hmac_name)
1142 {
1143         struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1144
1145         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1146                                  sizeof(struct stm32_hash_request_ctx));
1147
1148         ctx->keylen = 0;
1149
1150         if (algs_hmac_name)
1151                 ctx->flags |= HASH_FLAGS_HMAC;
1152
1153         ctx->enginectx.op.do_one_request = stm32_hash_one_request;
1154         ctx->enginectx.op.prepare_request = stm32_hash_prepare_req;
1155         ctx->enginectx.op.unprepare_request = NULL;
1156
1157         return stm32_hash_init_fallback(tfm);
1158 }
1159
/* tfm init for plain (non-HMAC) transforms: no base hash name */
static int stm32_hash_cra_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, NULL);
}

/* tfm init for hmac(md5): enables HMAC mode in cra_init_algs() */
static int stm32_hash_cra_md5_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "md5");
}

/* tfm init for hmac(sha1) */
static int stm32_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha1");
}

/* tfm init for hmac(sha224) */
static int stm32_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha224");
}

/* tfm init for hmac(sha256) */
static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha256");
}
1184
/* Release the Ux500 software fallback, if one was allocated */
static void stm32_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->xtfm)
		crypto_free_shash(ctx->xtfm);
}
1192
1193 static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
1194 {
1195         struct stm32_hash_dev *hdev = dev_id;
1196
1197         if (HASH_FLAGS_CPU & hdev->flags) {
1198                 if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
1199                         hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
1200                         goto finish;
1201                 }
1202         } else if (HASH_FLAGS_DMA_READY & hdev->flags) {
1203                 if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) {
1204                         hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE;
1205                                 goto finish;
1206                 }
1207         }
1208
1209         return IRQ_HANDLED;
1210
1211 finish:
1212         /* Finish current request */
1213         stm32_hash_finish_req(hdev->req, 0);
1214
1215         return IRQ_HANDLED;
1216 }
1217
/*
 * stm32_hash_irq_handler() - hard half of the HASH interrupt.
 *
 * On digest completion: acknowledges the status bit, records
 * HASH_FLAGS_OUTPUT_READY, masks further interrupts, and defers the
 * actual completion work to stm32_hash_irq_thread().
 */
static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
{
	struct stm32_hash_dev *hdev = dev_id;
	u32 reg;

	reg = stm32_hash_read(hdev, HASH_SR);
	if (reg & HASH_SR_OUTPUT_READY) {
		/* Acknowledge the digest-ready status bit */
		reg &= ~HASH_SR_OUTPUT_READY;
		stm32_hash_write(hdev, HASH_SR, reg);
		hdev->flags |= HASH_FLAGS_OUTPUT_READY;
		/* Disable IT*/
		stm32_hash_write(hdev, HASH_IMR, 0);
		return IRQ_WAKE_THREAD;
	}

	return IRQ_NONE;
}
1235
/* ahash descriptors for md5 and hmac(md5) */
static struct ahash_alg algs_md5[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "stm32-md5",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "stm32-hmac-md5",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_md5_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
};
1291
/* ahash descriptors for sha1 and hmac(sha1) */
static struct ahash_alg algs_sha1[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "stm32-sha1",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "stm32-hmac-sha1",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha1_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
};
1347
1348 static struct ahash_alg algs_sha224[] = {
1349         {
1350                 .init = stm32_hash_init,
1351                 .update = stm32_hash_update,
1352                 .final = stm32_hash_final,
1353                 .finup = stm32_hash_finup,
1354                 .digest = stm32_hash_digest,
1355                 .export = stm32_hash_export,
1356                 .import = stm32_hash_import,
1357                 .halg = {
1358                         .digestsize = SHA224_DIGEST_SIZE,
1359                         .statesize = sizeof(struct stm32_hash_request_ctx),
1360                         .base = {
1361                                 .cra_name = "sha224",
1362                                 .cra_driver_name = "stm32-sha224",
1363                                 .cra_priority = 200,
1364                                 .cra_flags = CRYPTO_ALG_ASYNC |
1365                                         CRYPTO_ALG_KERN_DRIVER_ONLY,
1366                                 .cra_blocksize = SHA224_BLOCK_SIZE,
1367                                 .cra_ctxsize = sizeof(struct stm32_hash_ctx),
1368                                 .cra_alignmask = 3,
1369                                 .cra_init = stm32_hash_cra_init,
1370                                 .cra_exit = stm32_hash_cra_exit,
1371                                 .cra_module = THIS_MODULE,
1372                         }
1373                 }
1374         },
1375         {
1376                 .init = stm32_hash_init,
1377                 .update = stm32_hash_update,
1378                 .final = stm32_hash_final,
1379                 .finup = stm32_hash_finup,
1380                 .digest = stm32_hash_digest,
1381                 .setkey = stm32_hash_setkey,
1382                 .export = stm32_hash_export,
1383                 .import = stm32_hash_import,
1384                 .halg = {
1385                         .digestsize = SHA224_DIGEST_SIZE,
1386                         .statesize = sizeof(struct stm32_hash_request_ctx),
1387                         .base = {
1388                                 .cra_name = "hmac(sha224)",
1389                                 .cra_driver_name = "stm32-hmac-sha224",
1390                                 .cra_priority = 200,
1391                                 .cra_flags = CRYPTO_ALG_ASYNC |
1392                                         CRYPTO_ALG_KERN_DRIVER_ONLY,
1393                                 .cra_blocksize = SHA224_BLOCK_SIZE,
1394                                 .cra_ctxsize = sizeof(struct stm32_hash_ctx),
1395                                 .cra_alignmask = 3,
1396                                 .cra_init = stm32_hash_cra_sha224_init,
1397                                 .cra_exit = stm32_hash_cra_exit,
1398                                 .cra_module = THIS_MODULE,
1399                         }
1400                 }
1401         },
1402 };
1403
/* ahash descriptors for sha256 and hmac(sha256) */
static struct ahash_alg algs_sha256[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "stm32-sha256",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "stm32-hmac-sha256",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha256_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
};
1459
1460 static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
1461 {
1462         unsigned int i, j;
1463         int err;
1464
1465         for (i = 0; i < hdev->pdata->algs_info_size; i++) {
1466                 for (j = 0; j < hdev->pdata->algs_info[i].size; j++) {
1467                         err = crypto_register_ahash(
1468                                 &hdev->pdata->algs_info[i].algs_list[j]);
1469                         if (err)
1470                                 goto err_algs;
1471                 }
1472         }
1473
1474         return 0;
1475 err_algs:
1476         dev_err(hdev->dev, "Algo %d : %d failed\n", i, j);
1477         for (; i--; ) {
1478                 for (; j--;)
1479                         crypto_unregister_ahash(
1480                                 &hdev->pdata->algs_info[i].algs_list[j]);
1481         }
1482
1483         return err;
1484 }
1485
1486 static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev)
1487 {
1488         unsigned int i, j;
1489
1490         for (i = 0; i < hdev->pdata->algs_info_size; i++) {
1491                 for (j = 0; j < hdev->pdata->algs_info[i].size; j++)
1492                         crypto_unregister_ahash(
1493                                 &hdev->pdata->algs_info[i].algs_list[j]);
1494         }
1495
1496         return 0;
1497 }
1498
/* Ux500: this table registers SHA-1 and SHA-256 only */
static struct stm32_hash_algs_info stm32_hash_algs_info_ux500[] = {
	{
		.algs_list	= algs_sha1,
		.size		= ARRAY_SIZE(algs_sha1),
	},
	{
		.algs_list	= algs_sha256,
		.size		= ARRAY_SIZE(algs_sha256),
	},
};

/* Ux500 variant: cannot hash empty messages, uses its own HREG layout */
static const struct stm32_hash_pdata stm32_hash_pdata_ux500 = {
	.algs_info	= stm32_hash_algs_info_ux500,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_ux500),
	.broken_emptymsg = true,
	.ux500		= true,
};
1516
/* STM32F4: this table registers MD5 and SHA-1 only */
static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = {
	{
		.algs_list	= algs_md5,
		.size		= ARRAY_SIZE(algs_md5),
	},
	{
		.algs_list	= algs_sha1,
		.size		= ARRAY_SIZE(algs_sha1),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = {
	.algs_info	= stm32_hash_algs_info_stm32f4,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f4),
	.has_sr		= true,
	.has_mdmat	= true,
};
1534
/* STM32F7: registers MD5, SHA-1, SHA-224 and SHA-256 */
static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = {
	{
		.algs_list	= algs_md5,
		.size		= ARRAY_SIZE(algs_md5),
	},
	{
		.algs_list	= algs_sha1,
		.size		= ARRAY_SIZE(algs_sha1),
	},
	{
		.algs_list	= algs_sha224,
		.size		= ARRAY_SIZE(algs_sha224),
	},
	{
		.algs_list	= algs_sha256,
		.size		= ARRAY_SIZE(algs_sha256),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = {
	.algs_info	= stm32_hash_algs_info_stm32f7,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f7),
	.has_sr		= true,
	.has_mdmat	= true,
};
1560
/* DT compatibles mapped to their per-variant platform data */
static const struct of_device_id stm32_hash_of_match[] = {
	{
		.compatible = "stericsson,ux500-hash",
		.data = &stm32_hash_pdata_ux500,
	},
	{
		.compatible = "st,stm32f456-hash",
		.data = &stm32_hash_pdata_stm32f4,
	},
	{
		.compatible = "st,stm32f756-hash",
		.data = &stm32_hash_pdata_stm32f7,
	},
	{},
};

MODULE_DEVICE_TABLE(of, stm32_hash_of_match);
1578
1579 static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
1580                                    struct device *dev)
1581 {
1582         hdev->pdata = of_device_get_match_data(dev);
1583         if (!hdev->pdata) {
1584                 dev_err(dev, "no compatible OF match\n");
1585                 return -EINVAL;
1586         }
1587
1588         if (of_property_read_u32(dev->of_node, "dma-maxburst",
1589                                  &hdev->dma_maxburst)) {
1590                 dev_info(dev, "dma-maxburst not specified, using 0\n");
1591                 hdev->dma_maxburst = 0;
1592         }
1593
1594         return 0;
1595 }
1596
/*
 * stm32_hash_probe() - platform driver probe.
 *
 * Maps the registers, grabs the (optional) IRQ, enables the clock and
 * runtime PM, optionally pulses the reset line, sets up DMA (optional),
 * publishes the device on the global list, starts a crypto engine and
 * registers the variant's algorithms. Error paths unwind in reverse
 * order via the goto labels at the bottom.
 */
static int stm32_hash_probe(struct platform_device *pdev)
{
	struct stm32_hash_dev *hdev;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret, irq;

	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hdev->io_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(hdev->io_base))
		return PTR_ERR(hdev->io_base);

	hdev->phys_base = res->start;

	ret = stm32_hash_get_of_match(hdev, dev);
	if (ret)
		return ret;

	/* The IRQ is optional: without one the driver polls for completion */
	irq = platform_get_irq_optional(pdev, 0);
	if (irq < 0 && irq != -ENXIO)
		return irq;

	if (irq > 0) {
		ret = devm_request_threaded_irq(dev, irq,
						stm32_hash_irq_handler,
						stm32_hash_irq_thread,
						IRQF_ONESHOT,
						dev_name(dev), hdev);
		if (ret) {
			dev_err(dev, "Cannot grab IRQ\n");
			return ret;
		}
	} else {
		dev_info(dev, "No IRQ, use polling mode\n");
		hdev->polled = true;
	}

	hdev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(hdev->clk))
		return dev_err_probe(dev, PTR_ERR(hdev->clk),
				     "failed to get clock for hash\n");

	ret = clk_prepare_enable(hdev->clk);
	if (ret) {
		dev_err(dev, "failed to enable hash clock (%d)\n", ret);
		return ret;
	}

	pm_runtime_set_autosuspend_delay(dev, HASH_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);

	/* Keep the device active until probe finishes touching registers */
	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(hdev->rst)) {
		if (PTR_ERR(hdev->rst) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto err_reset;
		}
	} else {
		/* Pulse reset to start from a clean hardware state */
		reset_control_assert(hdev->rst);
		udelay(2);
		reset_control_deassert(hdev->rst);
	}

	hdev->dev = dev;

	platform_set_drvdata(pdev, hdev);

	/* DMA is optional; ENOENT/ENODEV just mean CPU mode */
	ret = stm32_hash_dma_init(hdev);
	switch (ret) {
	case 0:
		break;
	case -ENOENT:
	case -ENODEV:
		dev_info(dev, "DMA mode not available\n");
		break;
	default:
		dev_err(dev, "DMA init error %d\n", ret);
		goto err_dma;
	}

	spin_lock(&stm32_hash.lock);
	list_add_tail(&hdev->list, &stm32_hash.dev_list);
	spin_unlock(&stm32_hash.lock);

	/* Initialize crypto engine */
	hdev->engine = crypto_engine_alloc_init(dev, 1);
	if (!hdev->engine) {
		ret = -ENOMEM;
		goto err_engine;
	}

	ret = crypto_engine_start(hdev->engine);
	if (ret)
		goto err_engine_start;

	if (hdev->pdata->ux500)
		/* FIXME: implement DMA mode for Ux500 */
		hdev->dma_mode = 0;
	else
		hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR);

	/* Register algos */
	ret = stm32_hash_register_algs(hdev);
	if (ret)
		goto err_algs;

	dev_info(dev, "Init HASH done HW ver %x DMA mode %u\n",
		 stm32_hash_read(hdev, HASH_VER), hdev->dma_mode);

	/* Drop the initial PM reference; autosuspend may now kick in */
	pm_runtime_put_sync(dev);

	return 0;

err_algs:
err_engine_start:
	crypto_engine_exit(hdev->engine);
err_engine:
	spin_lock(&stm32_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&stm32_hash.lock);
err_dma:
	if (hdev->dma_lch)
		dma_release_channel(hdev->dma_lch);
err_reset:
	pm_runtime_disable(dev);
	pm_runtime_put_noidle(dev);

	clk_disable_unprepare(hdev->clk);

	return ret;
}
1736
1737 static int stm32_hash_remove(struct platform_device *pdev)
1738 {
1739         struct stm32_hash_dev *hdev;
1740         int ret;
1741
1742         hdev = platform_get_drvdata(pdev);
1743         if (!hdev)
1744                 return -ENODEV;
1745
1746         ret = pm_runtime_resume_and_get(hdev->dev);
1747         if (ret < 0)
1748                 return ret;
1749
1750         stm32_hash_unregister_algs(hdev);
1751
1752         crypto_engine_exit(hdev->engine);
1753
1754         spin_lock(&stm32_hash.lock);
1755         list_del(&hdev->list);
1756         spin_unlock(&stm32_hash.lock);
1757
1758         if (hdev->dma_lch)
1759                 dma_release_channel(hdev->dma_lch);
1760
1761         pm_runtime_disable(hdev->dev);
1762         pm_runtime_put_noidle(hdev->dev);
1763
1764         clk_disable_unprepare(hdev->clk);
1765
1766         return 0;
1767 }
1768
1769 #ifdef CONFIG_PM
1770 static int stm32_hash_runtime_suspend(struct device *dev)
1771 {
1772         struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
1773
1774         clk_disable_unprepare(hdev->clk);
1775
1776         return 0;
1777 }
1778
1779 static int stm32_hash_runtime_resume(struct device *dev)
1780 {
1781         struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
1782         int ret;
1783
1784         ret = clk_prepare_enable(hdev->clk);
1785         if (ret) {
1786                 dev_err(hdev->dev, "Failed to prepare_enable clock\n");
1787                 return ret;
1788         }
1789
1790         return 0;
1791 }
1792 #endif
1793
/*
 * PM operations: system sleep reuses the runtime PM callbacks via the
 * generic force_suspend/force_resume helpers, so the peripheral clock
 * is gated on both runtime idle and system suspend.
 */
static const struct dev_pm_ops stm32_hash_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(stm32_hash_runtime_suspend,
			   stm32_hash_runtime_resume, NULL)
};
1800
/* Platform driver glue: probe/remove hooks, PM ops and DT match table. */
static struct platform_driver stm32_hash_driver = {
	.probe		= stm32_hash_probe,
	.remove		= stm32_hash_remove,
	.driver		= {
		.name	= "stm32-hash",
		.pm = &stm32_hash_pm_ops,
		.of_match_table = stm32_hash_of_match,
	}
};

/* Registers the driver at module init and unregisters it at exit. */
module_platform_driver(stm32_hash_driver);

MODULE_DESCRIPTION("STM32 SHA1/224/256 & MD5 (HMAC) hw accelerator driver");
MODULE_AUTHOR("Lionel Debieve <lionel.debieve@st.com>");
MODULE_LICENSE("GPL v2");