// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file is part of STM32 Crypto driver for Linux.
 *
 * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
 * Author(s): Lionel DEBIEVE <lionel.debieve@st.com> for STMicroelectronics.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include <crypto/engine.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/internal/hash.h>
/* HASH register offsets */
#define HASH_CR			0x00
#define HASH_DIN		0x04
#define HASH_STR		0x08
#define HASH_UX500_HREG(x)	(0x0c + ((x) * 0x04))
#define HASH_IMR		0x20
#define HASH_SR			0x24
#define HASH_CSR(x)		(0x0F8 + ((x) * 0x04))
#define HASH_HREG(x)		(0x310 + ((x) * 0x04))
#define HASH_HWCFGR		0x3F0
#define HASH_VER		0x3F4
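
/*
 * The HASH_CSR bank exposes the internal state of the IP (context swap
 * registers). It is read back in stm32_hash_export() and replayed in
 * stm32_hash_import(), so a partially hashed request can be suspended and
 * resumed later.
 */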
/* Control Register */
#define HASH_CR_INIT			BIT(2)
#define HASH_CR_DMAE			BIT(3)
#define HASH_CR_DATATYPE_POS		4
#define HASH_CR_MODE			BIT(6)
#define HASH_CR_MDMAT			BIT(13)
#define HASH_CR_DMAA			BIT(14)
#define HASH_CR_LKEY			BIT(16)

#define HASH_CR_ALGO_SHA1		0x0
#define HASH_CR_ALGO_MD5		0x80
#define HASH_CR_ALGO_SHA224		0x40000
#define HASH_CR_ALGO_SHA256		0x40080

#define HASH_CR_UX500_EMPTYMSG		BIT(20)
#define HASH_CR_UX500_ALGO_SHA1		BIT(7)
#define HASH_CR_UX500_ALGO_SHA256	0x0
/* Interrupt */
#define HASH_DINIE			BIT(0)
#define HASH_DCIE			BIT(1)

/* Interrupt Mask */
#define HASH_MASK_CALC_COMPLETION	BIT(0)
#define HASH_MASK_DATA_INPUT		BIT(1)
/* Context swap register */
#define HASH_CSR_REGISTER_NUMBER	54

/* Status Flags */
#define HASH_SR_DATA_INPUT_READY	BIT(0)
#define HASH_SR_OUTPUT_READY		BIT(1)
#define HASH_SR_DMA_ACTIVE		BIT(2)
#define HASH_SR_BUSY			BIT(3)

/* STR Register */
#define HASH_STR_NBLW_MASK		GENMASK(4, 0)
#define HASH_STR_DCAL			BIT(8)
#define HASH_FLAGS_INIT			BIT(0)
#define HASH_FLAGS_OUTPUT_READY		BIT(1)
#define HASH_FLAGS_CPU			BIT(2)
#define HASH_FLAGS_DMA_READY		BIT(3)
#define HASH_FLAGS_DMA_ACTIVE		BIT(4)
#define HASH_FLAGS_HMAC_INIT		BIT(5)
#define HASH_FLAGS_HMAC_FINAL		BIT(6)
#define HASH_FLAGS_HMAC_KEY		BIT(7)

#define HASH_FLAGS_FINAL		BIT(15)
#define HASH_FLAGS_FINUP		BIT(16)
#define HASH_FLAGS_ALGO_MASK		GENMASK(21, 18)
#define HASH_FLAGS_MD5			BIT(18)
#define HASH_FLAGS_SHA1			BIT(19)
#define HASH_FLAGS_SHA224		BIT(20)
#define HASH_FLAGS_SHA256		BIT(21)
#define HASH_FLAGS_ERRORS		BIT(22)
#define HASH_FLAGS_HMAC			BIT(23)

#define HASH_OP_UPDATE			1
#define HASH_OP_FINAL			2
enum stm32_hash_data_format {
	HASH_DATA_32_BITS		= 0x0,
	HASH_DATA_16_BITS		= 0x1,
	HASH_DATA_8_BITS		= 0x2,
	HASH_DATA_1_BIT			= 0x3
};
#define HASH_BUFLEN			256
#define HASH_LONG_KEY			64
#define HASH_MAX_KEY_SIZE		(SHA256_BLOCK_SIZE * 8)
#define HASH_QUEUE_LENGTH		16
#define HASH_DMA_THRESHOLD		50

#define HASH_AUTOSUSPEND_DELAY		50
struct stm32_hash_ctx {
	struct crypto_engine_ctx enginectx;
	struct stm32_hash_dev	*hdev;
	struct crypto_shash	*xtfm;
	unsigned long		flags;

	u8			key[HASH_MAX_KEY_SIZE];
	int			keylen;
};
struct stm32_hash_request_ctx {
	struct stm32_hash_dev	*hdev;
	unsigned long		flags;
	unsigned long		op;

	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
	size_t			digcnt;
	size_t			bufcnt;
	size_t			buflen;

	/* DMA */
	struct scatterlist	*sg;
	unsigned int		offset;
	unsigned int		total;
	struct scatterlist	sg_key;

	size_t			dma_ct;
	int			nents;

	u8			data_type;

	u8 buffer[HASH_BUFLEN] __aligned(sizeof(u32));

	/* Export Context */
	u32			*hw_context;
};
struct stm32_hash_algs_info {
	struct ahash_alg	*algs_list;
	size_t			size;
};
struct stm32_hash_pdata {
	struct stm32_hash_algs_info	*algs_info;
	size_t				algs_info_size;
	bool				has_sr;
	bool				has_mdmat;
	bool				broken_emptymsg;
	bool				ux500;
};
struct stm32_hash_dev {
	struct list_head	list;
	struct device		*dev;
	struct clk		*clk;
	struct reset_control	*rst;
	void __iomem		*io_base;
	phys_addr_t		phys_base;
	u32			dma_mode;
	u32			dma_maxburst;
	bool			polled;

	struct ahash_request	*req;
	struct crypto_engine	*engine;

	unsigned long		flags;

	struct dma_chan		*dma_lch;
	struct completion	dma_completion;

	const struct stm32_hash_pdata	*pdata;
};
struct stm32_hash_drv {
	struct list_head	dev_list;
	spinlock_t		lock; /* List protection access */
};

static struct stm32_hash_drv stm32_hash = {
	.dev_list = LIST_HEAD_INIT(stm32_hash.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(stm32_hash.lock),
};
static void stm32_hash_dma_callback(void *param);
static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset)
{
	return readl_relaxed(hdev->io_base + offset);
}

static inline void stm32_hash_write(struct stm32_hash_dev *hdev,
				    u32 offset, u32 value)
{
	writel_relaxed(value, hdev->io_base + offset);
}
static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
{
	u32 status;

	/* The Ux500 lacks the special status register, we poll the DCAL bit instead */
	if (!hdev->pdata->has_sr)
		return readl_relaxed_poll_timeout(hdev->io_base + HASH_STR, status,
						  !(status & HASH_STR_DCAL), 10, 10000);

	return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status,
					  !(status & HASH_SR_BUSY), 10, 10000);
}
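
/*
 * NBLW holds the number of valid bits in the last 32-bit word written to
 * HASH_DIN: a message of 'length' bytes carries 8 * (length % 4) significant
 * bits in its final word (0 meaning the last word is fully valid).
 */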
static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length)
{
	u32 reg;

	reg = stm32_hash_read(hdev, HASH_STR);
	reg &= ~(HASH_STR_NBLW_MASK);
	reg |= (8U * ((length) % 4U));
	stm32_hash_write(hdev, HASH_STR, reg);
}
static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 reg;
	int keylen = ctx->keylen;
	void *key = ctx->key;

	if (keylen) {
		stm32_hash_set_nblw(hdev, keylen);

		while (keylen > 0) {
			stm32_hash_write(hdev, HASH_DIN, *(u32 *)key);
			keylen -= 4;
			key += 4;
		}

		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);

		return -EINPROGRESS;
	}

	return 0;
}
static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev, int bufcnt)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	u32 reg = HASH_CR_INIT;

	if (!(hdev->flags & HASH_FLAGS_INIT)) {
		switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
		case HASH_FLAGS_MD5:
			reg |= HASH_CR_ALGO_MD5;
			break;
		case HASH_FLAGS_SHA1:
			if (hdev->pdata->ux500)
				reg |= HASH_CR_UX500_ALGO_SHA1;
			else
				reg |= HASH_CR_ALGO_SHA1;
			break;
		case HASH_FLAGS_SHA224:
			reg |= HASH_CR_ALGO_SHA224;
			break;
		case HASH_FLAGS_SHA256:
			if (hdev->pdata->ux500)
				reg |= HASH_CR_UX500_ALGO_SHA256;
			else
				reg |= HASH_CR_ALGO_SHA256;
			break;
		default:
			reg |= HASH_CR_ALGO_MD5;
		}

		reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);

		if (rctx->flags & HASH_FLAGS_HMAC) {
			hdev->flags |= HASH_FLAGS_HMAC;
			reg |= HASH_CR_MODE;
			if (ctx->keylen > HASH_LONG_KEY)
				reg |= HASH_CR_LKEY;
		}

		/*
		 * On the Ux500 we need to set a special flag to indicate that
		 * the message is zero length.
		 */
		if (hdev->pdata->ux500 && bufcnt == 0)
			reg |= HASH_CR_UX500_EMPTYMSG;

		stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);

		stm32_hash_write(hdev, HASH_CR, reg);

		hdev->flags |= HASH_FLAGS_INIT;

		dev_dbg(hdev->dev, "Write Control %x\n", reg);
	}
}
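
/*
 * In CPU mode the request scatterlist is staged through rctx->buffer
 * (HASH_BUFLEN bytes): stm32_hash_append_sg() copies as much data as fits,
 * and stm32_hash_update_cpu() flushes the buffer to HASH_DIN whenever a
 * full buffer worth of data is available.
 */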
static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
{
	size_t count;

	while ((rctx->bufcnt < rctx->buflen) && rctx->total) {
		count = min(rctx->sg->length - rctx->offset, rctx->total);
		count = min(count, rctx->buflen - rctx->bufcnt);

		if (count <= 0) {
			if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
				rctx->sg = sg_next(rctx->sg);
				continue;
			} else {
				break;
			}
		}

		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, rctx->sg,
					 rctx->offset, count, 0);

		rctx->bufcnt += count;
		rctx->offset += count;
		rctx->total -= count;

		if (rctx->offset == rctx->sg->length) {
			rctx->sg = sg_next(rctx->sg);
			if (rctx->sg)
				rctx->offset = 0;
			else
				rctx->total = 0;
		}
	}
}
static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
			       const u8 *buf, size_t length, int final)
{
	unsigned int count, len32;
	const u32 *buffer = (const u32 *)buf;
	u32 reg;

	if (final)
		hdev->flags |= HASH_FLAGS_FINAL;

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	dev_dbg(hdev->dev, "%s: length: %zd, final: %x len32 %i\n",
		__func__, length, final, len32);

	hdev->flags |= HASH_FLAGS_CPU;

	stm32_hash_write_ctrl(hdev, length);

	if (stm32_hash_wait_busy(hdev))
		return -ETIMEDOUT;

	if ((hdev->flags & HASH_FLAGS_HMAC) &&
	    (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) {
		hdev->flags |= HASH_FLAGS_HMAC_KEY;
		stm32_hash_write_key(hdev);
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
	}

	for (count = 0; count < len32; count++)
		stm32_hash_write(hdev, HASH_DIN, buffer[count]);

	if (final) {
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;

		stm32_hash_set_nblw(hdev, length);
		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);
		if (hdev->flags & HASH_FLAGS_HMAC) {
			if (stm32_hash_wait_busy(hdev))
				return -ETIMEDOUT;
			stm32_hash_write_key(hdev);
		}
		return -EINPROGRESS;
	}

	return 0;
}
static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	int bufcnt, err = 0, final;

	dev_dbg(hdev->dev, "%s flags %lx\n", __func__, rctx->flags);

	final = rctx->flags & HASH_FLAGS_FINAL;

	while ((rctx->total >= rctx->buflen) ||
	       (rctx->bufcnt + rctx->total >= rctx->buflen)) {
		stm32_hash_append_sg(rctx);
		bufcnt = rctx->bufcnt;
		rctx->bufcnt = 0;
		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, 0);
		if (err)
			return err;
	}

	stm32_hash_append_sg(rctx);

	if (final) {
		bufcnt = rctx->bufcnt;
		rctx->bufcnt = 0;
		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, 1);
	}

	return err;
}
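
/*
 * DMA path: each scatterlist entry is mapped and pushed to HASH_DIN by the
 * "in" DMA channel. Completion is signalled through hdev->dma_completion
 * from stm32_hash_dma_callback(), with a 100 ms timeout as a backstop.
 */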
static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
			       struct scatterlist *sg, int length, int mdma)
{
	struct dma_async_tx_descriptor *in_desc;
	dma_cookie_t cookie;
	u32 reg;
	int err;

	in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1,
					  DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT |
					  DMA_CTRL_ACK);
	if (!in_desc) {
		dev_err(hdev->dev, "dmaengine_prep_slave error\n");
		return -ENOMEM;
	}

	reinit_completion(&hdev->dma_completion);
	in_desc->callback = stm32_hash_dma_callback;
	in_desc->callback_param = hdev;

	hdev->flags |= HASH_FLAGS_FINAL;
	hdev->flags |= HASH_FLAGS_DMA_ACTIVE;

	reg = stm32_hash_read(hdev, HASH_CR);

	/* Only touch MDMAT on IP versions that implement it */
	if (hdev->pdata->has_mdmat) {
		if (mdma)
			reg |= HASH_CR_MDMAT;
		else
			reg &= ~HASH_CR_MDMAT;
	}
	reg |= HASH_CR_DMAE;

	stm32_hash_write(hdev, HASH_CR, reg);

	stm32_hash_set_nblw(hdev, length);

	cookie = dmaengine_submit(in_desc);
	err = dma_submit_error(cookie);
	if (err)
		return -ENOMEM;

	dma_async_issue_pending(hdev->dma_lch);

	if (!wait_for_completion_timeout(&hdev->dma_completion,
					 msecs_to_jiffies(100)))
		err = -ETIMEDOUT;

	if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
				     NULL, NULL) != DMA_COMPLETE)
		err = -ETIMEDOUT;

	if (err) {
		dev_err(hdev->dev, "DMA Error %i\n", err);
		dmaengine_terminate_all(hdev->dma_lch);
		return err;
	}

	return -EINPROGRESS;
}
static void stm32_hash_dma_callback(void *param)
{
	struct stm32_hash_dev *hdev = param;

	complete(&hdev->dma_completion);

	hdev->flags |= HASH_FLAGS_DMA_READY;
}
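
/*
 * For HMAC, the key itself must be fed to the IP before (and after) the
 * message. Short keys are written by the CPU; longer ones are sent through
 * DMA, unless the IP only supports multiple-DMA mode (dma_mode == 1).
 */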
static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	int err;

	if (ctx->keylen < HASH_DMA_THRESHOLD || (hdev->dma_mode == 1)) {
		err = stm32_hash_write_key(hdev);
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
	} else {
		if (!(hdev->flags & HASH_FLAGS_HMAC_KEY))
			sg_init_one(&rctx->sg_key, ctx->key,
				    ALIGN(ctx->keylen, sizeof(u32)));

		rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1,
					  DMA_TO_DEVICE);
		if (rctx->dma_ct == 0) {
			dev_err(hdev->dev, "dma_map_sg error\n");
			return -ENOMEM;
		}

		err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0);

		dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE);
	}

	return err;
}
static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
{
	struct dma_slave_config dma_conf;
	struct dma_chan *chan;
	int err;

	memset(&dma_conf, 0, sizeof(dma_conf));

	dma_conf.direction = DMA_MEM_TO_DEV;
	dma_conf.dst_addr = hdev->phys_base + HASH_DIN;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.src_maxburst = hdev->dma_maxburst;
	dma_conf.dst_maxburst = hdev->dma_maxburst;
	dma_conf.device_fc = false;

	chan = dma_request_chan(hdev->dev, "in");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	hdev->dma_lch = chan;

	err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
	if (err) {
		dma_release_channel(hdev->dma_lch);
		hdev->dma_lch = NULL;
		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
		return err;
	}

	init_completion(&hdev->dma_completion);

	return 0;
}
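
/*
 * When the IP cannot chain several DMA transfers (dma_mode == 1), the last
 * scatterlist entry is shortened to a 16-byte boundary and the trailing
 * bytes are copied into rctx->buffer, to be written back by the CPU once
 * the DMA transfer has completed.
 */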
static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct scatterlist sg[1], *tsg;
	int err = 0, len = 0, reg, ncp = 0;
	unsigned int i;
	u32 *buffer = (void *)rctx->buffer;

	rctx->sg = hdev->req->src;
	rctx->total = hdev->req->nbytes;
	rctx->nents = sg_nents(rctx->sg);
	if (rctx->nents < 0)
		return -EINVAL;

	stm32_hash_write_ctrl(hdev, rctx->total);

	if (hdev->flags & HASH_FLAGS_HMAC) {
		err = stm32_hash_hmac_dma_send(hdev);
		if (err != -EINPROGRESS)
			return err;
	}

	for_each_sg(rctx->sg, tsg, rctx->nents, i) {
		sg[0] = *tsg;
		len = sg->length;

		if (sg_is_last(sg)) {
			if (hdev->dma_mode == 1) {
				len = (ALIGN(sg->length, 16) - 16);
				ncp = sg_pcopy_to_buffer(
					rctx->sg, rctx->nents,
					rctx->buffer, sg->length - len,
					rctx->total - sg->length + len);
				sg->length = len;
			} else {
				if (!(IS_ALIGNED(sg->length, sizeof(u32)))) {
					len = sg->length;
					sg->length = ALIGN(sg->length,
							   sizeof(u32));
				}
			}
		}

		rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1,
					  DMA_TO_DEVICE);
		if (rctx->dma_ct == 0) {
			dev_err(hdev->dev, "dma_map_sg error\n");
			return -ENOMEM;
		}

		err = stm32_hash_xmit_dma(hdev, sg, len,
					  !sg_is_last(sg));

		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);

		if (err == -ENOMEM)
			return err;
	}

	if (hdev->dma_mode == 1) {
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
		reg = stm32_hash_read(hdev, HASH_CR);
		reg &= ~HASH_CR_DMAE;
		reg |= HASH_CR_DMAA;
		stm32_hash_write(hdev, HASH_CR, reg);

		if (ncp) {
			memset(buffer + ncp, 0,
			       DIV_ROUND_UP(ncp, sizeof(u32)) - ncp);
			writesl(hdev->io_base + HASH_DIN, buffer,
				DIV_ROUND_UP(ncp, sizeof(u32)));
		}
		stm32_hash_set_nblw(hdev, ncp);
		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);
		err = -EINPROGRESS;
	}

	if (hdev->flags & HASH_FLAGS_HMAC) {
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
		err = stm32_hash_hmac_dma_send(hdev);
	}

	return err;
}
static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
{
	struct stm32_hash_dev *hdev = NULL, *tmp;

	spin_lock_bh(&stm32_hash.lock);
	if (!ctx->hdev) {
		list_for_each_entry(tmp, &stm32_hash.dev_list, list) {
			hdev = tmp;
			break;
		}
		ctx->hdev = hdev;
	} else {
		hdev = ctx->hdev;
	}

	spin_unlock_bh(&stm32_hash.lock);

	return hdev;
}
static bool stm32_hash_dma_aligned_data(struct ahash_request *req)
{
	struct scatterlist *sg;
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	int i;

	if (req->nbytes <= HASH_DMA_THRESHOLD)
		return false;

	if (sg_nents(req->src) > 1) {
		if (hdev->dma_mode == 1)
			return false;
		for_each_sg(req->src, sg, sg_nents(req->src), i) {
			if ((!IS_ALIGNED(sg->length, sizeof(u32))) &&
			    (!sg_is_last(sg)))
				return false;
		}
	}

	if (req->src->offset % 4)
		return false;

	return true;
}
static int stm32_hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);

	rctx->hdev = hdev;

	rctx->flags = HASH_FLAGS_CPU;

	rctx->digcnt = crypto_ahash_digestsize(tfm);
	switch (rctx->digcnt) {
	case MD5_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_MD5;
		break;
	case SHA1_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_SHA1;
		break;
	case SHA224_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_SHA224;
		break;
	case SHA256_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_SHA256;
		break;
	default:
		return -EINVAL;
	}

	rctx->bufcnt = 0;
	rctx->buflen = HASH_BUFLEN;
	rctx->total = 0;
	rctx->offset = 0;
	rctx->data_type = HASH_DATA_8_BITS;

	memset(rctx->buffer, 0, HASH_BUFLEN);

	if (ctx->flags & HASH_FLAGS_HMAC)
		rctx->flags |= HASH_FLAGS_HMAC;

	dev_dbg(hdev->dev, "%s Flags %lx\n", __func__, rctx->flags);

	return 0;
}
static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);

	if (!(rctx->flags & HASH_FLAGS_CPU))
		return stm32_hash_dma_send(hdev);

	return stm32_hash_update_cpu(hdev);
}
static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
{
	struct ahash_request *req = hdev->req;
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	int buflen = rctx->bufcnt;

	if (rctx->flags & HASH_FLAGS_FINUP)
		return stm32_hash_update_req(hdev);

	rctx->bufcnt = 0;

	return stm32_hash_xmit_cpu(hdev, rctx->buffer, buflen, 1);
}
static void stm32_hash_emptymsg_fallback(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = rctx->hdev;
	int ret;

	dev_dbg(hdev->dev, "use fallback message size 0 key size %d\n",
		ctx->keylen);

	if (!ctx->xtfm) {
		dev_err(hdev->dev, "no fallback engine\n");
		return;
	}

	if (ctx->keylen) {
		ret = crypto_shash_setkey(ctx->xtfm, ctx->key, ctx->keylen);
		if (ret) {
			dev_err(hdev->dev, "failed to set key ret=%d\n", ret);
			return;
		}
	}

	ret = crypto_shash_tfm_digest(ctx->xtfm, NULL, 0, rctx->digest);
	if (ret)
		dev_err(hdev->dev, "shash digest error\n");
}
static void stm32_hash_copy_hash(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = rctx->hdev;
	__be32 *hash = (void *)rctx->digest;
	unsigned int i, hashsize;

	if (hdev->pdata->broken_emptymsg && !req->nbytes)
		return stm32_hash_emptymsg_fallback(req);

	switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
	case HASH_FLAGS_MD5:
		hashsize = MD5_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA1:
		hashsize = SHA1_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA224:
		hashsize = SHA224_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA256:
		hashsize = SHA256_DIGEST_SIZE;
		break;
	default:
		return;
	}

	for (i = 0; i < hashsize / sizeof(u32); i++) {
		if (hdev->pdata->ux500)
			hash[i] = cpu_to_be32(stm32_hash_read(hdev,
					      HASH_UX500_HREG(i)));
		else
			hash[i] = cpu_to_be32(stm32_hash_read(hdev,
					      HASH_HREG(i)));
	}
}
static int stm32_hash_finish(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (!req->result)
		return -EINVAL;

	memcpy(req->result, rctx->digest, rctx->digcnt);

	return 0;
}
static void stm32_hash_finish_req(struct ahash_request *req, int err)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = rctx->hdev;

	if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
		stm32_hash_copy_hash(req);
		err = stm32_hash_finish(req);
		hdev->flags &= ~(HASH_FLAGS_FINAL | HASH_FLAGS_CPU |
				 HASH_FLAGS_INIT | HASH_FLAGS_DMA_READY |
				 HASH_FLAGS_OUTPUT_READY | HASH_FLAGS_HMAC |
				 HASH_FLAGS_HMAC_INIT | HASH_FLAGS_HMAC_FINAL |
				 HASH_FLAGS_HMAC_KEY);
	} else {
		rctx->flags |= HASH_FLAGS_ERRORS;
	}

	pm_runtime_mark_last_busy(hdev->dev);
	pm_runtime_put_autosuspend(hdev->dev);

	crypto_finalize_hash_request(hdev->engine, req, err);
}
static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
			      struct stm32_hash_request_ctx *rctx)
{
	pm_runtime_get_sync(hdev->dev);

	if (!(HASH_FLAGS_INIT & hdev->flags)) {
		stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
		stm32_hash_write(hdev, HASH_STR, 0);
		stm32_hash_write(hdev, HASH_DIN, 0);
		stm32_hash_write(hdev, HASH_IMR, 0);
	}

	return 0;
}
static int stm32_hash_one_request(struct crypto_engine *engine, void *areq);
static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq);

static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
				   struct ahash_request *req)
{
	return crypto_transfer_hash_request_to_engine(hdev->engine, req);
}
static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = container_of(areq, struct ahash_request,
						 base);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	struct stm32_hash_request_ctx *rctx;

	if (!hdev)
		return -ENODEV;

	hdev->req = req;

	rctx = ahash_request_ctx(req);

	dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n",
		rctx->op, req->nbytes);

	return stm32_hash_hw_init(hdev, rctx);
}
static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = container_of(areq, struct ahash_request,
						 base);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	struct stm32_hash_request_ctx *rctx;
	int err = 0;

	if (!hdev)
		return -ENODEV;

	hdev->req = req;

	rctx = ahash_request_ctx(req);

	if (rctx->op == HASH_OP_UPDATE)
		err = stm32_hash_update_req(hdev);
	else if (rctx->op == HASH_OP_FINAL)
		err = stm32_hash_final_req(hdev);

	/* If we have an IRQ, wait for that, else poll for completion */
	if (err == -EINPROGRESS && hdev->polled) {
		if (stm32_hash_wait_busy(hdev)) {
			err = -ETIMEDOUT;
		} else {
			hdev->flags |= HASH_FLAGS_OUTPUT_READY;
			err = 0;
		}
	}

	if (err != -EINPROGRESS)
		/* done task will not finish it, so do it here */
		stm32_hash_finish_req(req, err);

	return 0;
}
static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct stm32_hash_dev *hdev = ctx->hdev;

	rctx->op = op;

	return stm32_hash_handle_queue(hdev, req);
}

static int stm32_hash_update(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (!req->nbytes || !(rctx->flags & HASH_FLAGS_CPU))
		return 0;

	rctx->total = req->nbytes;
	rctx->sg = req->src;
	rctx->offset = 0;

	if ((rctx->bufcnt + rctx->total < rctx->buflen)) {
		stm32_hash_append_sg(rctx);
		return 0;
	}

	return stm32_hash_enqueue(req, HASH_OP_UPDATE);
}
static int stm32_hash_final(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	rctx->flags |= HASH_FLAGS_FINAL;

	return stm32_hash_enqueue(req, HASH_OP_FINAL);
}
static int stm32_hash_finup(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);

	if (!req->nbytes)
		goto out;

	rctx->flags |= HASH_FLAGS_FINUP;
	rctx->total = req->nbytes;
	rctx->sg = req->src;
	rctx->offset = 0;

	if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
		rctx->flags &= ~HASH_FLAGS_CPU;

out:
	return stm32_hash_final(req);
}
static int stm32_hash_digest(struct ahash_request *req)
{
	return stm32_hash_init(req) ?: stm32_hash_finup(req);
}
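
/*
 * export/import save and restore the hardware context (IMR, STR, CR plus
 * the HASH_CSR bank) alongside the generic request context, so a partially
 * processed request can be suspended and later resumed.
 */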
static int stm32_hash_export(struct ahash_request *req, void *out)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	u32 *preg;
	unsigned int i;
	int ret;

	pm_runtime_get_sync(hdev->dev);

	ret = stm32_hash_wait_busy(hdev);
	if (ret)
		return ret;

	rctx->hw_context = kmalloc_array(3 + HASH_CSR_REGISTER_NUMBER,
					 sizeof(u32),
					 GFP_KERNEL);
	if (!rctx->hw_context) {
		pm_runtime_mark_last_busy(hdev->dev);
		pm_runtime_put_autosuspend(hdev->dev);
		return -ENOMEM;
	}

	preg = rctx->hw_context;

	if (!hdev->pdata->ux500)
		*preg++ = stm32_hash_read(hdev, HASH_IMR);
	*preg++ = stm32_hash_read(hdev, HASH_STR);
	*preg++ = stm32_hash_read(hdev, HASH_CR);
	for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
		*preg++ = stm32_hash_read(hdev, HASH_CSR(i));

	pm_runtime_mark_last_busy(hdev->dev);
	pm_runtime_put_autosuspend(hdev->dev);

	memcpy(out, rctx, sizeof(*rctx));

	return 0;
}
static int stm32_hash_import(struct ahash_request *req, const void *in)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	const u32 *preg = in;
	u32 reg;
	unsigned int i;

	memcpy(rctx, in, sizeof(*rctx));

	preg = rctx->hw_context;

	pm_runtime_get_sync(hdev->dev);

	if (!hdev->pdata->ux500)
		stm32_hash_write(hdev, HASH_IMR, *preg++);
	stm32_hash_write(hdev, HASH_STR, *preg++);
	stm32_hash_write(hdev, HASH_CR, *preg);
	reg = *preg++ | HASH_CR_INIT;
	stm32_hash_write(hdev, HASH_CR, reg);

	for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
		stm32_hash_write(hdev, HASH_CSR(i), *preg++);

	pm_runtime_mark_last_busy(hdev->dev);
	pm_runtime_put_autosuspend(hdev->dev);

	kfree(rctx->hw_context);

	return 0;
}
static int stm32_hash_setkey(struct crypto_ahash *tfm,
			     const u8 *key, unsigned int keylen)
{
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	if (keylen > HASH_MAX_KEY_SIZE)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}
static int stm32_hash_init_fallback(struct crypto_tfm *tfm)
{
	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	const char *name = crypto_tfm_alg_name(tfm);
	struct crypto_shash *xtfm;

	/* The fallback is only needed on Ux500 */
	if (!hdev->pdata->ux500)
		return 0;

	xtfm = crypto_alloc_shash(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(xtfm)) {
		dev_err(hdev->dev, "failed to allocate %s fallback\n",
			name);
		return PTR_ERR(xtfm);
	}
	dev_info(hdev->dev, "allocated %s fallback\n", name);
	ctx->xtfm = xtfm;

	return 0;
}
static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
				    const char *algs_hmac_name)
{
	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct stm32_hash_request_ctx));

	ctx->keylen = 0;

	if (algs_hmac_name)
		ctx->flags |= HASH_FLAGS_HMAC;

	ctx->enginectx.op.do_one_request = stm32_hash_one_request;
	ctx->enginectx.op.prepare_request = stm32_hash_prepare_req;
	ctx->enginectx.op.unprepare_request = NULL;

	return stm32_hash_init_fallback(tfm);
}
static int stm32_hash_cra_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, NULL);
}

static int stm32_hash_cra_md5_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "md5");
}

static int stm32_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha1");
}

static int stm32_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha224");
}

static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha256");
}

static void stm32_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->xtfm)
		crypto_free_shash(ctx->xtfm);
}
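
/*
 * Interrupt handling is split in two: the hard handler acknowledges the
 * "digest ready" status and masks further interrupts, then the threaded
 * handler completes the current request via stm32_hash_finish_req().
 */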
static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
{
	struct stm32_hash_dev *hdev = dev_id;

	if (HASH_FLAGS_CPU & hdev->flags) {
		if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (HASH_FLAGS_DMA_READY & hdev->flags) {
		if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) {
			hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE;
			goto finish;
		}
	}

	return IRQ_HANDLED;

finish:
	/* Finish current request */
	stm32_hash_finish_req(hdev->req, 0);

	return IRQ_HANDLED;
}
static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
{
	struct stm32_hash_dev *hdev = dev_id;
	u32 reg;

	reg = stm32_hash_read(hdev, HASH_SR);
	if (reg & HASH_SR_OUTPUT_READY) {
		reg &= ~HASH_SR_OUTPUT_READY;
		stm32_hash_write(hdev, HASH_SR, reg);
		hdev->flags |= HASH_FLAGS_OUTPUT_READY;
		/* Disable IT */
		stm32_hash_write(hdev, HASH_IMR, 0);
		return IRQ_WAKE_THREAD;
	}

	return IRQ_NONE;
}
static struct ahash_alg algs_md5[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "stm32-md5",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "stm32-hmac-md5",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_md5_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
};
static struct ahash_alg algs_sha1[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "stm32-sha1",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "stm32-hmac-sha1",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha1_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
};
static struct ahash_alg algs_sha224[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "stm32-sha224",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.setkey = stm32_hash_setkey,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "stm32-hmac-sha224",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha224_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
};
static struct ahash_alg algs_sha256[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "stm32-sha256",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "stm32-hmac-sha256",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha256_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
};
static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
{
	unsigned int i, j;
	int err;

	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
		for (j = 0; j < hdev->pdata->algs_info[i].size; j++) {
			err = crypto_register_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);
			if (err)
				goto err_algs;
		}
	}

	return 0;
err_algs:
	dev_err(hdev->dev, "Algo %d : %d failed\n", i, j);
	for (; i--; ) {
		for (; j--;)
			crypto_unregister_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);
	}

	return err;
}

static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev)
{
	unsigned int i, j;

	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
		for (j = 0; j < hdev->pdata->algs_info[i].size; j++)
			crypto_unregister_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);
	}

	return 0;
}
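
/*
 * Usage sketch (not part of the driver; assumes this driver has registered
 * "sha256"): a client hashes a kmalloc'd buffer through the generic ahash
 * API. The buffer must not live on the stack, since the request may be
 * mapped for DMA.
 *
 *	struct crypto_ahash *tfm;
 *	struct ahash_request *req;
 *	struct scatterlist sg;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	u8 digest[SHA256_DIGEST_SIZE];
 *	u8 *buf = kmalloc(64, GFP_KERNEL);
 *	int ret;
 *
 *	tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, buf, 64);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, &sg, digest, 64);
 *	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */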
static struct stm32_hash_algs_info stm32_hash_algs_info_ux500[] = {
	{
		.algs_list	= algs_sha1,
		.size		= ARRAY_SIZE(algs_sha1),
	},
	{
		.algs_list	= algs_sha256,
		.size		= ARRAY_SIZE(algs_sha256),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_ux500 = {
	.algs_info	= stm32_hash_algs_info_ux500,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_ux500),
	.broken_emptymsg = true,
	.ux500		= true,
};
static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = {
	{
		.algs_list	= algs_md5,
		.size		= ARRAY_SIZE(algs_md5),
	},
	{
		.algs_list	= algs_sha1,
		.size		= ARRAY_SIZE(algs_sha1),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = {
	.algs_info	= stm32_hash_algs_info_stm32f4,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f4),
	.has_sr		= true,
	.has_mdmat	= true,
};
static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = {
	{
		.algs_list	= algs_md5,
		.size		= ARRAY_SIZE(algs_md5),
	},
	{
		.algs_list	= algs_sha1,
		.size		= ARRAY_SIZE(algs_sha1),
	},
	{
		.algs_list	= algs_sha224,
		.size		= ARRAY_SIZE(algs_sha224),
	},
	{
		.algs_list	= algs_sha256,
		.size		= ARRAY_SIZE(algs_sha256),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = {
	.algs_info	= stm32_hash_algs_info_stm32f7,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f7),
	.has_sr		= true,
	.has_mdmat	= true,
};
static const struct of_device_id stm32_hash_of_match[] = {
	{
		.compatible = "stericsson,ux500-hash",
		.data = &stm32_hash_pdata_ux500,
	},
	{
		.compatible = "st,stm32f456-hash",
		.data = &stm32_hash_pdata_stm32f4,
	},
	{
		.compatible = "st,stm32f756-hash",
		.data = &stm32_hash_pdata_stm32f7,
	},
	{},
};

MODULE_DEVICE_TABLE(of, stm32_hash_of_match);
static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
				   struct device *dev)
{
	hdev->pdata = of_device_get_match_data(dev);
	if (!hdev->pdata) {
		dev_err(dev, "no compatible OF match\n");
		return -EINVAL;
	}

	if (of_property_read_u32(dev->of_node, "dma-maxburst",
				 &hdev->dma_maxburst)) {
		dev_info(dev, "dma-maxburst not specified, using 0\n");
		hdev->dma_maxburst = 0;
	}

	return 0;
}
static int stm32_hash_probe(struct platform_device *pdev)
{
	struct stm32_hash_dev *hdev;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret, irq;

	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hdev->io_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(hdev->io_base))
		return PTR_ERR(hdev->io_base);

	hdev->phys_base = res->start;

	ret = stm32_hash_get_of_match(hdev, dev);
	if (ret)
		return ret;

	irq = platform_get_irq_optional(pdev, 0);
	if (irq < 0 && irq != -ENXIO)
		return irq;

	if (irq > 0) {
		ret = devm_request_threaded_irq(dev, irq,
						stm32_hash_irq_handler,
						stm32_hash_irq_thread,
						IRQF_ONESHOT,
						dev_name(dev), hdev);
		if (ret) {
			dev_err(dev, "Cannot grab IRQ\n");
			return ret;
		}
	} else {
		dev_info(dev, "No IRQ, use polling mode\n");
		hdev->polled = true;
	}

	hdev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(hdev->clk))
		return dev_err_probe(dev, PTR_ERR(hdev->clk),
				     "failed to get clock for hash\n");

	ret = clk_prepare_enable(hdev->clk);
	if (ret) {
		dev_err(dev, "failed to enable hash clock (%d)\n", ret);
		return ret;
	}

	pm_runtime_set_autosuspend_delay(dev, HASH_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);

	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(hdev->rst)) {
		if (PTR_ERR(hdev->rst) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto err_reset;
		}
	} else {
		reset_control_assert(hdev->rst);
		udelay(2);
		reset_control_deassert(hdev->rst);
	}

	hdev->dev = dev;

	platform_set_drvdata(pdev, hdev);

	ret = stm32_hash_dma_init(hdev);
	switch (ret) {
	case 0:
		break;
	case -ENOENT:
	case -ENODEV:
		dev_info(dev, "DMA mode not available\n");
		break;
	default:
		dev_err(dev, "DMA init error %d\n", ret);
		goto err_dma;
	}

	spin_lock(&stm32_hash.lock);
	list_add_tail(&hdev->list, &stm32_hash.dev_list);
	spin_unlock(&stm32_hash.lock);

	/* Initialize crypto engine */
	hdev->engine = crypto_engine_alloc_init(dev, 1);
	if (!hdev->engine) {
		ret = -ENOMEM;
		goto err_engine;
	}

	ret = crypto_engine_start(hdev->engine);
	if (ret)
		goto err_engine_start;

	if (hdev->pdata->ux500)
		/* FIXME: implement DMA mode for Ux500 */
		hdev->dma_mode = 0;
	else
		hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR);

	/* Register algos */
	ret = stm32_hash_register_algs(hdev);
	if (ret)
		goto err_algs;

	dev_info(dev, "Init HASH done HW ver %x DMA mode %u\n",
		 stm32_hash_read(hdev, HASH_VER), hdev->dma_mode);

	pm_runtime_put_sync(dev);

	return 0;

err_algs:
err_engine_start:
	crypto_engine_exit(hdev->engine);
err_engine:
	spin_lock(&stm32_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&stm32_hash.lock);
err_dma:
	if (hdev->dma_lch)
		dma_release_channel(hdev->dma_lch);
err_reset:
	pm_runtime_disable(dev);
	pm_runtime_put_noidle(dev);

	clk_disable_unprepare(hdev->clk);

	return ret;
}
static int stm32_hash_remove(struct platform_device *pdev)
{
	struct stm32_hash_dev *hdev;
	int ret;

	hdev = platform_get_drvdata(pdev);
	if (!hdev)
		return -ENODEV;

	ret = pm_runtime_resume_and_get(hdev->dev);
	if (ret < 0)
		return ret;

	stm32_hash_unregister_algs(hdev);

	crypto_engine_exit(hdev->engine);

	spin_lock(&stm32_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&stm32_hash.lock);

	if (hdev->dma_lch)
		dma_release_channel(hdev->dma_lch);

	pm_runtime_disable(hdev->dev);
	pm_runtime_put_noidle(hdev->dev);

	clk_disable_unprepare(hdev->clk);

	return 0;
}
#ifdef CONFIG_PM
static int stm32_hash_runtime_suspend(struct device *dev)
{
	struct stm32_hash_dev *hdev = dev_get_drvdata(dev);

	clk_disable_unprepare(hdev->clk);

	return 0;
}

static int stm32_hash_runtime_resume(struct device *dev)
{
	struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(hdev->clk);
	if (ret) {
		dev_err(hdev->dev, "Failed to prepare_enable clock\n");
		return ret;
	}

	return 0;
}
#endif
static const struct dev_pm_ops stm32_hash_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(stm32_hash_runtime_suspend,
			   stm32_hash_runtime_resume, NULL)
};
static struct platform_driver stm32_hash_driver = {
	.probe		= stm32_hash_probe,
	.remove		= stm32_hash_remove,
	.driver		= {
		.name	= "stm32-hash",
		.pm = &stm32_hash_pm_ops,
		.of_match_table = stm32_hash_of_match,
	}
};

module_platform_driver(stm32_hash_driver);
MODULE_DESCRIPTION("STM32 SHA1/224/256 & MD5 (HMAC) hw accelerator driver");
MODULE_AUTHOR("Lionel Debieve <lionel.debieve@st.com>");
MODULE_LICENSE("GPL v2");