// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 Imagination Technologies
 * Authors: Will Thomas, James Hartley
 *
 * Interface structure taken from omap-sham driver
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>

#define CR_RESET			0
#define CR_RESET_SET			1
#define CR_RESET_UNSET			0

#define CR_MESSAGE_LENGTH_H		0x4
#define CR_MESSAGE_LENGTH_L		0x8

#define CR_CONTROL			0xc
#define CR_CONTROL_BYTE_ORDER_3210	0
#define CR_CONTROL_BYTE_ORDER_0123	1
#define CR_CONTROL_BYTE_ORDER_2310	2
#define CR_CONTROL_BYTE_ORDER_1032	3
#define CR_CONTROL_BYTE_ORDER_SHIFT	8
#define CR_CONTROL_ALGO_MD5		0
#define CR_CONTROL_ALGO_SHA1		1
#define CR_CONTROL_ALGO_SHA224		2
#define CR_CONTROL_ALGO_SHA256		3

#define CR_INTSTAT			0x10
#define CR_INTENAB			0x14
#define CR_INTCLEAR			0x18
#define CR_INT_RESULTS_AVAILABLE	BIT(0)
#define CR_INT_NEW_RESULTS_SET		BIT(1)
#define CR_INT_RESULT_READ_ERR		BIT(2)
#define CR_INT_MESSAGE_WRITE_ERROR	BIT(3)
#define CR_INT_STATUS			BIT(8)

#define CR_RESULT_QUEUE			0x1c
#define CR_CORE_REV			0x50
#define CR_CORE_DES1			0x60
#define CR_CORE_DES2			0x70

#define DRIVER_FLAGS_BUSY		BIT(0)
#define DRIVER_FLAGS_FINAL		BIT(1)
#define DRIVER_FLAGS_DMA_ACTIVE		BIT(2)
#define DRIVER_FLAGS_OUTPUT_READY	BIT(3)
#define DRIVER_FLAGS_INIT		BIT(4)
#define DRIVER_FLAGS_CPU		BIT(5)
#define DRIVER_FLAGS_DMA_READY		BIT(6)
#define DRIVER_FLAGS_ERROR		BIT(7)
#define DRIVER_FLAGS_SG			BIT(8)
#define DRIVER_FLAGS_SHA1		BIT(18)
#define DRIVER_FLAGS_SHA224		BIT(19)
#define DRIVER_FLAGS_SHA256		BIT(20)
#define DRIVER_FLAGS_MD5		BIT(21)

#define IMG_HASH_QUEUE_LENGTH		20
#define IMG_HASH_DMA_BURST		4
#define IMG_HASH_DMA_THRESHOLD		64

#ifdef __LITTLE_ENDIAN
#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_3210
#else
#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_0123
#endif

struct img_hash_request_ctx {
	struct img_hash_dev	*hdev;
	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
	unsigned long		flags;
	size_t			digsize;
	size_t			dma_ct;

	/* scatterlist root and walk state */
	struct scatterlist	*sgfirst;
	struct scatterlist	*sg;
	size_t			nents;
	size_t			offset;
	unsigned int		total;
	size_t			sent;

	unsigned long		op;

	size_t			bufcnt;
	struct ahash_request	fallback_req;

	/* Zero length buffer must remain last member of struct */
	u8 buffer[] __aligned(sizeof(u32));
};

struct img_hash_ctx {
	struct img_hash_dev	*hdev;
	unsigned long		flags;
	struct crypto_ahash	*fallback;
};

struct img_hash_dev {
	struct list_head	list;
	struct device		*dev;
	struct clk		*hash_clk;
	struct clk		*sys_clk;
	void __iomem		*io_base;

	phys_addr_t		bus_addr;
	void __iomem		*cpu_addr;

	spinlock_t		lock;
	int			err;
	struct tasklet_struct	done_task;
	struct tasklet_struct	dma_task;

	unsigned long		flags;
	struct crypto_queue	queue;
	struct ahash_request	*req;

	struct dma_chan		*dma_lch;
};

struct img_hash_drv {
	struct list_head dev_list;
	spinlock_t lock;
};

static struct img_hash_drv img_hash = {
	.dev_list = LIST_HEAD_INIT(img_hash.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(img_hash.lock),
};

static inline u32 img_hash_read(struct img_hash_dev *hdev, u32 offset)
{
	return readl_relaxed(hdev->io_base + offset);
}

static inline void img_hash_write(struct img_hash_dev *hdev,
				  u32 offset, u32 value)
{
	writel_relaxed(value, hdev->io_base + offset);
}

static inline __be32 img_hash_read_result_queue(struct img_hash_dev *hdev)
{
	return cpu_to_be32(img_hash_read(hdev, CR_RESULT_QUEUE));
}

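/*
 * Program CR_CONTROL for the current request: the byte order plus the
 * algorithm selected by the request flags (MD5, SHA-1, SHA-224 or SHA-256).
 */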
static void img_hash_start(struct img_hash_dev *hdev, bool dma)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
	u32 cr = IMG_HASH_BYTE_ORDER << CR_CONTROL_BYTE_ORDER_SHIFT;

	if (ctx->flags & DRIVER_FLAGS_MD5)
		cr |= CR_CONTROL_ALGO_MD5;
	else if (ctx->flags & DRIVER_FLAGS_SHA1)
		cr |= CR_CONTROL_ALGO_SHA1;
	else if (ctx->flags & DRIVER_FLAGS_SHA224)
		cr |= CR_CONTROL_ALGO_SHA224;
	else if (ctx->flags & DRIVER_FLAGS_SHA256)
		cr |= CR_CONTROL_ALGO_SHA256;
	dev_dbg(hdev->dev, "Starting hash process\n");
	img_hash_write(hdev, CR_CONTROL, cr);

	/*
	 * The hardware block requires two cycles between writing the control
	 * register and writing the first word of data in non-DMA mode. To
	 * ensure the first data write is not grouped in a burst with the
	 * control register write, a read is issued to 'flush' the bus.
	 */
	if (!dma)
		img_hash_read(hdev, CR_CONTROL);
}

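/* PIO path: push the message to the write port one 32-bit word at a time. */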
static int img_hash_xmit_cpu(struct img_hash_dev *hdev, const u8 *buf,
			     size_t length, int final)
{
	u32 count, len32;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(hdev->dev, "xmit_cpu: length: %zu bytes\n", length);

	if (final)
		hdev->flags |= DRIVER_FLAGS_FINAL;

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	for (count = 0; count < len32; count++)
		writel_relaxed(buffer[count], hdev->cpu_addr);

	return 0;
}

static void img_hash_dma_callback(void *data)
{
	struct img_hash_dev *hdev = data;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	if (ctx->bufcnt) {
		img_hash_xmit_cpu(hdev, ctx->buffer, ctx->bufcnt, 0);
		ctx->bufcnt = 0;
	}
	if (ctx->sg)
		tasklet_schedule(&hdev->dma_task);
}

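/* Map one scatterlist entry and queue a mem-to-dev slave transfer to the write port. */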
static int img_hash_xmit_dma(struct img_hash_dev *hdev, struct scatterlist *sg)
{
	struct dma_async_tx_descriptor *desc;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	ctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
	if (ctx->dma_ct == 0) {
		dev_err(hdev->dev, "Invalid DMA sg\n");
		hdev->err = -EINVAL;
		return -EINVAL;
	}

	desc = dmaengine_prep_slave_sg(hdev->dma_lch,
				       sg,
				       ctx->dma_ct,
				       DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(hdev->dev, "Null DMA descriptor\n");
		hdev->err = -EINVAL;
		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
		return -EINVAL;
	}
	desc->callback = img_hash_dma_callback;
	desc->callback_param = hdev;
	dmaengine_submit(desc);
	dma_async_issue_pending(hdev->dma_lch);

	return 0;
}

static int img_hash_write_via_cpu(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	ctx->bufcnt = sg_copy_to_buffer(hdev->req->src, sg_nents(ctx->sg),
					ctx->buffer, hdev->req->nbytes);

	ctx->total = hdev->req->nbytes;
	ctx->bufcnt = 0;

	hdev->flags |= (DRIVER_FLAGS_CPU | DRIVER_FLAGS_FINAL);

	img_hash_start(hdev, false);

	return img_hash_xmit_cpu(hdev, ctx->buffer, ctx->total, 1);
}

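/* Copy the completed digest out to the caller's result buffer. */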
static int img_hash_finish(struct ahash_request *req)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);

	if (!req->result)
		return -EINVAL;

	memcpy(req->result, ctx->digest, ctx->digsize);

	return 0;
}

static void img_hash_copy_hash(struct ahash_request *req)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	__be32 *hash = (__be32 *)ctx->digest;
	int i;

	for (i = (ctx->digsize / sizeof(*hash)) - 1; i >= 0; i--)
		hash[i] = img_hash_read_result_queue(ctx->hdev);
}

static void img_hash_finish_req(struct ahash_request *req, int err)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	struct img_hash_dev *hdev = ctx->hdev;

	if (!err) {
		img_hash_copy_hash(req);
		if (DRIVER_FLAGS_FINAL & hdev->flags)
			err = img_hash_finish(req);
	} else {
		dev_warn(hdev->dev, "Hash failed with error %d\n", err);
		ctx->flags |= DRIVER_FLAGS_ERROR;
	}

	hdev->flags &= ~(DRIVER_FLAGS_DMA_READY | DRIVER_FLAGS_OUTPUT_READY |
		DRIVER_FLAGS_CPU | DRIVER_FLAGS_BUSY | DRIVER_FLAGS_FINAL);

	if (req->base.complete)
		ahash_request_complete(req, err);
}

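/* DMA path: program the hardware, then let the dma_task tasklet walk the scatterlist. */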
static int img_hash_write_via_dma(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	img_hash_start(hdev, true);

	dev_dbg(hdev->dev, "xmit dma size: %d\n", ctx->total);

	if (!ctx->total)
		hdev->flags |= DRIVER_FLAGS_FINAL;
	else
		hdev->flags |= DRIVER_FLAGS_DMA_ACTIVE | DRIVER_FLAGS_FINAL;

	tasklet_schedule(&hdev->dma_task);

	return -EINPROGRESS;
}

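/* Acquire the "tx" slave channel and configure 4-byte wide, burst-of-4 writes to the port. */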
static int img_hash_dma_init(struct img_hash_dev *hdev)
{
	struct dma_slave_config dma_conf;
	int err;

	hdev->dma_lch = dma_request_chan(hdev->dev, "tx");
	if (IS_ERR(hdev->dma_lch)) {
		dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
		return PTR_ERR(hdev->dma_lch);
	}
	dma_conf.direction = DMA_MEM_TO_DEV;
	dma_conf.dst_addr = hdev->bus_addr;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.dst_maxburst = IMG_HASH_DMA_BURST;
	dma_conf.device_fc = false;

	err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
	if (err) {
		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
		dma_release_channel(hdev->dma_lch);
		return err;
	}

	return 0;
}

static void img_hash_dma_task(unsigned long d)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)d;
	struct img_hash_request_ctx *ctx;
	u8 *addr;
	size_t nbytes, bleft, wsend, len, tbc;
	struct scatterlist tsg;

	if (!hdev->req)
		return;

	ctx = ahash_request_ctx(hdev->req);
	if (!ctx->sg)
		return;

	addr = sg_virt(ctx->sg);
	nbytes = ctx->sg->length - ctx->offset;

	/*
	 * The hash accelerator does not support a data valid mask. This means
	 * that if each dma (i.e. per page) is not a multiple of 4 bytes, the
	 * padding bytes in the last word written by that dma would erroneously
	 * be included in the hash. To avoid this we round down the transfer,
	 * and add the excess to the start of the next dma. It does not matter
	 * that the final dma may not be a multiple of 4 bytes as the hashing
	 * block is programmed to accept the correct number of bytes.
	 */

	bleft = nbytes % 4;
	wsend = (nbytes / 4);

	if (wsend) {
		sg_init_one(&tsg, addr + ctx->offset, wsend * 4);
		if (img_hash_xmit_dma(hdev, &tsg)) {
			dev_err(hdev->dev, "DMA failed, falling back to CPU");
			ctx->flags |= DRIVER_FLAGS_CPU;
			hdev->err = 0;
			img_hash_xmit_cpu(hdev, addr + ctx->offset,
					  wsend * 4, 0);
			ctx->sent += wsend * 4;
			wsend = 0;
		} else {
			ctx->sent += wsend * 4;
		}
	}

	if (bleft) {
		ctx->bufcnt = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
						 ctx->buffer, bleft, ctx->sent);
		tbc = 0;
		ctx->sg = sg_next(ctx->sg);
		while (ctx->sg && (ctx->bufcnt < 4)) {
			len = ctx->sg->length;
			if (likely(len > (4 - ctx->bufcnt)))
				len = 4 - ctx->bufcnt;
			tbc = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
						 ctx->buffer + ctx->bufcnt, len,
						 ctx->sent + ctx->bufcnt);
			ctx->bufcnt += tbc;
			if (tbc >= ctx->sg->length) {
				ctx->sg = sg_next(ctx->sg);
				tbc = 0;
			}
		}

		ctx->sent += ctx->bufcnt;
		ctx->offset = tbc;

		if (!wsend)
			img_hash_dma_callback(hdev);
	} else {
		ctx->offset = 0;
		ctx->sg = sg_next(ctx->sg);
	}
}

static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	if (ctx->flags & DRIVER_FLAGS_SG)
		dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE);

	return 0;
}

static int img_hash_process_data(struct img_hash_dev *hdev)
{
	struct ahash_request *req = hdev->req;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	int err = 0;

	ctx->bufcnt = 0;

	if (req->nbytes >= IMG_HASH_DMA_THRESHOLD) {
		dev_dbg(hdev->dev, "process data request(%d bytes) using DMA\n",
			req->nbytes);
		err = img_hash_write_via_dma(hdev);
	} else {
		dev_dbg(hdev->dev, "process data request(%d bytes) using CPU\n",
			req->nbytes);
		err = img_hash_write_via_cpu(hdev);
	}

	return err;
}

static int img_hash_hw_init(struct img_hash_dev *hdev)
{
	unsigned long long nbits;
	u32 u, l;

	img_hash_write(hdev, CR_RESET, CR_RESET_SET);
	img_hash_write(hdev, CR_RESET, CR_RESET_UNSET);
	img_hash_write(hdev, CR_INTENAB, CR_INT_NEW_RESULTS_SET);

	nbits = (u64)hdev->req->nbytes << 3;
	u = nbits >> 32;
	l = nbits;
	img_hash_write(hdev, CR_MESSAGE_LENGTH_H, u);
	img_hash_write(hdev, CR_MESSAGE_LENGTH_L, l);

	if (!(DRIVER_FLAGS_INIT & hdev->flags)) {
		hdev->flags |= DRIVER_FLAGS_INIT;
		hdev->err = 0;
	}
	dev_dbg(hdev->dev, "hw initialized, nbits: %llx\n", nbits);
	return 0;
}

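/*
 * init/update/final/finup/export/import are all delegated to the software
 * fallback transform; only digest() below drives the hardware directly.
 */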
static int img_hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

static int img_hash_handle_queue(struct img_hash_dev *hdev,
				 struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct img_hash_request_ctx *ctx;
	unsigned long flags;
	int err = 0, res = 0;

	spin_lock_irqsave(&hdev->lock, flags);

	if (req)
		res = ahash_enqueue_request(&hdev->queue, req);

	if (DRIVER_FLAGS_BUSY & hdev->flags) {
		spin_unlock_irqrestore(&hdev->lock, flags);
		return res;
	}

	backlog = crypto_get_backlog(&hdev->queue);
	async_req = crypto_dequeue_request(&hdev->queue);
	if (async_req)
		hdev->flags |= DRIVER_FLAGS_BUSY;

	spin_unlock_irqrestore(&hdev->lock, flags);

	if (!async_req)
		return res;

	if (backlog)
		crypto_request_complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	hdev->req = req;

	ctx = ahash_request_ctx(req);

	dev_info(hdev->dev, "processing req, op: %lu, bytes: %d\n",
		 ctx->op, req->nbytes);

	err = img_hash_hw_init(hdev);

	if (!err)
		err = img_hash_process_data(hdev);

	if (err != -EINPROGRESS) {
		/* done_task will not finish so do it here */
		img_hash_finish_req(req, err);
	}
	return res;
}

static int img_hash_update(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

static int img_hash_final(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_final(&rctx->fallback_req);
}

static int img_hash_finup(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int img_hash_import(struct ahash_request *req, const void *in)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_import(&rctx->fallback_req, in);
}

static int img_hash_export(struct ahash_request *req, void *out)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_export(&rctx->fallback_req, out);
}

static int img_hash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *tctx = crypto_ahash_ctx(tfm);
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	struct img_hash_dev *hdev = NULL;
	struct img_hash_dev *tmp;
	int err;

	spin_lock(&img_hash.lock);
	if (!tctx->hdev) {
		list_for_each_entry(tmp, &img_hash.dev_list, list) {
			hdev = tmp;
			break;
		}
		tctx->hdev = hdev;
	} else {
		hdev = tctx->hdev;
	}

	spin_unlock(&img_hash.lock);
	ctx->hdev = hdev;
	ctx->flags = 0;
	ctx->digsize = crypto_ahash_digestsize(tfm);

	switch (ctx->digsize) {
	case SHA1_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA1;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA256;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA224;
		break;
	case MD5_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_MD5;
		break;
	default:
		return -EINVAL;
	}

	ctx->bufcnt = 0;
	ctx->offset = 0;
	ctx->sent = 0;
	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->sgfirst = req->src;
	ctx->nents = sg_nents(ctx->sg);

	err = img_hash_handle_queue(tctx->hdev, req);

	return err;
}

static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
{
	struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->fallback = crypto_alloc_ahash(alg_name, 0,
					   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback)) {
		pr_err("img_hash: Could not load fallback driver.\n");
		return PTR_ERR(ctx->fallback);
	}
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct img_hash_request_ctx) +
				 crypto_ahash_reqsize(ctx->fallback) +
				 IMG_HASH_DMA_THRESHOLD);

	return 0;
}

static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "md5-generic");
}

static int img_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha1-generic");
}

static int img_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha224-generic");
}

static int img_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha256-generic");
}

static void img_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_ahash(tctx->fallback);
}

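/* Acknowledge every pending status bit; new results schedule done_task, the rest are warnings. */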
static irqreturn_t img_irq_handler(int irq, void *dev_id)
{
	struct img_hash_dev *hdev = dev_id;
	u32 reg;

	reg = img_hash_read(hdev, CR_INTSTAT);
	img_hash_write(hdev, CR_INTCLEAR, reg);

	if (reg & CR_INT_NEW_RESULTS_SET) {
		dev_dbg(hdev->dev, "IRQ CR_INT_NEW_RESULTS_SET\n");
		if (DRIVER_FLAGS_BUSY & hdev->flags) {
			hdev->flags |= DRIVER_FLAGS_OUTPUT_READY;
			if (!(DRIVER_FLAGS_CPU & hdev->flags))
				hdev->flags |= DRIVER_FLAGS_DMA_READY;
			tasklet_schedule(&hdev->done_task);
		} else {
			dev_warn(hdev->dev,
				 "HASH interrupt when no active requests.\n");
		}
	} else if (reg & CR_INT_RESULTS_AVAILABLE) {
		dev_warn(hdev->dev,
			 "IRQ triggered before the hash had completed\n");
	} else if (reg & CR_INT_RESULT_READ_ERR) {
		dev_warn(hdev->dev,
			 "Attempt to read from an empty result queue\n");
	} else if (reg & CR_INT_MESSAGE_WRITE_ERROR) {
		dev_warn(hdev->dev,
			 "Data written before the hardware was configured\n");
	}
	return IRQ_HANDLED;
}

static struct ahash_alg img_algs[] = {
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct md5_state),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "img-md5",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_md5_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct sha1_state),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "img-sha1",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha1_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct sha256_state),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "img-sha224",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha224_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct sha256_state),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "img-sha256",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha256_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	}
};

static int img_register_algs(struct img_hash_dev *hdev)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(img_algs); i++) {
		err = crypto_register_ahash(&img_algs[i]);
		if (err)
			goto err_reg;
	}
	return 0;

err_reg:
	for (; i--; )
		crypto_unregister_ahash(&img_algs[i]);

	return err;
}

static int img_unregister_algs(struct img_hash_dev *hdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(img_algs); i++)
		crypto_unregister_ahash(&img_algs[i]);
	return 0;
}

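/*
 * Bottom-half tasklet: completes CPU transfers, stops DMA and finalises the
 * request once results are ready.
 */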
static void img_hash_done_task(unsigned long data)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)data;
	int err = 0;

	if (hdev->err == -EINVAL) {
		err = hdev->err;
		goto finish;
	}

	if (!(DRIVER_FLAGS_BUSY & hdev->flags)) {
		img_hash_handle_queue(hdev, NULL);
		return;
	}

	if (DRIVER_FLAGS_CPU & hdev->flags) {
		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~DRIVER_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (DRIVER_FLAGS_DMA_READY & hdev->flags) {
		if (DRIVER_FLAGS_DMA_ACTIVE & hdev->flags) {
			hdev->flags &= ~DRIVER_FLAGS_DMA_ACTIVE;
			img_hash_write_via_dma_stop(hdev);
			if (hdev->err) {
				err = hdev->err;
				goto finish;
			}
		}
		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~(DRIVER_FLAGS_DMA_READY |
					DRIVER_FLAGS_OUTPUT_READY);
			goto finish;
		}
	}
	return;

finish:
	img_hash_finish_req(hdev->req, err);
}

static const struct of_device_id img_hash_match[] __maybe_unused = {
	{ .compatible = "img,hash-accelerator" },
	{}
};
MODULE_DEVICE_TABLE(of, img_hash_match);

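/*
 * Probe: map the register block and write port, grab clocks, IRQ and the DMA
 * channel, then register the algorithms.
 */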
static int img_hash_probe(struct platform_device *pdev)
{
	struct img_hash_dev *hdev;
	struct device *dev = &pdev->dev;
	struct resource *hash_res;
	int irq;
	int err;

	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
	if (hdev == NULL)
		return -ENOMEM;

	spin_lock_init(&hdev->lock);

	hdev->dev = dev;

	platform_set_drvdata(pdev, hdev);

	INIT_LIST_HEAD(&hdev->list);

	tasklet_init(&hdev->done_task, img_hash_done_task, (unsigned long)hdev);
	tasklet_init(&hdev->dma_task, img_hash_dma_task, (unsigned long)hdev);

	crypto_init_queue(&hdev->queue, IMG_HASH_QUEUE_LENGTH);

	/* Register bank */
	hdev->io_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(hdev->io_base)) {
		err = PTR_ERR(hdev->io_base);
		goto res_err;
	}

	/* Write port (DMA or CPU) */
	hdev->cpu_addr = devm_platform_get_and_ioremap_resource(pdev, 1, &hash_res);
	if (IS_ERR(hdev->cpu_addr)) {
		err = PTR_ERR(hdev->cpu_addr);
		goto res_err;
	}
	hdev->bus_addr = hash_res->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = irq;
		goto res_err;
	}

	err = devm_request_irq(dev, irq, img_irq_handler, 0,
			       dev_name(dev), hdev);
	if (err) {
		dev_err(dev, "unable to request irq\n");
		goto res_err;
	}
	dev_dbg(dev, "using IRQ channel %d\n", irq);

	hdev->hash_clk = devm_clk_get(&pdev->dev, "hash");
	if (IS_ERR(hdev->hash_clk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(hdev->hash_clk);
		goto res_err;
	}

	hdev->sys_clk = devm_clk_get(&pdev->dev, "sys");
	if (IS_ERR(hdev->sys_clk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(hdev->sys_clk);
		goto res_err;
	}

	err = clk_prepare_enable(hdev->hash_clk);
	if (err)
		goto res_err;

	err = clk_prepare_enable(hdev->sys_clk);
	if (err)
		goto clk_err;

	err = img_hash_dma_init(hdev);
	if (err)
		goto dma_err;

	dev_dbg(dev, "using %s for DMA transfers\n",
		dma_chan_name(hdev->dma_lch));

	spin_lock(&img_hash.lock);
	list_add_tail(&hdev->list, &img_hash.dev_list);
	spin_unlock(&img_hash.lock);

	err = img_register_algs(hdev);
	if (err)
		goto err_algs;
	dev_info(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n");

	return 0;

err_algs:
	spin_lock(&img_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&img_hash.lock);
	dma_release_channel(hdev->dma_lch);
dma_err:
	clk_disable_unprepare(hdev->sys_clk);
clk_err:
	clk_disable_unprepare(hdev->hash_clk);
res_err:
	tasklet_kill(&hdev->done_task);
	tasklet_kill(&hdev->dma_task);

	return err;
}

static int img_hash_remove(struct platform_device *pdev)
{
	struct img_hash_dev *hdev;

	hdev = platform_get_drvdata(pdev);
	spin_lock(&img_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&img_hash.lock);

	img_unregister_algs(hdev);

	tasklet_kill(&hdev->done_task);
	tasklet_kill(&hdev->dma_task);

	dma_release_channel(hdev->dma_lch);

	clk_disable_unprepare(hdev->hash_clk);
	clk_disable_unprepare(hdev->sys_clk);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int img_hash_suspend(struct device *dev)
{
	struct img_hash_dev *hdev = dev_get_drvdata(dev);

	clk_disable_unprepare(hdev->hash_clk);
	clk_disable_unprepare(hdev->sys_clk);

	return 0;
}

static int img_hash_resume(struct device *dev)
{
	struct img_hash_dev *hdev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(hdev->hash_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(hdev->sys_clk);
	if (ret) {
		clk_disable_unprepare(hdev->hash_clk);
		return ret;
	}

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops img_hash_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(img_hash_suspend, img_hash_resume)
};

static struct platform_driver img_hash_driver = {
	.probe		= img_hash_probe,
	.remove		= img_hash_remove,
	.driver		= {
		.name	= "img-hash-accelerator",
		.pm	= &img_hash_pm_ops,
		.of_match_table	= img_hash_match,
	}
};
module_platform_driver(img_hash_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Imgtec SHA1/224/256 & MD5 hw accelerator driver");
MODULE_AUTHOR("Will Thomas.");
MODULE_AUTHOR("James Hartley <james.hartley@imgtec.com>");