// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Support for SAHARA cryptographic accelerator.
 *
 * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
 * Copyright (c) 2013 Vista Silicon S.L.
 * Author: Javier Martin <javier.martin@vista-silicon.com>
 *
 * Based on omap-aes.c and tegra-aes.c
 */
#include <crypto/aes.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#define SHA_BUFFER_LEN		PAGE_SIZE
#define SAHARA_MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE

#define SAHARA_NAME "sahara"
#define SAHARA_VERSION_3	3
#define SAHARA_VERSION_4	4
#define SAHARA_TIMEOUT_MS	1000
#define SAHARA_MAX_HW_DESC	2
#define SAHARA_MAX_HW_LINK	20

#define FLAGS_MODE_MASK		0x000f
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define FLAGS_NEW_KEY		BIT(3)
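/*
 * FLAGS_MODE_MASK covers the per-request mode bits that
 * sahara_aes_process() merges into dev->flags; FLAGS_NEW_KEY lives in
 * the tfm context (ctx->flags) and tells sahara_hw_descriptor_create()
 * to emit a key-load descriptor.
 */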
#define SAHARA_HDR_BASE			0x00800000
#define SAHARA_HDR_SKHA_ALG_AES		0
#define SAHARA_HDR_SKHA_OP_ENC		(1 << 2)
#define SAHARA_HDR_SKHA_MODE_ECB	(0 << 3)
#define SAHARA_HDR_SKHA_MODE_CBC	(1 << 3)
#define SAHARA_HDR_FORM_DATA		(5 << 16)
#define SAHARA_HDR_FORM_KEY		(8 << 16)
#define SAHARA_HDR_LLO			(1 << 24)
#define SAHARA_HDR_CHA_SKHA		(1 << 28)
#define SAHARA_HDR_CHA_MDHA		(2 << 28)
#define SAHARA_HDR_PARITY_BIT		(1 << 31)
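/*
 * Note: the hardware appears to require odd parity over each 32-bit
 * descriptor header. SAHARA_HDR_PARITY_BIT (bit 31) is toggled in
 * sahara_aes_key_hdr() whenever an extra mode bit is set, and computed
 * explicitly with hweight_long() in sahara_sha_init_hdr(); the MDHA
 * header constants below are pre-encoded words fixed up the same way.
 */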
#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY	0x20880000
#define SAHARA_HDR_MDHA_SET_MODE_HASH	0x208D0000
#define SAHARA_HDR_MDHA_HASH		0xA0850000
#define SAHARA_HDR_MDHA_STORE_DIGEST	0x20820000
#define SAHARA_HDR_MDHA_ALG_SHA1	0
#define SAHARA_HDR_MDHA_ALG_MD5		1
#define SAHARA_HDR_MDHA_ALG_SHA256	2
#define SAHARA_HDR_MDHA_ALG_SHA224	3
#define SAHARA_HDR_MDHA_PDATA		(1 << 2)
#define SAHARA_HDR_MDHA_HMAC		(1 << 3)
#define SAHARA_HDR_MDHA_INIT		(1 << 5)
#define SAHARA_HDR_MDHA_IPAD		(1 << 6)
#define SAHARA_HDR_MDHA_OPAD		(1 << 7)
#define SAHARA_HDR_MDHA_SWAP		(1 << 8)
#define SAHARA_HDR_MDHA_MAC_FULL	(1 << 9)
#define SAHARA_HDR_MDHA_SSL		(1 << 10)
/* SAHARA can only process one request at a time */
#define SAHARA_QUEUE_LENGTH	1
#define SAHARA_REG_VERSION	0x00
#define SAHARA_REG_DAR		0x04
#define SAHARA_REG_CONTROL	0x08
#define		SAHARA_CONTROL_SET_THROTTLE(x)	(((x) & 0xff) << 24)
#define		SAHARA_CONTROL_SET_MAXBURST(x)	(((x) & 0xff) << 16)
#define		SAHARA_CONTROL_RNG_AUTORSD	(1 << 7)
#define		SAHARA_CONTROL_ENABLE_INT	(1 << 4)
#define SAHARA_REG_CMD		0x0C
#define		SAHARA_CMD_RESET		(1 << 0)
#define		SAHARA_CMD_CLEAR_INT		(1 << 8)
#define		SAHARA_CMD_CLEAR_ERR		(1 << 9)
#define		SAHARA_CMD_SINGLE_STEP		(1 << 10)
#define		SAHARA_CMD_MODE_BATCH		(1 << 16)
#define		SAHARA_CMD_MODE_DEBUG		(1 << 18)
#define	SAHARA_REG_STATUS	0x10
#define		SAHARA_STATUS_GET_STATE(x)	((x) & 0x7)
#define			SAHARA_STATE_IDLE	0
#define			SAHARA_STATE_BUSY	1
#define			SAHARA_STATE_ERR	2
#define			SAHARA_STATE_FAULT	3
#define			SAHARA_STATE_COMPLETE	4
#define			SAHARA_STATE_COMP_FLAG	(1 << 2)
#define		SAHARA_STATUS_DAR_FULL		(1 << 3)
#define		SAHARA_STATUS_ERROR		(1 << 4)
#define		SAHARA_STATUS_SECURE		(1 << 5)
#define		SAHARA_STATUS_FAIL		(1 << 6)
#define		SAHARA_STATUS_INIT		(1 << 7)
#define		SAHARA_STATUS_RNG_RESEED	(1 << 8)
#define		SAHARA_STATUS_ACTIVE_RNG	(1 << 9)
#define		SAHARA_STATUS_ACTIVE_MDHA	(1 << 10)
#define		SAHARA_STATUS_ACTIVE_SKHA	(1 << 11)
#define		SAHARA_STATUS_MODE_BATCH	(1 << 16)
#define		SAHARA_STATUS_MODE_DEDICATED	(1 << 17)
#define		SAHARA_STATUS_MODE_DEBUG	(1 << 18)
#define		SAHARA_STATUS_GET_ISTATE(x)	(((x) >> 24) & 0xff)
#define SAHARA_REG_ERRSTATUS	0x14
#define		SAHARA_ERRSTATUS_GET_SOURCE(x)	((x) & 0xf)
#define			SAHARA_ERRSOURCE_CHA	14
#define			SAHARA_ERRSOURCE_DMA	15
#define		SAHARA_ERRSTATUS_DMA_DIR	(1 << 8)
#define		SAHARA_ERRSTATUS_GET_DMASZ(x)	(((x) >> 9) & 0x3)
#define		SAHARA_ERRSTATUS_GET_DMASRC(x)	(((x) >> 13) & 0x7)
#define		SAHARA_ERRSTATUS_GET_CHASRC(x)	(((x) >> 16) & 0xfff)
#define		SAHARA_ERRSTATUS_GET_CHAERR(x)	(((x) >> 28) & 0x3)
#define SAHARA_REG_FADDR	0x18
#define SAHARA_REG_CDAR		0x1C
#define SAHARA_REG_IDAR		0x20
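/*
 * The engine consumes a chain of hw descriptors: each carries a header
 * and two length/pointer pairs, with 'next' linking to the following
 * descriptor. A pointer may also reference a list of sahara_hw_link
 * entries, one per scatterlist segment, chained through their own
 * 'next' fields. Processing starts once the first descriptor's DMA
 * address is written to SAHARA_REG_DAR.
 */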
struct sahara_hw_desc {
	u32	hdr;
	u32	len1;
	u32	p1;
	u32	len2;
	u32	p2;
	u32	next;
};
struct sahara_hw_link {
	u32	len;
	u32	p;
	u32	next;
};
struct sahara_ctx {
	unsigned long flags;

	/* AES-specific context */
	int keylen;
	u8 key[AES_KEYSIZE_128];
	struct crypto_skcipher *fallback;
};
struct sahara_aes_reqctx {
	unsigned long mode;
	struct skcipher_request fallback_req;	// keep at the end
};
/*
 * struct sahara_sha_reqctx - private data per request
 * @buf: holds data for requests smaller than block_size
 * @rembuf: used to prepare one block_size-aligned request
 * @context: hw-specific context for request. Digest is extracted from this
 * @mode: specifies what type of hw-descriptor needs to be built
 * @digest_size: length of digest for this request
 * @context_size: length of hw-context for this request.
 *                Always digest_size + 4
 * @buf_cnt: number of bytes saved in buf
 * @sg_in_idx: number of hw links
 * @in_sg: scatterlist for input data
 * @in_sg_chain: scatterlists for chained input data
 * @total: total number of bytes for transfer
 * @last: is this the last block
 * @first: is this the first block
 * @active: inside a transfer
 */
struct sahara_sha_reqctx {
	u8			buf[SAHARA_MAX_SHA_BLOCK_SIZE];
	u8			rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
	u8			context[SHA256_DIGEST_SIZE + 4];
	unsigned int		mode;
	unsigned int		digest_size;
	unsigned int		context_size;
	unsigned int		buf_cnt;
	unsigned int		sg_in_idx;
	struct scatterlist	*in_sg;
	struct scatterlist	in_sg_chain[2];
	size_t			total;
	unsigned int		last;
	unsigned int		first;
	unsigned int		active;
};
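/*
 * Per-device state. The driver handles a single SAHARA instance: the
 * global dev_ptr below is set at probe time and used by the algorithm
 * entry points to reach the device.
 */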
struct sahara_dev {
	struct device		*device;
	unsigned int		version;
	void __iomem		*regs_base;
	struct clk		*clk_ipg;
	struct clk		*clk_ahb;
	struct mutex		queue_mutex;
	struct task_struct	*kthread;
	struct completion	dma_completion;

	struct sahara_ctx	*ctx;
	struct crypto_queue	queue;
	unsigned long		flags;

	struct sahara_hw_desc	*hw_desc[SAHARA_MAX_HW_DESC];
	dma_addr_t		hw_phys_desc[SAHARA_MAX_HW_DESC];

	u8			*key_base;
	dma_addr_t		key_phys_base;

	u8			*iv_base;
	dma_addr_t		iv_phys_base;

	u8			*context_base;
	dma_addr_t		context_phys_base;

	struct sahara_hw_link	*hw_link[SAHARA_MAX_HW_LINK];
	dma_addr_t		hw_phys_link[SAHARA_MAX_HW_LINK];

	size_t			total;
	struct scatterlist	*in_sg;
	int			nb_in_sg;
	struct scatterlist	*out_sg;
	int			nb_out_sg;

	u32			error;
};

static struct sahara_dev *dev_ptr;
static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
{
	writel(data, dev->regs_base + reg);
}
static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
{
	return readl(dev->regs_base + reg);
}
static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
{
	u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
			SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;

	if (dev->flags & FLAGS_CBC) {
		hdr |= SAHARA_HDR_SKHA_MODE_CBC;
		hdr ^= SAHARA_HDR_PARITY_BIT;
	}

	if (dev->flags & FLAGS_ENCRYPT) {
		hdr |= SAHARA_HDR_SKHA_OP_ENC;
		hdr ^= SAHARA_HDR_PARITY_BIT;
	}

	return hdr;
}
static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
{
	return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
}
static const char *sahara_err_src[16] = {
	"No error",
	"Header error",
	"Descriptor length error",
	"Descriptor length or pointer error",
	"Link length error",
	"Link pointer error",
	"Input buffer error",
	"Output buffer error",
	"Output buffer starvation",
	"Internal state fault",
	"General descriptor problem",
	"Reserved",
	"Descriptor address error",
	"Link address error",
	"CHA error",
	"DMA error"
};

static const char *sahara_err_dmasize[4] = {
	"Byte transfer",
	"Half-word transfer",
	"Word transfer",
	"Reserved"
};

static const char *sahara_err_dmasrc[8] = {
	"No error",
	"AHB bus error",
	"Internal IP bus error",
	"Parity error",
	"DMA crosses 256 byte boundary",
	"DMA is busy",
	"Reserved",
	"DMA HW error"
};

static const char *sahara_cha_errsrc[12] = {
	"Input buffer non-empty",
	"Illegal address",
	"Illegal mode",
	"Illegal data size",
	"Illegal key size",
	"Write during processing",
	"CTX read during processing",
	"HW error",
	"Input buffer disabled/underflow",
	"Output buffer disabled/overflow",
	"DES key parity error",
	"Reserved"
};

static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };
static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
{
	u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
	u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));

	dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);

	dev_err(dev->device, "	- %s.\n", sahara_err_src[source]);

	if (source == SAHARA_ERRSOURCE_DMA) {
		if (error & SAHARA_ERRSTATUS_DMA_DIR)
			dev_err(dev->device, "		* DMA read.\n");
		else
			dev_err(dev->device, "		* DMA write.\n");

		dev_err(dev->device, "		* %s.\n",
			sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
		dev_err(dev->device, "		* %s.\n",
			sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
	} else if (source == SAHARA_ERRSOURCE_CHA) {
		dev_err(dev->device, "		* %s.\n",
			sahara_cha_errsrc[chasrc]);
		dev_err(dev->device, "		* %s.\n",
			sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
	}
	dev_err(dev->device, "\n");
}
static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };

static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
{
	u8 state;

	if (!__is_defined(DEBUG))
		return;

	state = SAHARA_STATUS_GET_STATE(status);

	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
		__func__, status);

	dev_dbg(dev->device, "	- State = %d:\n", state);
	if (state & SAHARA_STATE_COMP_FLAG)
		dev_dbg(dev->device, "		* Descriptor completed. IRQ pending.\n");

	dev_dbg(dev->device, "		* %s.\n",
		sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);

	if (status & SAHARA_STATUS_DAR_FULL)
		dev_dbg(dev->device, "	- DAR Full.\n");
	if (status & SAHARA_STATUS_ERROR)
		dev_dbg(dev->device, "	- Error.\n");
	if (status & SAHARA_STATUS_SECURE)
		dev_dbg(dev->device, "	- Secure.\n");
	if (status & SAHARA_STATUS_FAIL)
		dev_dbg(dev->device, "	- Fail.\n");
	if (status & SAHARA_STATUS_RNG_RESEED)
		dev_dbg(dev->device, "	- RNG Reseed Request.\n");
	if (status & SAHARA_STATUS_ACTIVE_RNG)
		dev_dbg(dev->device, "	- RNG Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_MDHA)
		dev_dbg(dev->device, "	- MDHA Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_SKHA)
		dev_dbg(dev->device, "	- SKHA Active.\n");

	if (status & SAHARA_STATUS_MODE_BATCH)
		dev_dbg(dev->device, "	- Batch Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEDICATED)
		dev_dbg(dev->device, "	- Dedicated Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEBUG)
		dev_dbg(dev->device, "	- Debug Mode.\n");

	dev_dbg(dev->device, "	- Internal state = 0x%02x\n",
		SAHARA_STATUS_GET_ISTATE(status));

	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
		sahara_read(dev, SAHARA_REG_CDAR));
	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
		sahara_read(dev, SAHARA_REG_IDAR));
}
static void sahara_dump_descriptors(struct sahara_dev *dev)
{
	int i;

	if (!__is_defined(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
		dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
			i, &dev->hw_phys_desc[i]);
		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_desc[i]->next);
	}

	dev_dbg(dev->device, "\n");
}
static void sahara_dump_links(struct sahara_dev *dev)
{
	int i;

	if (!__is_defined(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
		dev_dbg(dev->device, "Link (%d) (%pad):\n",
			i, &dev->hw_phys_link[i]);
		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_link[i]->next);
	}

	dev_dbg(dev->device, "\n");
}
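/*
 * Build the AES descriptor chain. When a new key is pending, hw_desc[0]
 * loads the key (plus the IV for CBC) and links to hw_desc[1]; the data
 * descriptor then points p1 at the input link list and p2 at the output
 * link list, one hw link per scatterlist segment.
 */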
static int sahara_hw_descriptor_create(struct sahara_dev *dev)
{
	struct sahara_ctx *ctx = dev->ctx;
	struct scatterlist *sg;
	int ret;
	int i, j;
	int idx = 0;

	/* Copy new key if necessary */
	if (ctx->flags & FLAGS_NEW_KEY) {
		memcpy(dev->key_base, ctx->key, ctx->keylen);
		ctx->flags &= ~FLAGS_NEW_KEY;

		if (dev->flags & FLAGS_CBC) {
			dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
			dev->hw_desc[idx]->p1 = dev->iv_phys_base;
		} else {
			dev->hw_desc[idx]->len1 = 0;
			dev->hw_desc[idx]->p1 = 0;
		}
		dev->hw_desc[idx]->len2 = ctx->keylen;
		dev->hw_desc[idx]->p2 = dev->key_phys_base;
		dev->hw_desc[idx]->next = dev->hw_phys_desc[1];

		dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);

		idx++;
	}

	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
	if (dev->nb_in_sg < 0) {
		dev_err(dev->device, "Invalid numbers of src SG.\n");
		return dev->nb_in_sg;
	}
	dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
	if (dev->nb_out_sg < 0) {
		dev_err(dev->device, "Invalid numbers of dst SG.\n");
		return dev->nb_out_sg;
	}
	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg + dev->nb_out_sg);
		return -EINVAL;
	}

	/* dma_map_sg() maps all or nothing; on failure there is nothing to unmap */
	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			 DMA_TO_DEVICE);
	if (ret != dev->nb_in_sg) {
		dev_err(dev->device, "couldn't map in sg\n");
		return -EINVAL;
	}
	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
			 DMA_FROM_DEVICE);
	if (ret != dev->nb_out_sg) {
		dev_err(dev->device, "couldn't map out sg\n");
		goto unmap_in;
	}

	/* Create input links */
	dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
	sg = dev->in_sg;
	for (i = 0; i < dev->nb_in_sg; i++) {
		dev->hw_link[i]->len = sg->length;
		dev->hw_link[i]->p = sg->dma_address;
		if (i == (dev->nb_in_sg - 1)) {
			dev->hw_link[i]->next = 0;
		} else {
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
		}
	}

	/* Create output links */
	dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
	sg = dev->out_sg;
	for (j = i; j < dev->nb_out_sg + i; j++) {
		dev->hw_link[j]->len = sg->length;
		dev->hw_link[j]->p = sg->dma_address;
		if (j == (dev->nb_out_sg + i - 1)) {
			dev->hw_link[j]->next = 0;
		} else {
			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
			sg = sg_next(sg);
		}
	}

	/* Fill remaining fields of hw_desc[1] */
	dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
	dev->hw_desc[idx]->len1 = dev->total;
	dev->hw_desc[idx]->len2 = dev->total;
	dev->hw_desc[idx]->next = 0;

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	return 0;

unmap_in:
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		     DMA_TO_DEVICE);

	return -EINVAL;
}
static int sahara_aes_process(struct skcipher_request *req)
{
	struct sahara_dev *dev = dev_ptr;
	struct sahara_ctx *ctx;
	struct sahara_aes_reqctx *rctx;
	int ret;
	unsigned long timeout;

	/* Request is ready to be dispatched by the device */
	dev_dbg(dev->device,
		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
		req->cryptlen, req->src, req->dst);

	/* assign new request to device */
	dev->total = req->cryptlen;
	dev->in_sg = req->src;
	dev->out_sg = req->dst;

	rctx = skcipher_request_ctx(req);
	ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	if ((dev->flags & FLAGS_CBC) && req->iv)
		memcpy(dev->iv_base, req->iv, AES_BLOCK_SIZE);

	/* assign new context to device */
	dev->ctx = ctx;

	reinit_completion(&dev->dma_completion);

	ret = sahara_hw_descriptor_create(dev);
	if (ret)
		return -EINVAL;

	timeout = wait_for_completion_timeout(&dev->dma_completion,
				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
	if (!timeout) {
		dev_err(dev->device, "AES timeout\n");
		return -ETIMEDOUT;
	}

	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
		     DMA_FROM_DEVICE);
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		     DMA_TO_DEVICE);

	return 0;
}
static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->keylen = keylen;

	/* SAHARA only supports 128bit keys */
	if (keylen == AES_KEYSIZE_128) {
		memcpy(ctx->key, key, keylen);
		ctx->flags |= FLAGS_NEW_KEY;
		return 0;
	}

	if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
		return -EINVAL;

	/*
	 * The requested key size is not supported by HW, do a fallback.
	 */
	crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
						 CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(ctx->fallback, key, keylen);
}
static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int err = 0;

	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
		req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));

	if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE)) {
		dev_err(dev->device,
			"request size is not exact amount of AES blocks\n");
		return -EINVAL;
	}

	rctx->mode = mode;

	mutex_lock(&dev->queue_mutex);
	err = crypto_enqueue_request(&dev->queue, &req->base);
	mutex_unlock(&dev->queue_mutex);

	wake_up_process(dev->kthread);

	return err;
}
static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
{
	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
	struct sahara_ctx *ctx = crypto_skcipher_ctx(
		crypto_skcipher_reqtfm(req));

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&rctx->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);
		return crypto_skcipher_encrypt(&rctx->fallback_req);
	}

	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
}
static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
{
	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
	struct sahara_ctx *ctx = crypto_skcipher_ctx(
		crypto_skcipher_reqtfm(req));

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&rctx->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);
		return crypto_skcipher_decrypt(&rctx->fallback_req);
	}

	return sahara_aes_crypt(req, 0);
}
static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
{
	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
	struct sahara_ctx *ctx = crypto_skcipher_ctx(
		crypto_skcipher_reqtfm(req));

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&rctx->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);
		return crypto_skcipher_encrypt(&rctx->fallback_req);
	}

	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}
static int sahara_aes_cbc_decrypt(struct skcipher_request *req)
{
	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
	struct sahara_ctx *ctx = crypto_skcipher_ctx(
		crypto_skcipher_reqtfm(req));

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&rctx->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);
		return crypto_skcipher_decrypt(&rctx->fallback_req);
	}

	return sahara_aes_crypt(req, FLAGS_CBC);
}
static int sahara_aes_init_tfm(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->fallback = crypto_alloc_skcipher(name, 0,
					      CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback)) {
		pr_err("Error allocating fallback algo %s\n", name);
		return PTR_ERR(ctx->fallback);
	}

	crypto_skcipher_set_reqsize(tfm, sizeof(struct sahara_aes_reqctx) +
					 crypto_skcipher_reqsize(ctx->fallback));

	return 0;
}
static void sahara_aes_exit_tfm(struct crypto_skcipher *tfm)
{
	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->fallback);
}
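/*
 * Build the MDHA header word. The first descriptor of a transfer
 * initializes the hash state (SET_MODE_HASH | INIT); subsequent ones
 * reload the context saved by the previous operation (SET_MODE_MD_KEY).
 */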
static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
			       struct sahara_sha_reqctx *rctx)
{
	u32 hdr = 0;

	hdr = rctx->mode;

	if (rctx->first) {
		hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
		hdr |= SAHARA_HDR_MDHA_INIT;
	} else {
		hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
	}

	if (rctx->last)
		hdr |= SAHARA_HDR_MDHA_PDATA;

	if (hweight_long(hdr) % 2 == 0)
		hdr |= SAHARA_HDR_PARITY_BIT;

	return hdr;
}
static int sahara_sha_hw_links_create(struct sahara_dev *dev,
				      struct sahara_sha_reqctx *rctx,
				      int start)
{
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	dev->in_sg = rctx->in_sg;

	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
	if (dev->nb_in_sg < 0) {
		dev_err(dev->device, "Invalid numbers of src SG.\n");
		return dev->nb_in_sg;
	}
	if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg);
		return -EINVAL;
	}

	sg = dev->in_sg;
	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
	if (!ret)
		return -EFAULT;

	for (i = start; i < dev->nb_in_sg + start; i++) {
		dev->hw_link[i]->len = sg->length;
		dev->hw_link[i]->p = sg->dma_address;
		if (i == (dev->nb_in_sg + start - 1)) {
			dev->hw_link[i]->next = 0;
		} else {
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
		}
	}

	return i;
}
static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
						struct sahara_sha_reqctx *rctx,
						struct ahash_request *req,
						int index)
{
	unsigned result_len;
	int i = index;

	if (rctx->first)
		/* Create initial descriptor: #8*/
		dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
	else
		/* Create hash descriptor: #10. Must follow #6. */
		dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;

	dev->hw_desc[index]->len1 = rctx->total;
	if (dev->hw_desc[index]->len1 == 0) {
		/* if len1 is 0, p1 must be 0, too */
		dev->hw_desc[index]->p1 = 0;
		rctx->sg_in_idx = 0;
	} else {
		/* Create input links */
		dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
		i = sahara_sha_hw_links_create(dev, rctx, index);

		rctx->sg_in_idx = index;
		if (i < 0)
			return i;
	}

	dev->hw_desc[index]->p2 = dev->hw_phys_link[i];

	/* Save the context for the next operation */
	result_len = rctx->context_size;
	dev->hw_link[i]->p = dev->context_phys_base;

	dev->hw_link[i]->len = result_len;
	dev->hw_desc[index]->len2 = result_len;

	dev->hw_link[i]->next = 0;

	return 0;
}
/*
 * Load descriptor aka #6
 *
 * To load a previously saved context back to the MDHA unit
 *
 * p1: Saved Context
 * p2: NULL
 *
 */
static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
						   struct sahara_sha_reqctx *rctx,
						   struct ahash_request *req,
						   int index)
{
	dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);

	dev->hw_desc[index]->len1 = rctx->context_size;
	dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
	dev->hw_desc[index]->len2 = 0;
	dev->hw_desc[index]->p2 = 0;

	dev->hw_link[index]->len = rctx->context_size;
	dev->hw_link[index]->p = dev->context_phys_base;
	dev->hw_link[index]->next = 0;

	return 0;
}
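/*
 * Shorten a scatterlist so that it covers at most nbytes: the entry
 * holding the final byte is truncated and marked as the end of the
 * list. The residual count it returns is currently ignored by callers.
 */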
static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
{
	if (!sg || !sg->length)
		return nbytes;

	while (nbytes && sg) {
		if (nbytes <= sg->length) {
			sg->length = nbytes;
			sg_mark_end(sg);
			break;
		}
		nbytes -= sg->length;
		sg = sg_next(sg);
	}

	return nbytes;
}
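/*
 * The hardware needs block_size-aligned transfers, so requests are
 * staged: a partial trailing block is stashed in rctx->buf for the next
 * call, while previously stashed bytes are replayed from rctx->rembuf,
 * prepended to the current data via the two-entry in_sg_chain.
 */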
static int sahara_sha_prepare_request(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned int hash_later;
	unsigned int block_size;
	unsigned int len;

	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	/* append bytes from previous operation */
	len = rctx->buf_cnt + req->nbytes;

	/* only the last transfer can be padded in hardware */
	if (!rctx->last && (len < block_size)) {
		/* too few data, save for next operation */
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
					 0, req->nbytes, 0);
		rctx->buf_cnt += req->nbytes;

		return 0;
	}

	/* add data from previous operation first */
	if (rctx->buf_cnt)
		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);

	/* data must always be a multiple of block_size */
	hash_later = rctx->last ? 0 : len & (block_size - 1);
	if (hash_later) {
		unsigned int offset = req->nbytes - hash_later;
		/* Save remaining bytes for later use */
		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
					 hash_later, 0);
	}

	/* nbytes should now be multiple of blocksize */
	req->nbytes = req->nbytes - hash_later;

	sahara_walk_and_recalc(req->src, req->nbytes);

	/* have data from previous operation and current */
	if (rctx->buf_cnt && req->nbytes) {
		sg_init_table(rctx->in_sg_chain, 2);
		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);

		sg_chain(rctx->in_sg_chain, 2, req->src);

		rctx->total = req->nbytes + rctx->buf_cnt;
		rctx->in_sg = rctx->in_sg_chain;

		req->src = rctx->in_sg_chain;
	/* only data from previous operation */
	} else if (rctx->buf_cnt) {
		if (req->src)
			rctx->in_sg = req->src;
		else
			rctx->in_sg = rctx->in_sg_chain;
		/* buf was copied into rembuf above */
		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
		rctx->total = rctx->buf_cnt;
	/* no data from previous operation */
	} else {
		rctx->in_sg = req->src;
		rctx->total = req->nbytes;
		req->src = rctx->in_sg;
	}

	/* on next call, we only have the remaining data in the buffer */
	rctx->buf_cnt = hash_later;

	/* nonzero tells the caller there is data for the hardware */
	return 1;
}
static int sahara_sha_process(struct ahash_request *req)
{
	struct sahara_dev *dev = dev_ptr;
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	int ret;
	unsigned long timeout;

	ret = sahara_sha_prepare_request(req);
	if (!ret)
		return 0;

	if (rctx->first) {
		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
		dev->hw_desc[0]->next = 0;
		rctx->first = 0;
	} else {
		memcpy(dev->context_base, rctx->context, rctx->context_size);

		sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
		dev->hw_desc[1]->next = 0;
	}

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	reinit_completion(&dev->dma_completion);

	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	timeout = wait_for_completion_timeout(&dev->dma_completion,
				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
	if (!timeout) {
		dev_err(dev->device, "SHA timeout\n");
		return -ETIMEDOUT;
	}

	if (rctx->sg_in_idx)
		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			     DMA_TO_DEVICE);

	memcpy(rctx->context, dev->context_base, rctx->context_size);

	if (req->result)
		memcpy(req->result, rctx->context, rctx->digest_size);

	return 0;
}
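/*
 * Dispatcher kthread: requests are pulled off the crypto queue one at a
 * time (the hardware cannot pipeline) and routed to the AES or SHA
 * processing path; the result is signalled back through the async
 * request's completion callback.
 */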
static int sahara_queue_manage(void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	struct crypto_async_request *async_req;
	struct crypto_async_request *backlog;
	int ret = 0;

	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		mutex_lock(&dev->queue_mutex);
		backlog = crypto_get_backlog(&dev->queue);
		async_req = crypto_dequeue_request(&dev->queue);
		mutex_unlock(&dev->queue_mutex);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (async_req) {
			if (crypto_tfm_alg_type(async_req->tfm) ==
			    CRYPTO_ALG_TYPE_AHASH) {
				struct ahash_request *req =
					ahash_request_cast(async_req);

				ret = sahara_sha_process(req);
			} else {
				struct skcipher_request *req =
					skcipher_request_cast(async_req);

				ret = sahara_aes_process(req);
			}

			async_req->complete(async_req, ret);

			continue;
		}

		schedule();
	} while (!kthread_should_stop());

	return 0;
}
static int sahara_sha_enqueue(struct ahash_request *req, int last)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int ret;

	if (!req->nbytes && !last)
		return 0;

	rctx->last = last;

	if (!rctx->active) {
		rctx->active = 1;
		rctx->first = 1;
	}

	mutex_lock(&dev->queue_mutex);
	ret = crypto_enqueue_request(&dev->queue, &req->base);
	mutex_unlock(&dev->queue_mutex);

	wake_up_process(dev->kthread);

	return ret;
}
static int sahara_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memset(rctx, 0, sizeof(*rctx));

	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
		rctx->digest_size = SHA1_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
		rctx->digest_size = SHA256_DIGEST_SIZE;
		break;
	default:
		return -EINVAL;
	}

	rctx->context_size = rctx->digest_size + 4;
	rctx->active = 0;

	return 0;
}
static int sahara_sha_update(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 0);
}

static int sahara_sha_final(struct ahash_request *req)
{
	req->nbytes = 0;
	return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_finup(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_digest(struct ahash_request *req)
{
	sahara_sha_init(req);

	return sahara_sha_finup(req);
}
static int sahara_sha_export(struct ahash_request *req, void *out)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));

	return 0;
}

static int sahara_sha_import(struct ahash_request *req, const void *in)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));

	return 0;
}
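/*
 * export/import copy the whole request context, so the saved state
 * includes the hardware context as well as any bytes still buffered in
 * rctx->buf.
 */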
static int sahara_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sahara_sha_reqctx) +
				 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);

	return 0;
}
static struct skcipher_alg aes_algs[] = {
{
	.base.cra_name		= "ecb(aes)",
	.base.cra_driver_name	= "sahara-ecb-aes",
	.base.cra_priority	= 300,
	.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct sahara_ctx),
	.base.cra_alignmask	= 0x0,
	.base.cra_module	= THIS_MODULE,

	.init			= sahara_aes_init_tfm,
	.exit			= sahara_aes_exit_tfm,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= sahara_aes_setkey,
	.encrypt		= sahara_aes_ecb_encrypt,
	.decrypt		= sahara_aes_ecb_decrypt,
}, {
	.base.cra_name		= "cbc(aes)",
	.base.cra_driver_name	= "sahara-cbc-aes",
	.base.cra_priority	= 300,
	.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct sahara_ctx),
	.base.cra_alignmask	= 0x0,
	.base.cra_module	= THIS_MODULE,

	.init			= sahara_aes_init_tfm,
	.exit			= sahara_aes_exit_tfm,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= sahara_aes_setkey,
	.encrypt		= sahara_aes_cbc_encrypt,
	.decrypt		= sahara_aes_cbc_decrypt,
}
};
static struct ahash_alg sha_v3_algs[] = {
{
	.init		= sahara_sha_init,
	.update		= sahara_sha_update,
	.final		= sahara_sha_final,
	.finup		= sahara_sha_finup,
	.digest		= sahara_sha_digest,
	.export		= sahara_sha_export,
	.import		= sahara_sha_import,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.statesize		= sizeof(struct sahara_sha_reqctx),
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "sahara-sha1",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct sahara_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= sahara_sha_cra_init,
	}
},
};
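/*
 * SHA-256 is only available on version 4 of the IP (i.MX53);
 * sahara_register_algs() registers sha_v4_algs conditionally.
 */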
static struct ahash_alg sha_v4_algs[] = {
{
	.init		= sahara_sha_init,
	.update		= sahara_sha_update,
	.final		= sahara_sha_final,
	.finup		= sahara_sha_finup,
	.digest		= sahara_sha_digest,
	.export		= sahara_sha_export,
	.import		= sahara_sha_import,
	.halg.digestsize	= SHA256_DIGEST_SIZE,
	.halg.statesize		= sizeof(struct sahara_sha_reqctx),
	.halg.base	= {
		.cra_name		= "sha256",
		.cra_driver_name	= "sahara-sha256",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct sahara_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= sahara_sha_cra_init,
	}
},
};
static irqreturn_t sahara_irq_handler(int irq, void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);

	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
		     SAHARA_REG_CMD);

	sahara_decode_status(dev, stat);

	if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
		return IRQ_NONE;
	} else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
		dev->error = 0;
	} else {
		sahara_decode_error(dev, err);
		dev->error = -EINVAL;
	}

	complete(&dev->dma_completion);

	return IRQ_HANDLED;
}
static int sahara_register_algs(struct sahara_dev *dev)
{
	int err;
	unsigned int i, j, k, l;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_skcipher(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
		err = crypto_register_ahash(&sha_v3_algs[k]);
		if (err)
			goto err_sha_v3_algs;
	}

	if (dev->version > SAHARA_VERSION_3)
		for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
			err = crypto_register_ahash(&sha_v4_algs[l]);
			if (err)
				goto err_sha_v4_algs;
		}

	return 0;

err_sha_v4_algs:
	for (j = 0; j < l; j++)
		crypto_unregister_ahash(&sha_v4_algs[j]);

err_sha_v3_algs:
	for (j = 0; j < k; j++)
		crypto_unregister_ahash(&sha_v3_algs[j]);

err_aes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_skcipher(&aes_algs[j]);

	return err;
}
static void sahara_unregister_algs(struct sahara_dev *dev)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_skcipher(&aes_algs[i]);

	for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
		crypto_unregister_ahash(&sha_v3_algs[i]);

	if (dev->version > SAHARA_VERSION_3)
		for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
			crypto_unregister_ahash(&sha_v4_algs[i]);
}
static const struct of_device_id sahara_dt_ids[] = {
	{ .compatible = "fsl,imx53-sahara" },
	{ .compatible = "fsl,imx27-sahara" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sahara_dt_ids);
static int sahara_probe(struct platform_device *pdev)
{
	struct sahara_dev *dev;
	u32 version;
	int irq;
	int err;
	int i;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->device = &pdev->dev;
	platform_set_drvdata(pdev, dev);

	/* Get the base address */
	dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dev->regs_base))
		return PTR_ERR(dev->regs_base);

	/* Get the IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
			       0, dev_name(&pdev->dev), dev);
	if (err) {
		dev_err(&pdev->dev, "failed to request irq\n");
		return err;
	}
	/* clocks */
	dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(dev->clk_ipg)) {
		dev_err(&pdev->dev, "Could not get ipg clock\n");
		return PTR_ERR(dev->clk_ipg);
	}

	dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(dev->clk_ahb)) {
		dev_err(&pdev->dev, "Could not get ahb clock\n");
		return PTR_ERR(dev->clk_ahb);
	}
	/* Allocate HW descriptors */
	dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
			&dev->hw_phys_desc[0], GFP_KERNEL);
	if (!dev->hw_desc[0]) {
		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
		return -ENOMEM;
	}
	dev->hw_desc[1] = dev->hw_desc[0] + 1;
	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
				sizeof(struct sahara_hw_desc);

	/* Allocate space for iv and key */
	dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
					    &dev->key_phys_base, GFP_KERNEL);
	if (!dev->key_base) {
		dev_err(&pdev->dev, "Could not allocate memory for key\n");
		return -ENOMEM;
	}
	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;

	/* Allocate space for context: largest digest + message length field */
	dev->context_base = dmam_alloc_coherent(&pdev->dev,
					SHA256_DIGEST_SIZE + 4,
					&dev->context_phys_base, GFP_KERNEL);
	if (!dev->context_base) {
		dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
		return -ENOMEM;
	}

	/* Allocate space for HW links */
	dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
			&dev->hw_phys_link[0], GFP_KERNEL);
	if (!dev->hw_link[0]) {
		dev_err(&pdev->dev, "Could not allocate hw links\n");
		return -ENOMEM;
	}
	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
					sizeof(struct sahara_hw_link);
		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
	}
	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);

	mutex_init(&dev->queue_mutex);

	dev_ptr = dev;

	dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
	if (IS_ERR(dev->kthread)) {
		return PTR_ERR(dev->kthread);
	}

	init_completion(&dev->dma_completion);

	err = clk_prepare_enable(dev->clk_ipg);
	if (err)
		return err;
	err = clk_prepare_enable(dev->clk_ahb);
	if (err)
		goto clk_ipg_disable;
	version = sahara_read(dev, SAHARA_REG_VERSION);
	if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
		if (version != SAHARA_VERSION_3)
			err = -ENODEV;
	} else if (of_device_is_compatible(pdev->dev.of_node,
			"fsl,imx53-sahara")) {
		if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
			err = -ENODEV;
		version = (version >> 8) & 0xff;
	}
	if (err == -ENODEV) {
		dev_err(&pdev->dev, "SAHARA version %d not supported\n",
			version);
		goto err_algs;
	}

	dev->version = version;

	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
		     SAHARA_REG_CMD);
	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
			SAHARA_CONTROL_SET_MAXBURST(8) |
			SAHARA_CONTROL_RNG_AUTORSD |
			SAHARA_CONTROL_ENABLE_INT,
			SAHARA_REG_CONTROL);

	err = sahara_register_algs(dev);
	if (err)
		goto err_algs;

	dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);

	return 0;

err_algs:
	kthread_stop(dev->kthread);

	clk_disable_unprepare(dev->clk_ahb);
clk_ipg_disable:
	clk_disable_unprepare(dev->clk_ipg);

	return err;
}
static int sahara_remove(struct platform_device *pdev)
{
	struct sahara_dev *dev = platform_get_drvdata(pdev);

	kthread_stop(dev->kthread);

	sahara_unregister_algs(dev);

	clk_disable_unprepare(dev->clk_ipg);
	clk_disable_unprepare(dev->clk_ahb);

	dev_ptr = NULL;

	return 0;
}
static struct platform_driver sahara_driver = {
	.probe		= sahara_probe,
	.remove		= sahara_remove,
	.driver		= {
		.name	= SAHARA_NAME,
		.of_match_table = sahara_dt_ids,
	},
};

module_platform_driver(sahara_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");