4 * Support for SAHARA cryptographic accelerator.
6 * Copyright (c) 2013 Vista Silicon S.L.
7 * Author: Javier Martin <javier.martin@vista-silicon.com>
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as published
11 * by the Free Software Foundation.
13 * Based on omap-aes.c and tegra-aes.c
16 #include <crypto/algapi.h>
17 #include <crypto/aes.h>
19 #include <linux/clk.h>
20 #include <linux/crypto.h>
21 #include <linux/interrupt.h>
23 #include <linux/irq.h>
24 #include <linux/kernel.h>
25 #include <linux/module.h>
27 #include <linux/platform_device.h>
29 #define SAHARA_NAME "sahara"
30 #define SAHARA_VERSION_3 3
31 #define SAHARA_TIMEOUT_MS 1000
32 #define SAHARA_MAX_HW_DESC 2
33 #define SAHARA_MAX_HW_LINK 20
35 #define FLAGS_MODE_MASK 0x000f
36 #define FLAGS_ENCRYPT BIT(0)
37 #define FLAGS_CBC BIT(1)
38 #define FLAGS_NEW_KEY BIT(3)
41 #define SAHARA_HDR_BASE 0x00800000
42 #define SAHARA_HDR_SKHA_ALG_AES 0
43 #define SAHARA_HDR_SKHA_OP_ENC (1 << 2)
44 #define SAHARA_HDR_SKHA_MODE_ECB (0 << 3)
45 #define SAHARA_HDR_SKHA_MODE_CBC (1 << 3)
46 #define SAHARA_HDR_FORM_DATA (5 << 16)
47 #define SAHARA_HDR_FORM_KEY (8 << 16)
48 #define SAHARA_HDR_LLO (1 << 24)
49 #define SAHARA_HDR_CHA_SKHA (1 << 28)
50 #define SAHARA_HDR_CHA_MDHA (2 << 28)
51 #define SAHARA_HDR_PARITY_BIT (1 << 31)
53 /* SAHARA can only process one request at a time */
54 #define SAHARA_QUEUE_LENGTH 1
56 #define SAHARA_REG_VERSION 0x00
57 #define SAHARA_REG_DAR 0x04
58 #define SAHARA_REG_CONTROL 0x08
59 #define SAHARA_CONTROL_SET_THROTTLE(x) (((x) & 0xff) << 24)
60 #define SAHARA_CONTROL_SET_MAXBURST(x) (((x) & 0xff) << 16)
61 #define SAHARA_CONTROL_RNG_AUTORSD (1 << 7)
62 #define SAHARA_CONTROL_ENABLE_INT (1 << 4)
63 #define SAHARA_REG_CMD 0x0C
64 #define SAHARA_CMD_RESET (1 << 0)
65 #define SAHARA_CMD_CLEAR_INT (1 << 8)
66 #define SAHARA_CMD_CLEAR_ERR (1 << 9)
67 #define SAHARA_CMD_SINGLE_STEP (1 << 10)
68 #define SAHARA_CMD_MODE_BATCH (1 << 16)
69 #define SAHARA_CMD_MODE_DEBUG (1 << 18)
70 #define SAHARA_REG_STATUS 0x10
71 #define SAHARA_STATUS_GET_STATE(x) ((x) & 0x7)
72 #define SAHARA_STATE_IDLE 0
73 #define SAHARA_STATE_BUSY 1
74 #define SAHARA_STATE_ERR 2
75 #define SAHARA_STATE_FAULT 3
76 #define SAHARA_STATE_COMPLETE 4
77 #define SAHARA_STATE_COMP_FLAG (1 << 2)
78 #define SAHARA_STATUS_DAR_FULL (1 << 3)
79 #define SAHARA_STATUS_ERROR (1 << 4)
80 #define SAHARA_STATUS_SECURE (1 << 5)
81 #define SAHARA_STATUS_FAIL (1 << 6)
82 #define SAHARA_STATUS_INIT (1 << 7)
83 #define SAHARA_STATUS_RNG_RESEED (1 << 8)
84 #define SAHARA_STATUS_ACTIVE_RNG (1 << 9)
85 #define SAHARA_STATUS_ACTIVE_MDHA (1 << 10)
86 #define SAHARA_STATUS_ACTIVE_SKHA (1 << 11)
87 #define SAHARA_STATUS_MODE_BATCH (1 << 16)
88 #define SAHARA_STATUS_MODE_DEDICATED (1 << 17)
89 #define SAHARA_STATUS_MODE_DEBUG (1 << 18)
90 #define SAHARA_STATUS_GET_ISTATE(x) (((x) >> 24) & 0xff)
91 #define SAHARA_REG_ERRSTATUS 0x14
92 #define SAHARA_ERRSTATUS_GET_SOURCE(x) ((x) & 0xf)
93 #define SAHARA_ERRSOURCE_CHA 14
94 #define SAHARA_ERRSOURCE_DMA 15
95 #define SAHARA_ERRSTATUS_DMA_DIR (1 << 8)
96 #define SAHARA_ERRSTATUS_GET_DMASZ(x)(((x) >> 9) & 0x3)
97 #define SAHARA_ERRSTATUS_GET_DMASRC(x) (((x) >> 13) & 0x7)
98 #define SAHARA_ERRSTATUS_GET_CHASRC(x) (((x) >> 16) & 0xfff)
99 #define SAHARA_ERRSTATUS_GET_CHAERR(x) (((x) >> 28) & 0x3)
100 #define SAHARA_REG_FADDR 0x18
101 #define SAHARA_REG_CDAR 0x1C
102 #define SAHARA_REG_IDAR 0x20
/*
 * NOTE(review): this listing has dropped many lines; the struct bodies below
 * are fragmentary. Comments are limited to what the visible code shows.
 */
/* In-memory descriptor consumed by the SAHARA engine (see sahara_dump_descriptors: hdr, len1/p1, len2/p2, next). */
104 struct sahara_hw_desc {
/* Scatter/gather link entry chained from a descriptor (len, p, next — see sahara_dump_links). */
113 struct sahara_hw_link {
120 struct sahara_dev *dev;
/* Only 128-bit keys are handled by the hardware (sahara_aes_setkey). */
123 u8 key[AES_KEYSIZE_128];
/* Software fallback cipher used for 192/256-bit keys. */
124 struct crypto_ablkcipher *fallback;
/* Per-request context; carries the requested mode flags (rctx->mode). */
127 struct sahara_aes_reqctx {
/* Per-device state. */
132 struct device *device;
133 void __iomem *regs_base;
137 struct sahara_ctx *ctx;
/* Request queue; depth SAHARA_QUEUE_LENGTH (one request at a time). */
139 struct crypto_queue queue;
142 struct tasklet_struct done_task;
143 struct tasklet_struct queue_task;
/* Coherent DMA descriptors: [0] = key/IV load, [1] = data links. */
145 struct sahara_hw_desc *hw_desc[SAHARA_MAX_HW_DESC];
146 dma_addr_t hw_phys_desc[SAHARA_MAX_HW_DESC];
149 dma_addr_t key_phys_base;
152 dma_addr_t iv_phys_base;
154 struct sahara_hw_link *hw_link[SAHARA_MAX_HW_LINK];
155 dma_addr_t hw_phys_link[SAHARA_MAX_HW_LINK];
/* Request currently owned by the hardware. */
157 struct ablkcipher_request *req;
159 struct scatterlist *in_sg;
160 unsigned int nb_in_sg;
161 struct scatterlist *out_sg;
162 unsigned int nb_out_sg;
/* Fires if the engine does not complete within SAHARA_TIMEOUT_MS. */
165 struct timer_list watchdog;
168 static struct sahara_dev *dev_ptr;
170 static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
172 writel(data, dev->regs_base + reg);
175 static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
177 return readl(dev->regs_base + reg);
180 static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
182 u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
183 SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
184 SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
186 if (dev->flags & FLAGS_CBC) {
187 hdr |= SAHARA_HDR_SKHA_MODE_CBC;
188 hdr ^= SAHARA_HDR_PARITY_BIT;
191 if (dev->flags & FLAGS_ENCRYPT) {
192 hdr |= SAHARA_HDR_SKHA_OP_ENC;
193 hdr ^= SAHARA_HDR_PARITY_BIT;
199 static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
201 return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
202 SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
205 static int sahara_sg_length(struct scatterlist *sg,
210 struct scatterlist *sg_list;
216 len = min(sg_list->length, total);
221 sg_list = sg_next(sg_list);
/*
 * Human-readable decode tables for the ERRSTATUS register fields.
 * NOTE(review): listing is missing several table entries; indices correspond
 * to the bit-field extract macros above — verify counts against the manual.
 */
/* Indexed by SAHARA_ERRSTATUS_GET_SOURCE() (0..15). */
229 static char *sahara_err_src[16] = {
232 "Descriptor length error",
233 "Descriptor length or pointer error",
235 "Link pointer error",
236 "Input buffer error",
237 "Output buffer error",
238 "Output buffer starvation",
239 "Internal state fault",
240 "General descriptor problem",
242 "Descriptor address error",
243 "Link address error",
/* Indexed by SAHARA_ERRSTATUS_GET_DMASZ() (0..3). */
248 static char *sahara_err_dmasize[4] = {
250 "Half-word transfer",
/* Indexed by SAHARA_ERRSTATUS_GET_DMASRC() (0..7). */
255 static char *sahara_err_dmasrc[8] = {
258 "Internal IP bus error",
260 "DMA crosses 256 byte boundary",
/* Indexed via ffs() of SAHARA_ERRSTATUS_GET_CHASRC() in sahara_decode_error(). */
266 static char *sahara_cha_errsrc[12] = {
267 "Input buffer non-empty",
272 "Write during processing",
273 "CTX read during processing",
275 "Input buffer disabled/underflow",
276 "Output buffer disabled/overflow",
277 "DES key parity error",
/* Indexed by SAHARA_ERRSTATUS_GET_CHAERR() (0..3). */
281 static char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };
283 static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
285 u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
286 u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));
288 dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);
290 dev_err(dev->device, " - %s.\n", sahara_err_src[source]);
292 if (source == SAHARA_ERRSOURCE_DMA) {
293 if (error & SAHARA_ERRSTATUS_DMA_DIR)
294 dev_err(dev->device, " * DMA read.\n");
296 dev_err(dev->device, " * DMA write.\n");
298 dev_err(dev->device, " * %s.\n",
299 sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
300 dev_err(dev->device, " * %s.\n",
301 sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
302 } else if (source == SAHARA_ERRSOURCE_CHA) {
303 dev_err(dev->device, " * %s.\n",
304 sahara_cha_errsrc[chasrc]);
305 dev_err(dev->device, " * %s.\n",
306 sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
308 dev_err(dev->device, "\n");
/* State names for SAHARA_STATUS_GET_STATE() with the completion flag masked off. */
311 static char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };
313 static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
317 if (!IS_ENABLED(DEBUG))
320 state = SAHARA_STATUS_GET_STATE(status);
322 dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
325 dev_dbg(dev->device, " - State = %d:\n", state);
326 if (state & SAHARA_STATE_COMP_FLAG)
327 dev_dbg(dev->device, " * Descriptor completed. IRQ pending.\n");
329 dev_dbg(dev->device, " * %s.\n",
330 sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);
332 if (status & SAHARA_STATUS_DAR_FULL)
333 dev_dbg(dev->device, " - DAR Full.\n");
334 if (status & SAHARA_STATUS_ERROR)
335 dev_dbg(dev->device, " - Error.\n");
336 if (status & SAHARA_STATUS_SECURE)
337 dev_dbg(dev->device, " - Secure.\n");
338 if (status & SAHARA_STATUS_FAIL)
339 dev_dbg(dev->device, " - Fail.\n");
340 if (status & SAHARA_STATUS_RNG_RESEED)
341 dev_dbg(dev->device, " - RNG Reseed Request.\n");
342 if (status & SAHARA_STATUS_ACTIVE_RNG)
343 dev_dbg(dev->device, " - RNG Active.\n");
344 if (status & SAHARA_STATUS_ACTIVE_MDHA)
345 dev_dbg(dev->device, " - MDHA Active.\n");
346 if (status & SAHARA_STATUS_ACTIVE_SKHA)
347 dev_dbg(dev->device, " - SKHA Active.\n");
349 if (status & SAHARA_STATUS_MODE_BATCH)
350 dev_dbg(dev->device, " - Batch Mode.\n");
351 else if (status & SAHARA_STATUS_MODE_DEDICATED)
352 dev_dbg(dev->device, " - Decidated Mode.\n");
353 else if (status & SAHARA_STATUS_MODE_DEBUG)
354 dev_dbg(dev->device, " - Debug Mode.\n");
356 dev_dbg(dev->device, " - Internal state = 0x%02x\n",
357 SAHARA_STATUS_GET_ISTATE(status));
359 dev_dbg(dev->device, "Current DAR: 0x%08x\n",
360 sahara_read(dev, SAHARA_REG_CDAR));
361 dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
362 sahara_read(dev, SAHARA_REG_IDAR));
365 static void sahara_dump_descriptors(struct sahara_dev *dev)
369 if (!IS_ENABLED(DEBUG))
372 for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
373 dev_dbg(dev->device, "Descriptor (%d) (0x%08x):\n",
374 i, dev->hw_phys_desc[i]);
375 dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
376 dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
377 dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
378 dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
379 dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
380 dev_dbg(dev->device, "\tnext = 0x%08x\n",
381 dev->hw_desc[i]->next);
383 dev_dbg(dev->device, "\n");
386 static void sahara_dump_links(struct sahara_dev *dev)
390 if (!IS_ENABLED(DEBUG))
393 for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
394 dev_dbg(dev->device, "Link (%d) (0x%08x):\n",
395 i, dev->hw_phys_link[i]);
396 dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
397 dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
398 dev_dbg(dev->device, "\tnext = 0x%08x\n",
399 dev->hw_link[i]->next);
401 dev_dbg(dev->device, "\n");
404 static void sahara_aes_done_task(unsigned long data)
406 struct sahara_dev *dev = (struct sahara_dev *)data;
408 dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
410 dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
413 spin_lock(&dev->lock);
414 clear_bit(FLAGS_BUSY, &dev->flags);
415 spin_unlock(&dev->lock);
417 dev->req->base.complete(&dev->req->base, dev->error);
420 static void sahara_watchdog(unsigned long data)
422 struct sahara_dev *dev = (struct sahara_dev *)data;
423 unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
424 unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
426 sahara_decode_status(dev, stat);
427 sahara_decode_error(dev, err);
428 dev->error = -ETIMEDOUT;
429 sahara_aes_done_task(data);
432 static int sahara_hw_descriptor_create(struct sahara_dev *dev)
434 struct sahara_ctx *ctx = dev->ctx;
435 struct scatterlist *sg;
439 /* Copy new key if necessary */
440 if (ctx->flags & FLAGS_NEW_KEY) {
441 memcpy(dev->key_base, ctx->key, ctx->keylen);
442 ctx->flags &= ~FLAGS_NEW_KEY;
444 if (dev->flags & FLAGS_CBC) {
445 dev->hw_desc[0]->len1 = AES_BLOCK_SIZE;
446 dev->hw_desc[0]->p1 = dev->iv_phys_base;
448 dev->hw_desc[0]->len1 = 0;
449 dev->hw_desc[0]->p1 = 0;
451 dev->hw_desc[0]->len2 = ctx->keylen;
452 dev->hw_desc[0]->p2 = dev->key_phys_base;
453 dev->hw_desc[0]->next = dev->hw_phys_desc[1];
455 dev->hw_desc[0]->hdr = sahara_aes_key_hdr(dev);
457 dev->nb_in_sg = sahara_sg_length(dev->in_sg, dev->total);
458 dev->nb_out_sg = sahara_sg_length(dev->out_sg, dev->total);
459 if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
460 dev_err(dev->device, "not enough hw links (%d)\n",
461 dev->nb_in_sg + dev->nb_out_sg);
465 ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
467 if (ret != dev->nb_in_sg) {
468 dev_err(dev->device, "couldn't map in sg\n");
471 ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
473 if (ret != dev->nb_out_sg) {
474 dev_err(dev->device, "couldn't map out sg\n");
478 /* Create input links */
479 dev->hw_desc[1]->p1 = dev->hw_phys_link[0];
481 for (i = 0; i < dev->nb_in_sg; i++) {
482 dev->hw_link[i]->len = sg->length;
483 dev->hw_link[i]->p = sg->dma_address;
484 if (i == (dev->nb_in_sg - 1)) {
485 dev->hw_link[i]->next = 0;
487 dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
492 /* Create output links */
493 dev->hw_desc[1]->p2 = dev->hw_phys_link[i];
495 for (j = i; j < dev->nb_out_sg + i; j++) {
496 dev->hw_link[j]->len = sg->length;
497 dev->hw_link[j]->p = sg->dma_address;
498 if (j == (dev->nb_out_sg + i - 1)) {
499 dev->hw_link[j]->next = 0;
501 dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
506 /* Fill remaining fields of hw_desc[1] */
507 dev->hw_desc[1]->hdr = sahara_aes_data_link_hdr(dev);
508 dev->hw_desc[1]->len1 = dev->total;
509 dev->hw_desc[1]->len2 = dev->total;
510 dev->hw_desc[1]->next = 0;
512 sahara_dump_descriptors(dev);
513 sahara_dump_links(dev);
515 /* Start processing descriptor chain. */
516 mod_timer(&dev->watchdog,
517 jiffies + msecs_to_jiffies(SAHARA_TIMEOUT_MS));
518 sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
523 dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
526 dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
532 static void sahara_aes_queue_task(unsigned long data)
534 struct sahara_dev *dev = (struct sahara_dev *)data;
535 struct crypto_async_request *async_req, *backlog;
536 struct sahara_ctx *ctx;
537 struct sahara_aes_reqctx *rctx;
538 struct ablkcipher_request *req;
541 spin_lock(&dev->lock);
542 backlog = crypto_get_backlog(&dev->queue);
543 async_req = crypto_dequeue_request(&dev->queue);
545 clear_bit(FLAGS_BUSY, &dev->flags);
546 spin_unlock(&dev->lock);
552 backlog->complete(backlog, -EINPROGRESS);
554 req = ablkcipher_request_cast(async_req);
556 /* Request is ready to be dispatched by the device */
558 "dispatch request (nbytes=%d, src=%p, dst=%p)\n",
559 req->nbytes, req->src, req->dst);
561 /* assign new request to device */
563 dev->total = req->nbytes;
564 dev->in_sg = req->src;
565 dev->out_sg = req->dst;
567 rctx = ablkcipher_request_ctx(req);
568 ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
569 rctx->mode &= FLAGS_MODE_MASK;
570 dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
572 if ((dev->flags & FLAGS_CBC) && req->info)
573 memcpy(dev->iv_base, req->info, AES_KEYSIZE_128);
575 /* assign new context to device */
579 ret = sahara_hw_descriptor_create(dev);
581 spin_lock(&dev->lock);
582 clear_bit(FLAGS_BUSY, &dev->flags);
583 spin_unlock(&dev->lock);
584 dev->req->base.complete(&dev->req->base, ret);
588 static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
591 struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm);
594 ctx->keylen = keylen;
596 /* SAHARA only supports 128bit keys */
597 if (keylen == AES_KEYSIZE_128) {
598 memcpy(ctx->key, key, keylen);
599 ctx->flags |= FLAGS_NEW_KEY;
603 if (keylen != AES_KEYSIZE_128 &&
604 keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
608 * The requested key size is not supported by HW, do a fallback.
610 ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
611 ctx->fallback->base.crt_flags |=
612 (tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
614 ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen);
616 struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm);
618 tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK;
619 tfm_aux->crt_flags |=
620 (ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK);
625 static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
627 struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
628 crypto_ablkcipher_reqtfm(req));
629 struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
630 struct sahara_dev *dev = dev_ptr;
634 dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
635 req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
637 if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
639 "request size is not exact amount of AES blocks\n");
646 spin_lock_bh(&dev->lock);
647 err = ablkcipher_enqueue_request(&dev->queue, req);
648 busy = test_and_set_bit(FLAGS_BUSY, &dev->flags);
649 spin_unlock_bh(&dev->lock);
652 tasklet_schedule(&dev->queue_task);
657 static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
659 struct crypto_tfm *tfm =
660 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
661 struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
662 crypto_ablkcipher_reqtfm(req));
665 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
666 ablkcipher_request_set_tfm(req, ctx->fallback);
667 err = crypto_ablkcipher_encrypt(req);
668 ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
672 return sahara_aes_crypt(req, FLAGS_ENCRYPT);
675 static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
677 struct crypto_tfm *tfm =
678 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
679 struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
680 crypto_ablkcipher_reqtfm(req));
683 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
684 ablkcipher_request_set_tfm(req, ctx->fallback);
685 err = crypto_ablkcipher_decrypt(req);
686 ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
690 return sahara_aes_crypt(req, 0);
693 static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
695 struct crypto_tfm *tfm =
696 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
697 struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
698 crypto_ablkcipher_reqtfm(req));
701 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
702 ablkcipher_request_set_tfm(req, ctx->fallback);
703 err = crypto_ablkcipher_encrypt(req);
704 ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
708 return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
711 static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
713 struct crypto_tfm *tfm =
714 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
715 struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
716 crypto_ablkcipher_reqtfm(req));
719 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
720 ablkcipher_request_set_tfm(req, ctx->fallback);
721 err = crypto_ablkcipher_decrypt(req);
722 ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
726 return sahara_aes_crypt(req, FLAGS_CBC);
729 static int sahara_aes_cra_init(struct crypto_tfm *tfm)
731 const char *name = tfm->__crt_alg->cra_name;
732 struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
734 ctx->fallback = crypto_alloc_ablkcipher(name, 0,
735 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
736 if (IS_ERR(ctx->fallback)) {
737 pr_err("Error allocating fallback algo %s\n", name);
738 return PTR_ERR(ctx->fallback);
741 tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx);
746 static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
748 struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
751 crypto_free_ablkcipher(ctx->fallback);
752 ctx->fallback = NULL;
755 static struct crypto_alg aes_algs[] = {
757 .cra_name = "ecb(aes)",
758 .cra_driver_name = "sahara-ecb-aes",
760 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
761 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
762 .cra_blocksize = AES_BLOCK_SIZE,
763 .cra_ctxsize = sizeof(struct sahara_ctx),
764 .cra_alignmask = 0x0,
765 .cra_type = &crypto_ablkcipher_type,
766 .cra_module = THIS_MODULE,
767 .cra_init = sahara_aes_cra_init,
768 .cra_exit = sahara_aes_cra_exit,
769 .cra_u.ablkcipher = {
770 .min_keysize = AES_MIN_KEY_SIZE ,
771 .max_keysize = AES_MAX_KEY_SIZE,
772 .setkey = sahara_aes_setkey,
773 .encrypt = sahara_aes_ecb_encrypt,
774 .decrypt = sahara_aes_ecb_decrypt,
777 .cra_name = "cbc(aes)",
778 .cra_driver_name = "sahara-cbc-aes",
780 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
781 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
782 .cra_blocksize = AES_BLOCK_SIZE,
783 .cra_ctxsize = sizeof(struct sahara_ctx),
784 .cra_alignmask = 0x0,
785 .cra_type = &crypto_ablkcipher_type,
786 .cra_module = THIS_MODULE,
787 .cra_init = sahara_aes_cra_init,
788 .cra_exit = sahara_aes_cra_exit,
789 .cra_u.ablkcipher = {
790 .min_keysize = AES_MIN_KEY_SIZE ,
791 .max_keysize = AES_MAX_KEY_SIZE,
792 .ivsize = AES_BLOCK_SIZE,
793 .setkey = sahara_aes_setkey,
794 .encrypt = sahara_aes_cbc_encrypt,
795 .decrypt = sahara_aes_cbc_decrypt,
800 static irqreturn_t sahara_irq_handler(int irq, void *data)
802 struct sahara_dev *dev = (struct sahara_dev *)data;
803 unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
804 unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
806 del_timer(&dev->watchdog);
808 sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
811 sahara_decode_status(dev, stat);
813 if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
815 } else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
818 sahara_decode_error(dev, err);
819 dev->error = -EINVAL;
822 tasklet_schedule(&dev->done_task);
828 static int sahara_register_algs(struct sahara_dev *dev)
832 for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
833 INIT_LIST_HEAD(&aes_algs[i].cra_list);
834 err = crypto_register_alg(&aes_algs[i]);
842 for (j = 0; j < i; j++)
843 crypto_unregister_alg(&aes_algs[j]);
848 static void sahara_unregister_algs(struct sahara_dev *dev)
852 for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
853 crypto_unregister_alg(&aes_algs[i]);
856 static struct platform_device_id sahara_platform_ids[] = {
857 { .name = "sahara-imx27" },
860 MODULE_DEVICE_TABLE(platform, sahara_platform_ids);
862 static struct of_device_id sahara_dt_ids[] = {
863 { .compatible = "fsl,imx27-sahara" },
866 MODULE_DEVICE_TABLE(of, sahara_dt_ids);
868 static int sahara_probe(struct platform_device *pdev)
870 struct sahara_dev *dev;
871 struct resource *res;
877 dev = devm_kzalloc(&pdev->dev, sizeof(struct sahara_dev), GFP_KERNEL);
879 dev_err(&pdev->dev, "unable to alloc data struct.\n");
883 dev->device = &pdev->dev;
884 platform_set_drvdata(pdev, dev);
886 /* Get the base address */
887 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
889 dev_err(&pdev->dev, "failed to get memory region resource\n");
893 if (devm_request_mem_region(&pdev->dev, res->start,
894 resource_size(res), SAHARA_NAME) == NULL) {
895 dev_err(&pdev->dev, "failed to request memory region\n");
898 dev->regs_base = devm_ioremap(&pdev->dev, res->start,
900 if (!dev->regs_base) {
901 dev_err(&pdev->dev, "failed to ioremap address region\n");
906 irq = platform_get_irq(pdev, 0);
908 dev_err(&pdev->dev, "failed to get irq resource\n");
912 if (devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
913 0, SAHARA_NAME, dev) < 0) {
914 dev_err(&pdev->dev, "failed to request irq\n");
919 dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
920 if (IS_ERR(dev->clk_ipg)) {
921 dev_err(&pdev->dev, "Could not get ipg clock\n");
922 return PTR_ERR(dev->clk_ipg);
925 dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
926 if (IS_ERR(dev->clk_ahb)) {
927 dev_err(&pdev->dev, "Could not get ahb clock\n");
928 return PTR_ERR(dev->clk_ahb);
931 /* Allocate HW descriptors */
932 dev->hw_desc[0] = dma_alloc_coherent(&pdev->dev,
933 SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
934 &dev->hw_phys_desc[0], GFP_KERNEL);
935 if (!dev->hw_desc[0]) {
936 dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
939 dev->hw_desc[1] = dev->hw_desc[0] + 1;
940 dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
941 sizeof(struct sahara_hw_desc);
943 /* Allocate space for iv and key */
944 dev->key_base = dma_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
945 &dev->key_phys_base, GFP_KERNEL);
946 if (!dev->key_base) {
947 dev_err(&pdev->dev, "Could not allocate memory for key\n");
951 dev->iv_base = dev->key_base + AES_KEYSIZE_128;
952 dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
954 /* Allocate space for HW links */
955 dev->hw_link[0] = dma_alloc_coherent(&pdev->dev,
956 SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
957 &dev->hw_phys_link[0], GFP_KERNEL);
958 if (!dev->hw_link[0]) {
959 dev_err(&pdev->dev, "Could not allocate hw links\n");
963 for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
964 dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
965 sizeof(struct sahara_hw_link);
966 dev->hw_link[i] = dev->hw_link[i - 1] + 1;
969 crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
973 tasklet_init(&dev->queue_task, sahara_aes_queue_task,
975 tasklet_init(&dev->done_task, sahara_aes_done_task,
978 init_timer(&dev->watchdog);
979 dev->watchdog.function = &sahara_watchdog;
980 dev->watchdog.data = (unsigned long)dev;
982 clk_prepare_enable(dev->clk_ipg);
983 clk_prepare_enable(dev->clk_ahb);
985 version = sahara_read(dev, SAHARA_REG_VERSION);
986 if (version != SAHARA_VERSION_3) {
987 dev_err(&pdev->dev, "SAHARA version %d not supported\n",
993 sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
995 sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
996 SAHARA_CONTROL_SET_MAXBURST(8) |
997 SAHARA_CONTROL_RNG_AUTORSD |
998 SAHARA_CONTROL_ENABLE_INT,
1001 err = sahara_register_algs(dev);
1005 dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);
1010 dma_free_coherent(&pdev->dev,
1011 SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
1012 dev->hw_link[0], dev->hw_phys_link[0]);
1013 clk_disable_unprepare(dev->clk_ipg);
1014 clk_disable_unprepare(dev->clk_ahb);
1017 dma_free_coherent(&pdev->dev,
1018 2 * AES_KEYSIZE_128,
1019 dev->key_base, dev->key_phys_base);
1021 dma_free_coherent(&pdev->dev,
1022 SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
1023 dev->hw_desc[0], dev->hw_phys_desc[0]);
1028 static int sahara_remove(struct platform_device *pdev)
1030 struct sahara_dev *dev = platform_get_drvdata(pdev);
1032 dma_free_coherent(&pdev->dev,
1033 SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
1034 dev->hw_link[0], dev->hw_phys_link[0]);
1035 dma_free_coherent(&pdev->dev,
1036 2 * AES_KEYSIZE_128,
1037 dev->key_base, dev->key_phys_base);
1038 dma_free_coherent(&pdev->dev,
1039 SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
1040 dev->hw_desc[0], dev->hw_phys_desc[0]);
1042 tasklet_kill(&dev->done_task);
1043 tasklet_kill(&dev->queue_task);
1045 sahara_unregister_algs(dev);
1047 clk_disable_unprepare(dev->clk_ipg);
1048 clk_disable_unprepare(dev->clk_ahb);
1055 static struct platform_driver sahara_driver = {
1056 .probe = sahara_probe,
1057 .remove = sahara_remove,
1059 .name = SAHARA_NAME,
1060 .owner = THIS_MODULE,
1061 .of_match_table = sahara_dt_ids,
1063 .id_table = sahara_platform_ids,
1066 module_platform_driver(sahara_driver);
1068 MODULE_LICENSE("GPL");
1069 MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
1070 MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");