/*
 * Support for OMAP AES HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/pm_runtime.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>

#define DST_MAXBURST		4
#define DMA_MIN			(DST_MAXBURST * sizeof(u32))
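/*
 * Note: DMA_MIN works out to 16 bytes, which equals AES_BLOCK_SIZE, so
 * rounding a transfer length up to a DMA_MIN multiple (as done in
 * omap_aes_crypt_dma_start()) keeps the DMA length AES block aligned.
 */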

/* OMAP TRM gives bitfields as start:end, where start is the higher bit
   number. For example 7:0 */
#define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
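/* e.g. FLD_MASK(7, 0) == 0x000000ff and FLD_VAL(0x3, 4, 3) == 0x00000018 */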

#define AES_REG_KEY(x)		(0x1C - ((x ^ 0x01) * 0x04))
#define AES_REG_IV(x)		(0x20 + ((x) * 0x04))
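/*
 * The XOR in AES_REG_KEY() pairs up key words: indices 0..7 map to offsets
 * 0x18, 0x1C, 0x10, 0x14, 0x08, 0x0C, 0x00, 0x04, i.e. the key register
 * bank is filled from the top down, two 32-bit words at a time.
 */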

#define AES_REG_CTRL		0x30
#define AES_REG_CTRL_CTR_WIDTH	(1 << 7)
#define AES_REG_CTRL_CTR	(1 << 6)
#define AES_REG_CTRL_CBC	(1 << 5)
#define AES_REG_CTRL_KEY_SIZE	(3 << 3)
#define AES_REG_CTRL_DIRECTION	(1 << 2)
#define AES_REG_CTRL_INPUT_READY	(1 << 1)
#define AES_REG_CTRL_OUTPUT_READY	(1 << 0)

#define AES_REG_DATA		0x34
#define AES_REG_DATA_N(x)	(0x34 + ((x) * 0x04))

#define AES_REG_REV		0x44
#define AES_REG_REV_MAJOR	0xF0
#define AES_REG_REV_MINOR	0x0F

#define AES_REG_MASK		0x48
#define AES_REG_MASK_SIDLE	(1 << 6)
#define AES_REG_MASK_START	(1 << 5)
#define AES_REG_MASK_DMA_OUT_EN	(1 << 3)
#define AES_REG_MASK_DMA_IN_EN	(1 << 2)
#define AES_REG_MASK_SOFTRESET	(1 << 1)
#define AES_REG_AUTOIDLE	(1 << 0)

#define AES_REG_SYSSTATUS	0x4C
#define AES_REG_SYSSTATUS_RESETDONE	(1 << 0)

#define DEFAULT_TIMEOUT		(5*HZ)

#define FLAGS_MODE_MASK		0x000f
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define FLAGS_GIV		BIT(2)

#define FLAGS_INIT		BIT(4)
#define FLAGS_FAST		BIT(5)
#define FLAGS_BUSY		BIT(6)

struct omap_aes_ctx {
	struct omap_aes_dev	*dd;

	int			keylen;
	u32			key[AES_KEYSIZE_256 / sizeof(u32)];
	unsigned long		flags;
};

struct omap_aes_reqctx {
	unsigned long mode;
};

#define OMAP_AES_QUEUE_LENGTH	1
#define OMAP_AES_CACHE_SIZE	0
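/*
 * OMAP_AES_CACHE_SIZE is the page-allocation order of the bounce buffers,
 * so each direction gets a single PAGE_SIZE buffer, trimmed to an
 * AES_BLOCK_SIZE multiple in omap_aes_dma_init().
 */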

struct omap_aes_dev {
	struct list_head	list;
	unsigned long		phys_base;
	void __iomem		*io_base;
	struct omap_aes_ctx	*ctx;
	struct device		*dev;
	unsigned long		flags;
	int			err;

	spinlock_t		lock;
	struct crypto_queue	queue;

	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	struct ablkcipher_request	*req;
	size_t				total;
	struct scatterlist		*in_sg;
	struct scatterlist		in_sgl;
	size_t				in_offset;
	struct scatterlist		*out_sg;
	struct scatterlist		out_sgl;
	size_t				out_offset;

	size_t			buflen;
	void			*buf_in;
	size_t			dma_size;
	int			dma_in;
	struct dma_chan		*dma_lch_in;
	dma_addr_t		dma_addr_in;
	void			*buf_out;
	int			dma_out;
	struct dma_chan		*dma_lch_out;
	dma_addr_t		dma_addr_out;
};

/* keep registered devices data here */
static LIST_HEAD(dev_list);
static DEFINE_SPINLOCK(list_lock);

static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_base + offset);
}

static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
				  u32 value)
{
	__raw_writel(value, dd->io_base + offset);
}

static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
					u32 value, u32 mask)
{
	u32 val;

	val = omap_aes_read(dd, offset);
	val &= ~mask;
	val |= value;
	omap_aes_write(dd, offset, val);
}

static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset,
					u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		omap_aes_write(dd, offset, *value);
}

static int omap_aes_hw_init(struct omap_aes_dev *dd)
{
	/*
	 * clocks are enabled when request starts and disabled when finished.
	 * There may be long delays between requests.
	 * Device might go to off mode to save power.
	 */
	pm_runtime_get_sync(dd->dev);

	if (!(dd->flags & FLAGS_INIT)) {
		dd->flags |= FLAGS_INIT;
		dd->err = 0;
	}

	return 0;
}

static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
{
	unsigned int key32;
	int i, err;
	u32 val, mask;

	err = omap_aes_hw_init(dd);
	if (err)
		return err;

	val = 0;
	if (dd->dma_lch_out != NULL)
		val |= AES_REG_MASK_DMA_OUT_EN;
	if (dd->dma_lch_in != NULL)
		val |= AES_REG_MASK_DMA_IN_EN;

	mask = AES_REG_MASK_DMA_IN_EN | AES_REG_MASK_DMA_OUT_EN;

	omap_aes_write_mask(dd, AES_REG_MASK, val, mask);

	key32 = dd->ctx->keylen / sizeof(u32);

	/* it seems a key should always be set even if it has not changed */
	for (i = 0; i < key32; i++) {
		omap_aes_write(dd, AES_REG_KEY(i),
			__le32_to_cpu(dd->ctx->key[i]));
	}

	if ((dd->flags & FLAGS_CBC) && dd->req->info)
		omap_aes_write_n(dd, AES_REG_IV(0), dd->req->info, 4);

	/* key size field: 1 = 128-bit, 2 = 192-bit, 3 = 256-bit keys */
	val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
	if (dd->flags & FLAGS_CBC)
		val |= AES_REG_CTRL_CBC;
	if (dd->flags & FLAGS_ENCRYPT)
		val |= AES_REG_CTRL_DIRECTION;

	mask = AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION |
			AES_REG_CTRL_KEY_SIZE;

	omap_aes_write_mask(dd, AES_REG_CTRL, val, mask);

	return 0;
}

static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
{
	struct omap_aes_dev *dd = NULL, *tmp;

	spin_lock_bh(&list_lock);
	if (!ctx->dd) {
		list_for_each_entry(tmp, &dev_list, list) {
			/* FIXME: take first available aes core */
			dd = tmp;
			break;
		}
		ctx->dd = dd;
	} else {
		/* already found before */
		dd = ctx->dd;
	}
	spin_unlock_bh(&list_lock);

	return dd;
}

static void omap_aes_dma_out_callback(void *data)
{
	struct omap_aes_dev *dd = data;

	/* dma_lch_out - completed */
	tasklet_schedule(&dd->done_task);
}
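/*
 * Only the OUT channel gets a completion callback (see omap_aes_crypt_dma()):
 * the result is only meaningful once the OUT DMA has drained AES_REG_DATA,
 * so completion of the OUT transfer is what schedules the done tasklet and
 * the IN channel is intentionally left without a callback of its own.
 */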

static int omap_aes_dma_init(struct omap_aes_dev *dd)
{
	int err = -ENOMEM;
	dma_cap_mask_t mask;

	dd->dma_lch_out = NULL;
	dd->dma_lch_in = NULL;

	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
	dd->buflen = PAGE_SIZE << OMAP_AES_CACHE_SIZE;
	dd->buflen &= ~(AES_BLOCK_SIZE - 1);

	if (!dd->buf_in || !dd->buf_out) {
		dev_err(dd->dev, "unable to alloc pages.\n");
		goto err_alloc;
	}

	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, dd->buflen,
					 DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
		dev_err(dd->dev, "dma %zu bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_in;
	}

	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, dd->buflen,
					  DMA_FROM_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
		dev_err(dd->dev, "dma %zu bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_out;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	dd->dma_lch_in = dma_request_channel(mask, omap_dma_filter_fn,
					     &dd->dma_in);
	if (!dd->dma_lch_in) {
		dev_err(dd->dev, "Unable to request in DMA channel\n");
		goto err_dma_in;
	}

	dd->dma_lch_out = dma_request_channel(mask, omap_dma_filter_fn,
					      &dd->dma_out);
	if (!dd->dma_lch_out) {
		dev_err(dd->dev, "Unable to request out DMA channel\n");
		goto err_dma_out;
	}

	return 0;

err_dma_out:
	dma_release_channel(dd->dma_lch_in);
err_dma_in:
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			 DMA_FROM_DEVICE);
err_map_out:
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
err_map_in:
	free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
	free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
err_alloc:
	if (err)
		pr_err("error: %d\n", err);
	return err;
}

static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
{
	dma_release_channel(dd->dma_lch_out);
	dma_release_channel(dd->dma_lch_in);
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			 DMA_FROM_DEVICE);
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
	free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
	free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
}

static void sg_copy_buf(void *buf, struct scatterlist *sg,
			unsigned int start, unsigned int nbytes, int out)
{
	struct scatter_walk walk;

	if (!nbytes)
		return;

	scatterwalk_start(&walk, sg);
	scatterwalk_advance(&walk, start);
	scatterwalk_copychunks(buf, &walk, nbytes, out);
	scatterwalk_done(&walk, out, 0);
}

static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf,
		   size_t buflen, size_t total, int out)
{
	unsigned int count, off = 0;

	while (buflen && total) {
		count = min((*sg)->length - *offset, total);
		count = min(count, buflen);

		if (!count)
			return off;

		/*
		 * buflen and total are AES_BLOCK_SIZE size aligned,
		 * so count should also be aligned
		 */

		sg_copy_buf(buf + off, *sg, *offset, count, out);

		off += count;
		buflen -= count;
		*offset += count;
		total -= count;

		if (*offset == (*sg)->length) {
			*sg = sg_next(*sg);
			if (*sg)
				*offset = 0;
			else
				total = 0;
		}
	}

	return off;
}

static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
		struct scatterlist *in_sg, struct scatterlist *out_sg)
{
	struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	struct omap_aes_dev *dd = ctx->dd;
	struct dma_async_tx_descriptor *tx_in, *tx_out;
	struct dma_slave_config cfg;
	dma_addr_t dma_addr_in = sg_dma_address(in_sg);
	int ret, length = sg_dma_len(in_sg);

	pr_debug("len: %d\n", length);

	dd->dma_size = length;

	if (!(dd->flags & FLAGS_FAST))
		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
					   DMA_TO_DEVICE);

	memset(&cfg, 0, sizeof(cfg));

	cfg.src_addr = dd->phys_base + AES_REG_DATA;
	cfg.dst_addr = dd->phys_base + AES_REG_DATA;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = DST_MAXBURST;
	cfg.dst_maxburst = DST_MAXBURST;

	/* IN */
	ret = dmaengine_slave_config(dd->dma_lch_in, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
			ret);
		return ret;
	}

	tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, 1,
					DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx_in) {
		dev_err(dd->dev, "IN prep_slave_sg() failed\n");
		return -EINVAL;
	}

	/* No callback necessary */
	tx_in->callback_param = dd;

	/* OUT */
	ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
			ret);
		return ret;
	}

	tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, 1,
					DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx_out) {
		dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
		return -EINVAL;
	}

	tx_out->callback = omap_aes_dma_out_callback;
	tx_out->callback_param = dd;

	dmaengine_submit(tx_in);
	dmaengine_submit(tx_out);

	dma_async_issue_pending(dd->dma_lch_in);
	dma_async_issue_pending(dd->dma_lch_out);

	/* start DMA or disable idle mode */
	omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START,
			    AES_REG_MASK_START);

	return 0;
}

static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
					crypto_ablkcipher_reqtfm(dd->req));
	int err, fast = 0, in, out;
	size_t count;
	dma_addr_t addr_in, addr_out;
	struct scatterlist *in_sg, *out_sg;
	int len32;

	pr_debug("total: %zu\n", dd->total);

	if (sg_is_last(dd->in_sg) && sg_is_last(dd->out_sg)) {
		/* check for alignment */
		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32));
		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32));

		fast = in && out;
	}

	if (fast) {
		count = min(dd->total, sg_dma_len(dd->in_sg));
		count = min(count, sg_dma_len(dd->out_sg));

		if (count != dd->total) {
			pr_err("request length != buffer length\n");
			return -EINVAL;
		}

		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			return -EINVAL;
		}

		err = dma_map_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
			return -EINVAL;
		}

		addr_in = sg_dma_address(dd->in_sg);
		addr_out = sg_dma_address(dd->out_sg);

		in_sg = dd->in_sg;
		out_sg = dd->out_sg;

		dd->flags |= FLAGS_FAST;
	} else {
		/* use cache buffers */
		count = sg_copy(&dd->in_sg, &dd->in_offset, dd->buf_in,
				dd->buflen, dd->total, 0);

		len32 = DIV_ROUND_UP(count, DMA_MIN) * DMA_MIN;

		/*
		 * The data going into the AES module has been copied
		 * to a local buffer and the data coming out will go
		 * into a local buffer so set up local SG entries for
		 * both.
		 */
		sg_init_table(&dd->in_sgl, 1);
		dd->in_sgl.offset = dd->in_offset;
		sg_dma_len(&dd->in_sgl) = len32;
		sg_dma_address(&dd->in_sgl) = dd->dma_addr_in;

		sg_init_table(&dd->out_sgl, 1);
		dd->out_sgl.offset = dd->out_offset;
		sg_dma_len(&dd->out_sgl) = len32;
		sg_dma_address(&dd->out_sgl) = dd->dma_addr_out;

		in_sg = &dd->in_sgl;
		out_sg = &dd->out_sgl;

		addr_in = dd->dma_addr_in;
		addr_out = dd->dma_addr_out;

		dd->flags &= ~FLAGS_FAST;
	}

	dd->total -= count;

	err = omap_aes_crypt_dma(tfm, in_sg, out_sg);
	if (err) {
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
	}

	return err;
}

static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
{
	struct ablkcipher_request *req = dd->req;

	pr_debug("err: %d\n", err);

	pm_runtime_put_sync(dd->dev);
	dd->flags &= ~FLAGS_BUSY;

	req->base.complete(&req->base, err);
}

static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
{
	int err = 0;
	size_t count;

	pr_debug("total: %zu\n", dd->total);

	omap_aes_write_mask(dd, AES_REG_MASK, 0, AES_REG_MASK_START);

	dmaengine_terminate_all(dd->dma_lch_in);
	dmaengine_terminate_all(dd->dma_lch_out);

	if (dd->flags & FLAGS_FAST) {
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
	} else {
		dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
					   dd->dma_size, DMA_FROM_DEVICE);

		/* copy data back out of the bounce buffer */
		count = sg_copy(&dd->out_sg, &dd->out_offset, dd->buf_out,
				dd->buflen, dd->dma_size, 1);
		if (count != dd->dma_size) {
			err = -EINVAL;
			pr_err("not all data converted: %zu\n", count);
		}
	}

	return err;
}

static int omap_aes_handle_queue(struct omap_aes_dev *dd,
			       struct ablkcipher_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct omap_aes_ctx *ctx;
	struct omap_aes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ablkcipher_enqueue_request(&dd->queue, req);
	if (dd->flags & FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ablkcipher_request_cast(async_req);

	/* assign new request to device */
	dd->req = req;
	dd->total = req->nbytes;
	dd->in_offset = 0;
	dd->in_sg = req->src;
	dd->out_offset = 0;
	dd->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	dd->ctx = ctx;
	ctx->dd = dd;

	err = omap_aes_write_ctrl(dd);
	if (!err)
		err = omap_aes_crypt_dma_start(dd);
	if (err) {
		/* aes_task will not finish it, so do it here */
		omap_aes_finish_req(dd, err);
		tasklet_schedule(&dd->queue_task);
	}

	return ret; /* return ret, which is enqueue return value */
}

static void omap_aes_done_task(unsigned long data)
{
	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
	int err;

	err = omap_aes_crypt_dma_stop(dd);

	err = dd->err ? : err;

	if (dd->total && !err) {
		err = omap_aes_crypt_dma_start(dd);
		if (!err)
			return; /* DMA started. Not finishing. */
	}

	omap_aes_finish_req(dd, err);
	omap_aes_handle_queue(dd, NULL);
}

static void omap_aes_queue_task(unsigned long data)
{
	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;

	omap_aes_handle_queue(dd, NULL);
}

static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
			crypto_ablkcipher_reqtfm(req));
	struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct omap_aes_dev *dd;

	pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
		  !!(mode & FLAGS_ENCRYPT),
		  !!(mode & FLAGS_CBC));

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		pr_err("request size is not exact amount of AES blocks\n");
		return -EINVAL;
	}

	dd = omap_aes_find_dev(ctx);
	if (!dd)
		return -ENODEV;

	rctx->mode = mode;

	return omap_aes_handle_queue(dd, req);
}

/* ********************** ALG API ************************************ */

static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
		   keylen != AES_KEYSIZE_256)
		return -EINVAL;

	pr_debug("enter, keylen: %d\n", keylen);

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int omap_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT);
}

static int omap_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, 0);
}

static int omap_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int omap_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_CBC);
}

static int omap_aes_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);

	return 0;
}

static void omap_aes_cra_exit(struct crypto_tfm *tfm)
{
}

/* ********************** ALGS ************************************ */

static struct crypto_alg algs[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-omap",
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_ecb_encrypt,
		.decrypt	= omap_aes_ecb_decrypt,
	}
},
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-omap",
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_cbc_encrypt,
		.decrypt	= omap_aes_cbc_decrypt,
	}
}
};
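
/*
 * Usage note: a minimal sketch of how a kernel caller might exercise the
 * "cbc(aes)" implementation registered above through the generic ablkcipher
 * API of this kernel generation.  It is illustrative only and not part of
 * the driver; the example_* names are made up for the sketch, it assumes
 * <linux/completion.h> is available, and real callers normally live in
 * other subsystems (dm-crypt, IPsec, ...).
 */
#if 0
struct example_result {
	struct completion completion;
	int err;
};

static void example_complete(struct crypto_async_request *req, int err)
{
	struct example_result *res = req->data;

	if (err == -EINPROGRESS)
		return;		/* request was only moved off the backlog */
	res->err = err;
	complete(&res->completion);
}

static int example_cbc_aes_encrypt(struct scatterlist *src,
				   struct scatterlist *dst,
				   unsigned int nbytes, const u8 *key,
				   unsigned int keylen, u8 *iv)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct example_result res;
	int err;

	/* picks the highest-priority "cbc(aes)" provider, possibly this one */
	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_ablkcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	init_completion(&res.completion);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					example_complete, &res);
	/* nbytes must be a multiple of AES_BLOCK_SIZE, see omap_aes_crypt() */
	ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);

	err = crypto_ablkcipher_encrypt(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		/* hardware completes asynchronously via the done tasklet */
		wait_for_completion(&res.completion);
		err = res.err;
	}

	ablkcipher_request_free(req);
out_free_tfm:
	crypto_free_ablkcipher(tfm);
	return err;
}
#endif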

static int omap_aes_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct omap_aes_dev *dd;
	struct resource *res;
	int err = -ENOMEM, i, j;
	u32 reg;

	dd = kzalloc(sizeof(struct omap_aes_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		goto err_data;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	spin_lock_init(&dd->lock);
	crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH);

	/* Get the base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "invalid resource type\n");
		err = -ENODEV;
		goto err_res;
	}
	dd->phys_base = res->start;

	/* Get the DMA out channel */
	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!res)
		dev_info(dev, "no DMA info\n");
	else
		dd->dma_out = res->start;

	/* Get the DMA in channel */
	res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (!res)
		dev_info(dev, "no DMA info\n");
	else
		dd->dma_in = res->start;

	dd->io_base = ioremap(dd->phys_base, SZ_4K);
	if (!dd->io_base) {
		dev_err(dev, "can't ioremap\n");
		err = -ENOMEM;
		goto err_res;
	}

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	reg = omap_aes_read(dd, AES_REG_REV);
	dev_info(dev, "OMAP AES hw accel rev: %u.%u\n",
		 (reg & AES_REG_REV_MAJOR) >> 4, reg & AES_REG_REV_MINOR);

	pm_runtime_put_sync(dev);

	tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
	tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd);

	err = omap_aes_dma_init(dd);
	if (err)
		goto err_dma;

	INIT_LIST_HEAD(&dd->list);
	spin_lock(&list_lock);
	list_add_tail(&dd->list, &dev_list);
	spin_unlock(&list_lock);

	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		pr_debug("i: %d\n", i);
		err = crypto_register_alg(&algs[i]);
		if (err)
			goto err_algs;
	}

	return 0;
err_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&algs[j]);
	omap_aes_dma_cleanup(dd);
err_dma:
	tasklet_kill(&dd->done_task);
	tasklet_kill(&dd->queue_task);
	iounmap(dd->io_base);
	pm_runtime_disable(dev);
err_res:
	kfree(dd);
	dd = NULL;
err_data:
	dev_err(dev, "initialization failed.\n");
	return err;
}

static int omap_aes_remove(struct platform_device *pdev)
{
	struct omap_aes_dev *dd = platform_get_drvdata(pdev);
	int i;

	if (!dd)
		return -ENODEV;

	spin_lock(&list_lock);
	list_del(&dd->list);
	spin_unlock(&list_lock);

	for (i = 0; i < ARRAY_SIZE(algs); i++)
		crypto_unregister_alg(&algs[i]);

	tasklet_kill(&dd->done_task);
	tasklet_kill(&dd->queue_task);
	omap_aes_dma_cleanup(dd);
	iounmap(dd->io_base);
	pm_runtime_disable(dd->dev);
	kfree(dd);
	dd = NULL;

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int omap_aes_suspend(struct device *dev)
{
	pm_runtime_put_sync(dev);

	return 0;
}

static int omap_aes_resume(struct device *dev)
{
	pm_runtime_get_sync(dev);

	return 0;
}
#endif

static const struct dev_pm_ops omap_aes_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(omap_aes_suspend, omap_aes_resume)
};

static struct platform_driver omap_aes_driver = {
	.probe	= omap_aes_probe,
	.remove	= omap_aes_remove,
	.driver	= {
		.name	= "omap-aes",
		.owner	= THIS_MODULE,
		.pm	= &omap_aes_pm_ops,
	},
};

static int __init omap_aes_mod_init(void)
{
	return platform_driver_register(&omap_aes_driver);
}

static void __exit omap_aes_mod_exit(void)
{
	platform_driver_unregister(&omap_aes_driver);
}

module_init(omap_aes_mod_init);
module_exit(omap_aes_mod_exit);

MODULE_DESCRIPTION("OMAP AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");